// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright(c) 2007 Intel Corporation. All rights reserved.
*
* Maintained at www.Open-FCoE.org
*/
/*
* PORT LOCKING NOTES
*
* These comments only apply to the 'port code' which consists of the lport,
* disc and rport blocks.
*
* MOTIVATION
*
* The lport, disc and rport blocks all have mutexes that are used to protect
* those objects. The main motivation for these locks is to prevent an
* lport reset just before we send a frame. In that scenario the
* lport's FID would get set to zero and then we'd send a frame with an
* invalid SID. We also need to ensure that states don't change unexpectedly
* while processing another state.
*
* HIERARCHY
*
* The following hierarchy defines the locking rules. A greater lock
* may be held before acquiring a lesser lock, but a lesser lock should never
* be held while attempting to acquire a greater lock. Here is the hierarchy:
*
* lport > disc, lport > rport, disc > rport
*
* CALLBACKS
*
* The callbacks cause complications with this scheme. There is a callback
* from the rport (to either lport or disc) and a callback from disc
* (to the lport).
*
* As rports exit the rport state machine a callback is made to the owner of
* the rport to notify success or failure. Since the callback is likely to
* cause the lport or disc to grab its lock we cannot hold the rport lock
* while making the callback. To ensure that the rport is not free'd while
* processing the callback the rport callbacks are serialized through a
* single-threaded workqueue. An rport would never be free'd while in a
* callback handler because no other rport work in this queue can be executed
* at the same time.
*
* When discovery succeeds or fails a callback is made to the lport as
* notification. Currently, successful discovery causes the lport to take no
* action. A failure will cause the lport to reset. There is likely a circular
* locking problem with this implementation.
*/
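/*
 * Illustrative sketch, not part of the original driver: one way the lock
 * hierarchy above looks in code. The helper below is hypothetical and kept
 * out of the build; it only assumes the lp_mutex, disc_mutex and rp_mutex
 * members that libfc already defines for these objects.
 */
#if 0	/* example only */
static void example_lock_hierarchy(struct fc_lport *lport,
				   struct fc_rport_priv *rdata)
{
	/* Greater locks first: lport > disc > rport. */
	mutex_lock(&lport->lp_mutex);
	mutex_lock(&lport->disc.disc_mutex);
	mutex_lock(&rdata->rp_mutex);

	/* ... touch state that needs all three objects held stable ... */

	/* Release in the reverse order. */
	mutex_unlock(&rdata->rp_mutex);
	mutex_unlock(&lport->disc.disc_mutex);
	mutex_unlock(&lport->lp_mutex);
}
#endif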
/*
* LPORT LOCKING
*
* The critical sections protected by the lport's mutex are quite broad and
* may be improved upon in the future. The lport code and its locking don't
* influence the I/O path, so excessive locking doesn't penalize I/O
* performance.
*
* The strategy is to lock whenever processing a request or response. Note
* that every _enter_* function corresponds to a state change. They generally
* change the lport's state and then send a request out on the wire. We lock
* before calling any of these functions to protect that state change. This
* means that the entry points into the lport block manage the locks while
* the state machine can transition between states (i.e. _enter_* functions)
* without ever dropping that protection.
*
* When handling responses we also hold the lport mutex broadly. When the
* lport receives the response frame it locks the mutex and then calls the
* appropriate handler for the particular response. Generally a response will
* trigger a state change and so the lock must already be held.
*
* Retries also have to consider the locking. The retries occur from a work
* context and the work function will lock the lport and then retry the state
* (i.e. _enter_* function).
*/
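/*
 * Illustrative sketch, hypothetical and not compiled: the entry-point
 * pattern described above. An external entry point takes lp_mutex and then
 * drives the state machine through an _enter_* function; any chained
 * _enter_* calls run with the mutex still held, much like fc_lport_reset()
 * later in this file.
 */
#if 0	/* example only */
static void example_lport_entry_point(struct fc_lport *lport)
{
	mutex_lock(&lport->lp_mutex);
	/* The state change and the outgoing request happen under the lock. */
	fc_lport_enter_reset(lport);
	mutex_unlock(&lport->lp_mutex);
}
#endif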
#include <linux/timer.h>
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <asm/unaligned.h>
#include <scsi/fc/fc_gs.h>
#include <scsi/libfc.h>
#include <linux/scatterlist.h>
#include "fc_encode.h"
#include "fc_libfc.h"
/* Fabric IDs to use for point-to-point mode, chosen on whims. */
#define FC_LOCAL_PTP_FID_LO 0x010101
#define FC_LOCAL_PTP_FID_HI 0x010102
#define DNS_DELAY 3 /* Discovery delay after RSCN (in seconds)*/
#define MAX_CT_PAYLOAD 2048
#define DISCOVERED_PORTS 4
#define NUMBER_OF_PORTS 1
static void fc_lport_error(struct fc_lport *, struct fc_frame *);
static void fc_lport_enter_reset(struct fc_lport *);
static void fc_lport_enter_flogi(struct fc_lport *);
static void fc_lport_enter_dns(struct fc_lport *);
static void fc_lport_enter_ns(struct fc_lport *, enum fc_lport_state);
static void fc_lport_enter_scr(struct fc_lport *);
static void fc_lport_enter_ready(struct fc_lport *);
static void fc_lport_enter_logo(struct fc_lport *);
static void fc_lport_enter_fdmi(struct fc_lport *lport);
static void fc_lport_enter_ms(struct fc_lport *, enum fc_lport_state);
static const char *fc_lport_state_names[] = {
[LPORT_ST_DISABLED] = "disabled",
[LPORT_ST_FLOGI] = "FLOGI",
[LPORT_ST_DNS] = "dNS",
[LPORT_ST_RNN_ID] = "RNN_ID",
[LPORT_ST_RSNN_NN] = "RSNN_NN",
[LPORT_ST_RSPN_ID] = "RSPN_ID",
[LPORT_ST_RFT_ID] = "RFT_ID",
[LPORT_ST_RFF_ID] = "RFF_ID",
[LPORT_ST_FDMI] = "FDMI",
[LPORT_ST_RHBA] = "RHBA",
[LPORT_ST_RPA] = "RPA",
[LPORT_ST_DHBA] = "DHBA",
[LPORT_ST_DPRT] = "DPRT",
[LPORT_ST_SCR] = "SCR",
[LPORT_ST_READY] = "Ready",
[LPORT_ST_LOGO] = "LOGO",
[LPORT_ST_RESET] = "reset",
};
/**
* struct fc_bsg_info - FC Passthrough management structure
* @job: The passthrough job
* @lport: The local port to pass through a command
* @rsp_code: The expected response code
* @sg: job->reply_payload.sg_list
* @nents: job->reply_payload.sg_cnt
* @offset: The offset into the response data
*/
struct fc_bsg_info {
struct bsg_job *job;
struct fc_lport *lport;
u16 rsp_code;
struct scatterlist *sg;
u32 nents;
size_t offset;
};
/**
* fc_frame_drop() - Dummy frame handler
* @lport: The local port the frame was received on
* @fp: The received frame
*/
static int fc_frame_drop(struct fc_lport *lport, struct fc_frame *fp)
{
fc_frame_free(fp);
return 0;
}
/**
* fc_lport_rport_callback() - Event handler for rport events
* @lport: The lport which is receiving the event
* @rdata: private remote port data
* @event: The event that occurred
*
* Locking Note: The rport lock should not be held when calling
* this function.
*/
static void fc_lport_rport_callback(struct fc_lport *lport,
struct fc_rport_priv *rdata,
enum fc_rport_event event)
{
FC_LPORT_DBG(lport, "Received a %d event for port (%6.6x)\n", event,
rdata->ids.port_id);
mutex_lock(&lport->lp_mutex);
switch (event) {
case RPORT_EV_READY:
if (lport->state == LPORT_ST_DNS) {
lport->dns_rdata = rdata;
fc_lport_enter_ns(lport, LPORT_ST_RNN_ID);
} else if (lport->state == LPORT_ST_FDMI) {
lport->ms_rdata = rdata;
fc_lport_enter_ms(lport, LPORT_ST_DHBA);
} else {
FC_LPORT_DBG(lport, "Received an READY event "
"on port (%6.6x) for the directory "
"server, but the lport is not "
"in the DNS or FDMI state, it's in the "
"%d state", rdata->ids.port_id,
lport->state);
fc_rport_logoff(rdata);
}
break;
case RPORT_EV_LOGO:
case RPORT_EV_FAILED:
case RPORT_EV_STOP:
if (rdata->ids.port_id == FC_FID_DIR_SERV)
lport->dns_rdata = NULL;
else if (rdata->ids.port_id == FC_FID_MGMT_SERV)
lport->ms_rdata = NULL;
break;
case RPORT_EV_NONE:
break;
}
mutex_unlock(&lport->lp_mutex);
}
/**
* fc_lport_state() - Return a string which represents the lport's state
* @lport: The lport whose state is to be converted to a string
*/
static const char *fc_lport_state(struct fc_lport *lport)
{
const char *cp;
cp = fc_lport_state_names[lport->state];
if (!cp)
cp = "unknown";
return cp;
}
/**
* fc_lport_ptp_setup() - Create an rport for point-to-point mode
* @lport: The lport to attach the ptp rport to
* @remote_fid: The FID of the ptp rport
* @remote_wwpn: The WWPN of the ptp rport
* @remote_wwnn: The WWNN of the ptp rport
*/
static void fc_lport_ptp_setup(struct fc_lport *lport,
u32 remote_fid, u64 remote_wwpn,
u64 remote_wwnn)
{
lockdep_assert_held(&lport->lp_mutex);
if (lport->ptp_rdata) {
fc_rport_logoff(lport->ptp_rdata);
kref_put(&lport->ptp_rdata->kref, fc_rport_destroy);
}
mutex_lock(&lport->disc.disc_mutex);
lport->ptp_rdata = fc_rport_create(lport, remote_fid);
kref_get(&lport->ptp_rdata->kref);
lport->ptp_rdata->ids.port_name = remote_wwpn;
lport->ptp_rdata->ids.node_name = remote_wwnn;
mutex_unlock(&lport->disc.disc_mutex);
fc_rport_login(lport->ptp_rdata);
fc_lport_enter_ready(lport);
}
/**
* fc_get_host_port_state() - Return the port state of the given Scsi_Host
* @shost: The SCSI host whose port state is to be determined
*/
void fc_get_host_port_state(struct Scsi_Host *shost)
{
struct fc_lport *lport = shost_priv(shost);
mutex_lock(&lport->lp_mutex);
if (!lport->link_up)
fc_host_port_state(shost) = FC_PORTSTATE_LINKDOWN;
else
switch (lport->state) {
case LPORT_ST_READY:
fc_host_port_state(shost) = FC_PORTSTATE_ONLINE;
break;
default:
fc_host_port_state(shost) = FC_PORTSTATE_OFFLINE;
}
mutex_unlock(&lport->lp_mutex);
}
EXPORT_SYMBOL(fc_get_host_port_state);
/**
* fc_get_host_speed() - Return the speed of the given Scsi_Host
* @shost: The SCSI host whose port speed is to be determined
*/
void fc_get_host_speed(struct Scsi_Host *shost)
{
struct fc_lport *lport = shost_priv(shost);
fc_host_speed(shost) = lport->link_speed;
}
EXPORT_SYMBOL(fc_get_host_speed);
/**
* fc_get_host_stats() - Return the Scsi_Host's statistics
* @shost: The SCSI host whose statistics are to be returned
*/
struct fc_host_statistics *fc_get_host_stats(struct Scsi_Host *shost)
{
struct fc_host_statistics *fc_stats;
struct fc_lport *lport = shost_priv(shost);
unsigned int cpu;
u64 fcp_in_bytes = 0;
u64 fcp_out_bytes = 0;
fc_stats = &lport->host_stats;
memset(fc_stats, 0, sizeof(struct fc_host_statistics));
fc_stats->seconds_since_last_reset = (jiffies - lport->boot_time) / HZ;
for_each_possible_cpu(cpu) {
struct fc_stats *stats;
stats = per_cpu_ptr(lport->stats, cpu);
fc_stats->tx_frames += READ_ONCE(stats->TxFrames);
fc_stats->tx_words += READ_ONCE(stats->TxWords);
fc_stats->rx_frames += READ_ONCE(stats->RxFrames);
fc_stats->rx_words += READ_ONCE(stats->RxWords);
fc_stats->error_frames += READ_ONCE(stats->ErrorFrames);
fc_stats->invalid_crc_count += READ_ONCE(stats->InvalidCRCCount);
fc_stats->fcp_input_requests += READ_ONCE(stats->InputRequests);
fc_stats->fcp_output_requests += READ_ONCE(stats->OutputRequests);
fc_stats->fcp_control_requests += READ_ONCE(stats->ControlRequests);
fcp_in_bytes += READ_ONCE(stats->InputBytes);
fcp_out_bytes += READ_ONCE(stats->OutputBytes);
fc_stats->fcp_packet_alloc_failures += READ_ONCE(stats->FcpPktAllocFails);
fc_stats->fcp_packet_aborts += READ_ONCE(stats->FcpPktAborts);
fc_stats->fcp_frame_alloc_failures += READ_ONCE(stats->FcpFrameAllocFails);
fc_stats->link_failure_count += READ_ONCE(stats->LinkFailureCount);
}
fc_stats->fcp_input_megabytes = div_u64(fcp_in_bytes, 1000000);
fc_stats->fcp_output_megabytes = div_u64(fcp_out_bytes, 1000000);
fc_stats->lip_count = -1;
fc_stats->nos_count = -1;
fc_stats->loss_of_sync_count = -1;
fc_stats->loss_of_signal_count = -1;
fc_stats->prim_seq_protocol_err_count = -1;
fc_stats->dumped_frames = -1;
/* update exches stats */
fc_exch_update_stats(lport);
return fc_stats;
}
EXPORT_SYMBOL(fc_get_host_stats);
/**
* fc_lport_flogi_fill() - Fill in FLOGI command for request
* @lport: The local port the FLOGI is for
* @flogi: The FLOGI command
* @op: The opcode
*/
static void fc_lport_flogi_fill(struct fc_lport *lport,
struct fc_els_flogi *flogi,
unsigned int op)
{
struct fc_els_csp *sp;
struct fc_els_cssp *cp;
memset(flogi, 0, sizeof(*flogi));
flogi->fl_cmd = (u8) op;
put_unaligned_be64(lport->wwpn, &flogi->fl_wwpn);
put_unaligned_be64(lport->wwnn, &flogi->fl_wwnn);
sp = &flogi->fl_csp;
sp->sp_hi_ver = 0x20;
sp->sp_lo_ver = 0x20;
sp->sp_bb_cred = htons(10); /* this gets set by gateway */
sp->sp_bb_data = htons((u16) lport->mfs);
cp = &flogi->fl_cssp[3 - 1]; /* class 3 parameters */
cp->cp_class = htons(FC_CPC_VALID | FC_CPC_SEQ);
if (op != ELS_FLOGI) {
sp->sp_features = htons(FC_SP_FT_CIRO);
sp->sp_tot_seq = htons(255); /* seq. we accept */
sp->sp_rel_off = htons(0x1f);
sp->sp_e_d_tov = htonl(lport->e_d_tov);
cp->cp_rdfs = htons((u16) lport->mfs);
cp->cp_con_seq = htons(255);
cp->cp_open_seq = 1;
}
}
/**
* fc_lport_add_fc4_type() - Add a supported FC-4 type to a local port
* @lport: The local port to add a new FC-4 type to
* @type: The new FC-4 type
*/
static void fc_lport_add_fc4_type(struct fc_lport *lport, enum fc_fh_type type)
{
__be32 *mp;
mp = &lport->fcts.ff_type_map[type / FC_NS_BPW];
*mp = htonl(ntohl(*mp) | 1UL << (type % FC_NS_BPW));
}
/**
* fc_lport_recv_rlir_req() - Handle received Registered Link Incident Report.
* @lport: Fibre Channel local port receiving the RLIR
* @fp: The RLIR request frame
*/
static void fc_lport_recv_rlir_req(struct fc_lport *lport, struct fc_frame *fp)
{
lockdep_assert_held(&lport->lp_mutex);
FC_LPORT_DBG(lport, "Received RLIR request while in state %s\n",
fc_lport_state(lport));
fc_seq_els_rsp_send(fp, ELS_LS_ACC, NULL);
fc_frame_free(fp);
}
/**
* fc_lport_recv_echo_req() - Handle received ECHO request
* @lport: The local port receiving the ECHO
* @in_fp: ECHO request frame
*/
static void fc_lport_recv_echo_req(struct fc_lport *lport,
struct fc_frame *in_fp)
{
struct fc_frame *fp;
unsigned int len;
void *pp;
void *dp;
lockdep_assert_held(&lport->lp_mutex);
FC_LPORT_DBG(lport, "Received ECHO request while in state %s\n",
fc_lport_state(lport));
len = fr_len(in_fp) - sizeof(struct fc_frame_header);
pp = fc_frame_payload_get(in_fp, len);
if (len < sizeof(__be32))
len = sizeof(__be32);
fp = fc_frame_alloc(lport, len);
if (fp) {
dp = fc_frame_payload_get(fp, len);
memcpy(dp, pp, len);
*((__be32 *)dp) = htonl(ELS_LS_ACC << 24);
fc_fill_reply_hdr(fp, in_fp, FC_RCTL_ELS_REP, 0);
lport->tt.frame_send(lport, fp);
}
fc_frame_free(in_fp);
}
/**
* fc_lport_recv_rnid_req() - Handle received Request Node ID data request
* @lport: The local port receiving the RNID
* @in_fp: The RNID request frame
*/
static void fc_lport_recv_rnid_req(struct fc_lport *lport,
struct fc_frame *in_fp)
{
struct fc_frame *fp;
struct fc_els_rnid *req;
struct {
struct fc_els_rnid_resp rnid;
struct fc_els_rnid_cid cid;
struct fc_els_rnid_gen gen;
} *rp;
struct fc_seq_els_data rjt_data;
u8 fmt;
size_t len;
lockdep_assert_held(&lport->lp_mutex);
FC_LPORT_DBG(lport, "Received RNID request while in state %s\n",
fc_lport_state(lport));
req = fc_frame_payload_get(in_fp, sizeof(*req));
if (!req) {
rjt_data.reason = ELS_RJT_LOGIC;
rjt_data.explan = ELS_EXPL_NONE;
fc_seq_els_rsp_send(in_fp, ELS_LS_RJT, &rjt_data);
} else {
fmt = req->rnid_fmt;
len = sizeof(*rp);
if (fmt != ELS_RNIDF_GEN ||
ntohl(lport->rnid_gen.rnid_atype) == 0) {
fmt = ELS_RNIDF_NONE; /* nothing to provide */
len -= sizeof(rp->gen);
}
fp = fc_frame_alloc(lport, len);
if (fp) {
rp = fc_frame_payload_get(fp, len);
memset(rp, 0, len);
rp->rnid.rnid_cmd = ELS_LS_ACC;
rp->rnid.rnid_fmt = fmt;
rp->rnid.rnid_cid_len = sizeof(rp->cid);
rp->cid.rnid_wwpn = htonll(lport->wwpn);
rp->cid.rnid_wwnn = htonll(lport->wwnn);
if (fmt == ELS_RNIDF_GEN) {
rp->rnid.rnid_sid_len = sizeof(rp->gen);
memcpy(&rp->gen, &lport->rnid_gen,
sizeof(rp->gen));
}
fc_fill_reply_hdr(fp, in_fp, FC_RCTL_ELS_REP, 0);
lport->tt.frame_send(lport, fp);
}
}
fc_frame_free(in_fp);
}
/**
* fc_lport_recv_logo_req() - Handle received fabric LOGO request
* @lport: The local port receiving the LOGO
* @fp: The LOGO request frame
*/
static void fc_lport_recv_logo_req(struct fc_lport *lport, struct fc_frame *fp)
{
lockdep_assert_held(&lport->lp_mutex);
fc_seq_els_rsp_send(fp, ELS_LS_ACC, NULL);
fc_lport_enter_reset(lport);
fc_frame_free(fp);
}
/**
* fc_fabric_login() - Start the lport state machine
* @lport: The local port that should log into the fabric
*
* Locking Note: This function should not be called
* with the lport lock held.
*/
int fc_fabric_login(struct fc_lport *lport)
{
int rc = -1;
mutex_lock(&lport->lp_mutex);
if (lport->state == LPORT_ST_DISABLED ||
lport->state == LPORT_ST_LOGO) {
fc_lport_state_enter(lport, LPORT_ST_RESET);
fc_lport_enter_reset(lport);
rc = 0;
}
mutex_unlock(&lport->lp_mutex);
return rc;
}
EXPORT_SYMBOL(fc_fabric_login);
/**
* __fc_linkup() - Handler for transport linkup events
* @lport: The lport whose link is up
*/
void __fc_linkup(struct fc_lport *lport)
{
lockdep_assert_held(&lport->lp_mutex);
if (!lport->link_up) {
lport->link_up = 1;
if (lport->state == LPORT_ST_RESET)
fc_lport_enter_flogi(lport);
}
}
/**
* fc_linkup() - Handler for transport linkup events
* @lport: The local port whose link is up
*/
void fc_linkup(struct fc_lport *lport)
{
printk(KERN_INFO "host%d: libfc: Link up on port (%6.6x)\n",
lport->host->host_no, lport->port_id);
mutex_lock(&lport->lp_mutex);
__fc_linkup(lport);
mutex_unlock(&lport->lp_mutex);
}
EXPORT_SYMBOL(fc_linkup);
/**
* __fc_linkdown() - Handler for transport linkdown events
* @lport: The lport whose link is down
*/
void __fc_linkdown(struct fc_lport *lport)
{
lockdep_assert_held(&lport->lp_mutex);
if (lport->link_up) {
lport->link_up = 0;
fc_lport_enter_reset(lport);
lport->tt.fcp_cleanup(lport);
}
}
/**
* fc_linkdown() - Handler for transport linkdown events
* @lport: The local port whose link is down
*/
void fc_linkdown(struct fc_lport *lport)
{
printk(KERN_INFO "host%d: libfc: Link down on port (%6.6x)\n",
lport->host->host_no, lport->port_id);
mutex_lock(&lport->lp_mutex);
__fc_linkdown(lport);
mutex_unlock(&lport->lp_mutex);
}
EXPORT_SYMBOL(fc_linkdown);
/**
* fc_fabric_logoff() - Logout of the fabric
* @lport: The local port to logoff the fabric
*
* Return value:
* 0 for success, -1 for failure
*/
int fc_fabric_logoff(struct fc_lport *lport)
{
lport->tt.disc_stop_final(lport);
mutex_lock(&lport->lp_mutex);
if (lport->dns_rdata)
fc_rport_logoff(lport->dns_rdata);
mutex_unlock(&lport->lp_mutex);
fc_rport_flush_queue();
mutex_lock(&lport->lp_mutex);
fc_lport_enter_logo(lport);
mutex_unlock(&lport->lp_mutex);
cancel_delayed_work_sync(&lport->retry_work);
return 0;
}
EXPORT_SYMBOL(fc_fabric_logoff);
/**
* fc_lport_destroy() - Unregister a fc_lport
* @lport: The local port to unregister
*
* Note:
* Exit routine for an fc_lport instance. Cleans up all the allocated
* memory and frees up other system resources.
*
*/
int fc_lport_destroy(struct fc_lport *lport)
{
mutex_lock(&lport->lp_mutex);
lport->state = LPORT_ST_DISABLED;
lport->link_up = 0;
lport->tt.frame_send = fc_frame_drop;
mutex_unlock(&lport->lp_mutex);
lport->tt.fcp_abort_io(lport);
lport->tt.disc_stop_final(lport);
lport->tt.exch_mgr_reset(lport, 0, 0);
cancel_delayed_work_sync(&lport->retry_work);
fc_fc4_del_lport(lport);
return 0;
}
EXPORT_SYMBOL(fc_lport_destroy);
/**
* fc_set_mfs() - Set the maximum frame size for a local port
* @lport: The local port to set the MFS for
* @mfs: The new MFS
*/
int fc_set_mfs(struct fc_lport *lport, u32 mfs)
{
unsigned int old_mfs;
int rc = -EINVAL;
mutex_lock(&lport->lp_mutex);
old_mfs = lport->mfs;
if (mfs >= FC_MIN_MAX_FRAME) {
mfs &= ~3;
if (mfs > FC_MAX_FRAME)
mfs = FC_MAX_FRAME;
mfs -= sizeof(struct fc_frame_header);
lport->mfs = mfs;
rc = 0;
}
if (!rc && mfs < old_mfs)
fc_lport_enter_reset(lport);
mutex_unlock(&lport->lp_mutex);
return rc;
}
EXPORT_SYMBOL(fc_set_mfs);
/**
* fc_lport_disc_callback() - Callback for discovery events
* @lport: The local port receiving the event
* @event: The discovery event
*/
static void fc_lport_disc_callback(struct fc_lport *lport,
enum fc_disc_event event)
{
switch (event) {
case DISC_EV_SUCCESS:
FC_LPORT_DBG(lport, "Discovery succeeded\n");
break;
case DISC_EV_FAILED:
printk(KERN_ERR "host%d: libfc: "
"Discovery failed for port (%6.6x)\n",
lport->host->host_no, lport->port_id);
mutex_lock(&lport->lp_mutex);
fc_lport_enter_reset(lport);
mutex_unlock(&lport->lp_mutex);
break;
case DISC_EV_NONE:
WARN_ON(1);
break;
}
}
/**
* fc_lport_enter_ready() - Enter the ready state and start discovery
* @lport: The local port that is ready
*/
static void fc_lport_enter_ready(struct fc_lport *lport)
{
lockdep_assert_held(&lport->lp_mutex);
FC_LPORT_DBG(lport, "Entered READY from state %s\n",
fc_lport_state(lport));
fc_lport_state_enter(lport, LPORT_ST_READY);
if (lport->vport)
fc_vport_set_state(lport->vport, FC_VPORT_ACTIVE);
fc_vports_linkchange(lport);
if (!lport->ptp_rdata)
lport->tt.disc_start(fc_lport_disc_callback, lport);
}
/**
* fc_lport_set_port_id() - set the local port Port ID
* @lport: The local port which will have its Port ID set.
* @port_id: The new port ID.
* @fp: The frame containing the incoming request, or NULL.
*/
static void fc_lport_set_port_id(struct fc_lport *lport, u32 port_id,
struct fc_frame *fp)
{
lockdep_assert_held(&lport->lp_mutex);
if (port_id)
printk(KERN_INFO "host%d: Assigned Port ID %6.6x\n",
lport->host->host_no, port_id);
lport->port_id = port_id;
/* Update the fc_host */
fc_host_port_id(lport->host) = port_id;
if (lport->tt.lport_set_port_id)
lport->tt.lport_set_port_id(lport, port_id, fp);
}
/**
* fc_lport_set_local_id() - set the local port Port ID for point-to-multipoint
* @lport: The local port which will have its Port ID set.
* @port_id: The new port ID.
*
* Called by the lower-level driver when transport sets the local port_id.
* This is used in VN_port to VN_port mode for FCoE, and causes FLOGI and
* discovery to be skipped.
*/
void fc_lport_set_local_id(struct fc_lport *lport, u32 port_id)
{
mutex_lock(&lport->lp_mutex);
fc_lport_set_port_id(lport, port_id, NULL);
switch (lport->state) {
case LPORT_ST_RESET:
case LPORT_ST_FLOGI:
if (port_id)
fc_lport_enter_ready(lport);
break;
default:
break;
}
mutex_unlock(&lport->lp_mutex);
}
EXPORT_SYMBOL(fc_lport_set_local_id);
/**
* fc_lport_recv_flogi_req() - Receive a FLOGI request
* @lport: The local port that received the request
* @rx_fp: The FLOGI frame
*
* A received FLOGI request indicates a point-to-point connection.
* Accept it with the common service parameters indicating our N port.
* Set up to do a PLOGI if we have the higher-number WWPN.
*/
static void fc_lport_recv_flogi_req(struct fc_lport *lport,
struct fc_frame *rx_fp)
{
struct fc_frame *fp;
struct fc_frame_header *fh;
struct fc_els_flogi *flp;
struct fc_els_flogi *new_flp;
u64 remote_wwpn;
u32 remote_fid;
u32 local_fid;
lockdep_assert_held(&lport->lp_mutex);
FC_LPORT_DBG(lport, "Received FLOGI request while in state %s\n",
fc_lport_state(lport));
remote_fid = fc_frame_sid(rx_fp);
flp = fc_frame_payload_get(rx_fp, sizeof(*flp));
if (!flp)
goto out;
remote_wwpn = get_unaligned_be64(&flp->fl_wwpn);
if (remote_wwpn == lport->wwpn) {
printk(KERN_WARNING "host%d: libfc: Received FLOGI from port "
"with same WWPN %16.16llx\n",
lport->host->host_no, remote_wwpn);
goto out;
}
FC_LPORT_DBG(lport, "FLOGI from port WWPN %16.16llx\n", remote_wwpn);
/*
* XXX what is the right thing to do for FIDs?
* The originator might expect our S_ID to be 0xfffffe.
* But if so, both of us could end up with the same FID.
*/
local_fid = FC_LOCAL_PTP_FID_LO;
if (remote_wwpn < lport->wwpn) {
local_fid = FC_LOCAL_PTP_FID_HI;
if (!remote_fid || remote_fid == local_fid)
remote_fid = FC_LOCAL_PTP_FID_LO;
} else if (!remote_fid) {
remote_fid = FC_LOCAL_PTP_FID_HI;
}
fc_lport_set_port_id(lport, local_fid, rx_fp);
fp = fc_frame_alloc(lport, sizeof(*flp));
if (fp) {
new_flp = fc_frame_payload_get(fp, sizeof(*flp));
fc_lport_flogi_fill(lport, new_flp, ELS_FLOGI);
new_flp->fl_cmd = (u8) ELS_LS_ACC;
/*
* Send the response. If this fails, the originator should
* repeat the sequence.
*/
fc_fill_reply_hdr(fp, rx_fp, FC_RCTL_ELS_REP, 0);
fh = fc_frame_header_get(fp);
hton24(fh->fh_s_id, local_fid);
hton24(fh->fh_d_id, remote_fid);
lport->tt.frame_send(lport, fp);
} else {
fc_lport_error(lport, fp);
}
fc_lport_ptp_setup(lport, remote_fid, remote_wwpn,
get_unaligned_be64(&flp->fl_wwnn));
out:
fc_frame_free(rx_fp);
}
/**
* fc_lport_recv_els_req() - The generic lport ELS request handler
* @lport: The local port that received the request
* @fp: The request frame
*
* This function will see if the lport handles the request or
* if an rport should handle the request.
*
* Locking Note: This function should not be called with the lport
* lock held because it will grab the lock.
*/
static void fc_lport_recv_els_req(struct fc_lport *lport,
struct fc_frame *fp)
{
mutex_lock(&lport->lp_mutex);
/*
* Handle special ELS cases like FLOGI, LOGO, and
* RSCN here. These don't require a session.
* Even if we had a session, it might not be ready.
*/
if (!lport->link_up)
fc_frame_free(fp);
else {
/*
* Check opcode.
*/
switch (fc_frame_payload_op(fp)) {
case ELS_FLOGI:
if (!lport->point_to_multipoint)
fc_lport_recv_flogi_req(lport, fp);
else
fc_rport_recv_req(lport, fp);
break;
case ELS_LOGO:
if (fc_frame_sid(fp) == FC_FID_FLOGI)
fc_lport_recv_logo_req(lport, fp);
else
fc_rport_recv_req(lport, fp);
break;
case ELS_RSCN:
lport->tt.disc_recv_req(lport, fp);
break;
case ELS_ECHO:
fc_lport_recv_echo_req(lport, fp);
break;
case ELS_RLIR:
fc_lport_recv_rlir_req(lport, fp);
break;
case ELS_RNID:
fc_lport_recv_rnid_req(lport, fp);
break;
default:
fc_rport_recv_req(lport, fp);
break;
}
}
mutex_unlock(&lport->lp_mutex);
}
static int fc_lport_els_prli(struct fc_rport_priv *rdata, u32 spp_len,
const struct fc_els_spp *spp_in,
struct fc_els_spp *spp_out)
{
return FC_SPP_RESP_INVL;
}
struct fc4_prov fc_lport_els_prov = {
.prli = fc_lport_els_prli,
.recv = fc_lport_recv_els_req,
};
/**
* fc_lport_recv() - The generic lport request handler
* @lport: The lport that received the request
* @fp: The frame the request is in
*
* Locking Note: This function should not be called with the lport
* lock held because it may grab the lock.
*/
void fc_lport_recv(struct fc_lport *lport, struct fc_frame *fp)
{
struct fc_frame_header *fh = fc_frame_header_get(fp);
struct fc_seq *sp = fr_seq(fp);
struct fc4_prov *prov;
/*
* Use RCU read lock and module_lock to be sure module doesn't
* deregister and get unloaded while we're calling it.
* try_module_get() is inlined and accepts a NULL parameter.
* Only ELSes and FCP target ops should come through here.
* The locking is unfortunate, and a better scheme is being sought.
*/
rcu_read_lock();
if (fh->fh_type >= FC_FC4_PROV_SIZE)
goto drop;
prov = rcu_dereference(fc_passive_prov[fh->fh_type]);
if (!prov || !try_module_get(prov->module))
goto drop;
rcu_read_unlock();
prov->recv(lport, fp);
module_put(prov->module);
return;
drop:
rcu_read_unlock();
FC_LPORT_DBG(lport, "dropping unexpected frame type %x\n", fh->fh_type);
fc_frame_free(fp);
if (sp)
fc_exch_done(sp);
}
EXPORT_SYMBOL(fc_lport_recv);
/**
* fc_lport_reset() - Reset a local port
* @lport: The local port which should be reset
*
* Locking Note: This function should not be called with the
* lport lock held.
*/
int fc_lport_reset(struct fc_lport *lport)
{
cancel_delayed_work_sync(&lport->retry_work);
mutex_lock(&lport->lp_mutex);
fc_lport_enter_reset(lport);
mutex_unlock(&lport->lp_mutex);
return 0;
}
EXPORT_SYMBOL(fc_lport_reset);
/**
* fc_lport_reset_locked() - Reset the local port w/ the lport lock held
* @lport: The local port to be reset
*/
static void fc_lport_reset_locked(struct fc_lport *lport)
{
lockdep_assert_held(&lport->lp_mutex);
if (lport->dns_rdata) {
fc_rport_logoff(lport->dns_rdata);
lport->dns_rdata = NULL;
}
if (lport->ptp_rdata) {
fc_rport_logoff(lport->ptp_rdata);
kref_put(&lport->ptp_rdata->kref, fc_rport_destroy);
lport->ptp_rdata = NULL;
}
lport->tt.disc_stop(lport);
lport->tt.exch_mgr_reset(lport, 0, 0);
fc_host_fabric_name(lport->host) = 0;
if (lport->port_id && (!lport->point_to_multipoint || !lport->link_up))
fc_lport_set_port_id(lport, 0, NULL);
}
/**
* fc_lport_enter_reset() - Reset the local port
* @lport: The local port to be reset
*/
static void fc_lport_enter_reset(struct fc_lport *lport)
{
lockdep_assert_held(&lport->lp_mutex);
FC_LPORT_DBG(lport, "Entered RESET state from %s state\n",
fc_lport_state(lport));
if (lport->state == LPORT_ST_DISABLED || lport->state == LPORT_ST_LOGO)
return;
if (lport->vport) {
if (lport->link_up)
fc_vport_set_state(lport->vport, FC_VPORT_INITIALIZING);
else
fc_vport_set_state(lport->vport, FC_VPORT_LINKDOWN);
}
fc_lport_state_enter(lport, LPORT_ST_RESET);
fc_host_post_event(lport->host, fc_get_event_number(),
FCH_EVT_LIPRESET, 0);
fc_vports_linkchange(lport);
fc_lport_reset_locked(lport);
if (lport->link_up)
fc_lport_enter_flogi(lport);
}
/**
* fc_lport_enter_disabled() - Disable the local port
* @lport: The local port to be reset
*/
static void fc_lport_enter_disabled(struct fc_lport *lport)
{
lockdep_assert_held(&lport->lp_mutex);
FC_LPORT_DBG(lport, "Entered disabled state from %s state\n",
fc_lport_state(lport));
fc_lport_state_enter(lport, LPORT_ST_DISABLED);
fc_vports_linkchange(lport);
fc_lport_reset_locked(lport);
}
/**
* fc_lport_error() - Handler for any errors
* @lport: The local port that the error was on
* @fp: The error code encoded in a frame pointer
*
* If the error was caused by a resource allocation failure
* then wait for half a second and retry, otherwise retry
* after the e_d_tov time.
*/
static void fc_lport_error(struct fc_lport *lport, struct fc_frame *fp)
{
unsigned long delay = 0;
FC_LPORT_DBG(lport, "Error %ld in state %s, retries %d\n",
IS_ERR(fp) ? -PTR_ERR(fp) : 0, fc_lport_state(lport),
lport->retry_count);
if (PTR_ERR(fp) == -FC_EX_CLOSED)
return;
/*
* Memory allocation failure, or the exchange timed out
* or we received LS_RJT.
* Retry after delay
*/
if (lport->retry_count < lport->max_retry_count) {
lport->retry_count++;
if (!fp)
delay = msecs_to_jiffies(500);
else
delay = msecs_to_jiffies(lport->e_d_tov);
schedule_delayed_work(&lport->retry_work, delay);
} else
fc_lport_enter_reset(lport);
}
/**
* fc_lport_ns_resp() - Handle response to a name server
* registration exchange
* @sp: current sequence in exchange
* @fp: response frame
* @lp_arg: Fibre Channel host port instance
*
* Locking Note: This function will be called without the lport lock
* held, but it will lock, call an _enter_* function or fc_lport_error()
* and then unlock the lport.
*/
static void fc_lport_ns_resp(struct fc_seq *sp, struct fc_frame *fp,
void *lp_arg)
{
struct fc_lport *lport = lp_arg;
struct fc_frame_header *fh;
struct fc_ct_hdr *ct;
FC_LPORT_DBG(lport, "Received a ns %s\n", fc_els_resp_type(fp));
if (fp == ERR_PTR(-FC_EX_CLOSED))
return;
mutex_lock(&lport->lp_mutex);
if (lport->state < LPORT_ST_RNN_ID || lport->state > LPORT_ST_RFF_ID) {
FC_LPORT_DBG(lport, "Received a name server response, "
"but in state %s\n", fc_lport_state(lport));
if (IS_ERR(fp))
goto err;
goto out;
}
if (IS_ERR(fp)) {
fc_lport_error(lport, fp);
goto err;
}
fh = fc_frame_header_get(fp);
ct = fc_frame_payload_get(fp, sizeof(*ct));
if (fh && ct && fh->fh_type == FC_TYPE_CT &&
ct->ct_fs_type == FC_FST_DIR &&
ct->ct_fs_subtype == FC_NS_SUBTYPE &&
ntohs(ct->ct_cmd) == FC_FS_ACC)
switch (lport->state) {
case LPORT_ST_RNN_ID:
fc_lport_enter_ns(lport, LPORT_ST_RSNN_NN);
break;
case LPORT_ST_RSNN_NN:
fc_lport_enter_ns(lport, LPORT_ST_RSPN_ID);
break;
case LPORT_ST_RSPN_ID:
fc_lport_enter_ns(lport, LPORT_ST_RFT_ID);
break;
case LPORT_ST_RFT_ID:
fc_lport_enter_ns(lport, LPORT_ST_RFF_ID);
break;
case LPORT_ST_RFF_ID:
if (lport->fdmi_enabled)
fc_lport_enter_fdmi(lport);
else
fc_lport_enter_scr(lport);
break;
default:
/* should have already been caught by state checks */
break;
}
else
fc_lport_error(lport, fp);
out:
fc_frame_free(fp);
err:
mutex_unlock(&lport->lp_mutex);
}
/**
* fc_lport_ms_resp() - Handle response to a management server
* exchange
* @sp: current sequence in exchange
* @fp: response frame
* @lp_arg: Fibre Channel host port instance
*
* Locking Note: This function will be called without the lport lock
* held, but it will lock, call an _enter_* function or fc_lport_error()
* and then unlock the lport.
*/
static void fc_lport_ms_resp(struct fc_seq *sp, struct fc_frame *fp,
void *lp_arg)
{
struct fc_lport *lport = lp_arg;
struct fc_frame_header *fh;
struct fc_ct_hdr *ct;
struct fc_host_attrs *fc_host = shost_to_fc_host(lport->host);
FC_LPORT_DBG(lport, "Received a ms %s\n", fc_els_resp_type(fp));
if (fp == ERR_PTR(-FC_EX_CLOSED))
return;
mutex_lock(&lport->lp_mutex);
if (lport->state < LPORT_ST_RHBA || lport->state > LPORT_ST_DPRT) {
FC_LPORT_DBG(lport, "Received a management server response, "
"but in state %s\n", fc_lport_state(lport));
if (IS_ERR(fp))
goto err;
goto out;
}
if (IS_ERR(fp)) {
fc_lport_error(lport, fp);
goto err;
}
fh = fc_frame_header_get(fp);
ct = fc_frame_payload_get(fp, sizeof(*ct));
if (fh && ct && fh->fh_type == FC_TYPE_CT &&
ct->ct_fs_type == FC_FST_MGMT &&
ct->ct_fs_subtype == FC_FDMI_SUBTYPE) {
FC_LPORT_DBG(lport, "Received a management server response, "
"reason=%d explain=%d\n",
ct->ct_reason,
ct->ct_explan);
switch (lport->state) {
case LPORT_ST_RHBA:
if ((ntohs(ct->ct_cmd) == FC_FS_RJT) && fc_host->fdmi_version == FDMI_V2) {
FC_LPORT_DBG(lport, "Error for FDMI-V2, fall back to FDMI-V1\n");
fc_host->fdmi_version = FDMI_V1;
fc_lport_enter_ms(lport, LPORT_ST_RHBA);
} else if (ntohs(ct->ct_cmd) == FC_FS_ACC)
fc_lport_enter_ms(lport, LPORT_ST_RPA);
else /* Error Skip RPA */
fc_lport_enter_scr(lport);
break;
case LPORT_ST_RPA:
fc_lport_enter_scr(lport);
break;
case LPORT_ST_DPRT:
fc_lport_enter_ms(lport, LPORT_ST_RHBA);
break;
case LPORT_ST_DHBA:
fc_lport_enter_ms(lport, LPORT_ST_DPRT);
break;
default:
/* should have already been caught by state checks */
break;
}
} else {
/* Invalid Frame? */
fc_lport_error(lport, fp);
}
out:
fc_frame_free(fp);
err:
mutex_unlock(&lport->lp_mutex);
}
/**
* fc_lport_scr_resp() - Handle response to State Change Register (SCR) request
* @sp: current sequence in SCR exchange
* @fp: response frame
* @lp_arg: Fibre Channel local port instance that sent the registration request
*
* Locking Note: This function will be called without the lport lock
* held, but it will lock, call an _enter_* function or fc_lport_error
* and then unlock the lport.
*/
static void fc_lport_scr_resp(struct fc_seq *sp, struct fc_frame *fp,
void *lp_arg)
{
struct fc_lport *lport = lp_arg;
u8 op;
FC_LPORT_DBG(lport, "Received a SCR %s\n", fc_els_resp_type(fp));
if (fp == ERR_PTR(-FC_EX_CLOSED))
return;
mutex_lock(&lport->lp_mutex);
if (lport->state != LPORT_ST_SCR) {
FC_LPORT_DBG(lport, "Received a SCR response, but in state "
"%s\n", fc_lport_state(lport));
if (IS_ERR(fp))
goto err;
goto out;
}
if (IS_ERR(fp)) {
fc_lport_error(lport, fp);
goto err;
}
op = fc_frame_payload_op(fp);
if (op == ELS_LS_ACC)
fc_lport_enter_ready(lport);
else
fc_lport_error(lport, fp);
out:
fc_frame_free(fp);
err:
mutex_unlock(&lport->lp_mutex);
}
/**
* fc_lport_enter_scr() - Send a SCR (State Change Register) request
* @lport: The local port to register for state changes
*/
static void fc_lport_enter_scr(struct fc_lport *lport)
{
struct fc_frame *fp;
lockdep_assert_held(&lport->lp_mutex);
FC_LPORT_DBG(lport, "Entered SCR state from %s state\n",
fc_lport_state(lport));
fc_lport_state_enter(lport, LPORT_ST_SCR);
fp = fc_frame_alloc(lport, sizeof(struct fc_els_scr));
if (!fp) {
fc_lport_error(lport, fp);
return;
}
if (!lport->tt.elsct_send(lport, FC_FID_FCTRL, fp, ELS_SCR,
fc_lport_scr_resp, lport,
2 * lport->r_a_tov))
fc_lport_error(lport, NULL);
}
/**
* fc_lport_enter_ns() - register some object with the name server
* @lport: Fibre Channel local port to register
* @state: Local port state
*/
static void fc_lport_enter_ns(struct fc_lport *lport, enum fc_lport_state state)
{
struct fc_frame *fp;
enum fc_ns_req cmd;
int size = sizeof(struct fc_ct_hdr);
size_t len;
lockdep_assert_held(&lport->lp_mutex);
FC_LPORT_DBG(lport, "Entered %s state from %s state\n",
fc_lport_state_names[state],
fc_lport_state(lport));
fc_lport_state_enter(lport, state);
switch (state) {
case LPORT_ST_RNN_ID:
cmd = FC_NS_RNN_ID;
size += sizeof(struct fc_ns_rn_id);
break;
case LPORT_ST_RSNN_NN:
len = strnlen(fc_host_symbolic_name(lport->host), 255);
/* if there is no symbolic name, skip to RFT_ID */
if (!len)
return fc_lport_enter_ns(lport, LPORT_ST_RFT_ID);
cmd = FC_NS_RSNN_NN;
size += sizeof(struct fc_ns_rsnn) + len;
break;
case LPORT_ST_RSPN_ID:
len = strnlen(fc_host_symbolic_name(lport->host), 255);
/* if there is no symbolic name, skip to RFT_ID */
if (!len)
return fc_lport_enter_ns(lport, LPORT_ST_RFT_ID);
cmd = FC_NS_RSPN_ID;
size += sizeof(struct fc_ns_rspn) + len;
break;
case LPORT_ST_RFT_ID:
cmd = FC_NS_RFT_ID;
size += sizeof(struct fc_ns_rft);
break;
case LPORT_ST_RFF_ID:
cmd = FC_NS_RFF_ID;
size += sizeof(struct fc_ns_rff_id);
break;
default:
fc_lport_error(lport, NULL);
return;
}
fp = fc_frame_alloc(lport, size);
if (!fp) {
fc_lport_error(lport, fp);
return;
}
if (!lport->tt.elsct_send(lport, FC_FID_DIR_SERV, fp, cmd,
fc_lport_ns_resp,
lport, 3 * lport->r_a_tov))
fc_lport_error(lport, fp);
}
static struct fc_rport_operations fc_lport_rport_ops = {
.event_callback = fc_lport_rport_callback,
};
/**
* fc_lport_enter_dns() - Create a fc_rport for the name server
* @lport: The local port requesting a remote port for the name server
*/
static void fc_lport_enter_dns(struct fc_lport *lport)
{
struct fc_rport_priv *rdata;
lockdep_assert_held(&lport->lp_mutex);
FC_LPORT_DBG(lport, "Entered DNS state from %s state\n",
fc_lport_state(lport));
fc_lport_state_enter(lport, LPORT_ST_DNS);
mutex_lock(&lport->disc.disc_mutex);
rdata = fc_rport_create(lport, FC_FID_DIR_SERV);
mutex_unlock(&lport->disc.disc_mutex);
if (!rdata)
goto err;
rdata->ops = &fc_lport_rport_ops;
fc_rport_login(rdata);
return;
err:
fc_lport_error(lport, NULL);
}
/**
* fc_lport_enter_ms() - management server commands
* @lport: Fibre Channel local port to register
* @state: Local port state
*/
static void fc_lport_enter_ms(struct fc_lport *lport, enum fc_lport_state state)
{
struct fc_frame *fp;
enum fc_fdmi_req cmd;
int size = sizeof(struct fc_ct_hdr);
size_t len;
int numattrs;
struct fc_host_attrs *fc_host = shost_to_fc_host(lport->host);
lockdep_assert_held(&lport->lp_mutex);
FC_LPORT_DBG(lport, "Entered %s state from %s state\n",
fc_lport_state_names[state],
fc_lport_state(lport));
fc_lport_state_enter(lport, state);
switch (state) {
case LPORT_ST_RHBA:
cmd = FC_FDMI_RHBA;
/* Number of HBA Attributes */
numattrs = 11;
len = sizeof(struct fc_fdmi_rhba);
len -= sizeof(struct fc_fdmi_attr_entry);
len += FC_FDMI_HBA_ATTR_NODENAME_LEN;
len += FC_FDMI_HBA_ATTR_MANUFACTURER_LEN;
len += FC_FDMI_HBA_ATTR_SERIALNUMBER_LEN;
len += FC_FDMI_HBA_ATTR_MODEL_LEN;
len += FC_FDMI_HBA_ATTR_MODELDESCR_LEN;
len += FC_FDMI_HBA_ATTR_HARDWAREVERSION_LEN;
len += FC_FDMI_HBA_ATTR_DRIVERVERSION_LEN;
len += FC_FDMI_HBA_ATTR_OPTIONROMVERSION_LEN;
len += FC_FDMI_HBA_ATTR_FIRMWAREVERSION_LEN;
len += FC_FDMI_HBA_ATTR_OSNAMEVERSION_LEN;
len += FC_FDMI_HBA_ATTR_MAXCTPAYLOAD_LEN;
if (fc_host->fdmi_version == FDMI_V2) {
numattrs += 7;
len += FC_FDMI_HBA_ATTR_NODESYMBLNAME_LEN;
len += FC_FDMI_HBA_ATTR_VENDORSPECIFICINFO_LEN;
len += FC_FDMI_HBA_ATTR_NUMBEROFPORTS_LEN;
len += FC_FDMI_HBA_ATTR_FABRICNAME_LEN;
len += FC_FDMI_HBA_ATTR_BIOSVERSION_LEN;
len += FC_FDMI_HBA_ATTR_BIOSSTATE_LEN;
len += FC_FDMI_HBA_ATTR_VENDORIDENTIFIER_LEN;
}
len += (numattrs * FC_FDMI_ATTR_ENTRY_HEADER_LEN);
size += len;
break;
case LPORT_ST_RPA:
cmd = FC_FDMI_RPA;
/* Number of Port Attributes */
numattrs = 6;
len = sizeof(struct fc_fdmi_rpa);
len -= sizeof(struct fc_fdmi_attr_entry);
len += FC_FDMI_PORT_ATTR_FC4TYPES_LEN;
len += FC_FDMI_PORT_ATTR_SUPPORTEDSPEED_LEN;
len += FC_FDMI_PORT_ATTR_CURRENTPORTSPEED_LEN;
len += FC_FDMI_PORT_ATTR_MAXFRAMESIZE_LEN;
len += FC_FDMI_PORT_ATTR_OSDEVICENAME_LEN;
len += FC_FDMI_PORT_ATTR_HOSTNAME_LEN;
if (fc_host->fdmi_version == FDMI_V2) {
numattrs += 10;
len += FC_FDMI_PORT_ATTR_NODENAME_LEN;
len += FC_FDMI_PORT_ATTR_PORTNAME_LEN;
len += FC_FDMI_PORT_ATTR_SYMBOLICNAME_LEN;
len += FC_FDMI_PORT_ATTR_PORTTYPE_LEN;
len += FC_FDMI_PORT_ATTR_SUPPORTEDCLASSSRVC_LEN;
len += FC_FDMI_PORT_ATTR_FABRICNAME_LEN;
len += FC_FDMI_PORT_ATTR_CURRENTFC4TYPE_LEN;
len += FC_FDMI_PORT_ATTR_PORTSTATE_LEN;
len += FC_FDMI_PORT_ATTR_DISCOVEREDPORTS_LEN;
len += FC_FDMI_PORT_ATTR_PORTID_LEN;
}
len += (numattrs * FC_FDMI_ATTR_ENTRY_HEADER_LEN);
size += len;
break;
case LPORT_ST_DPRT:
cmd = FC_FDMI_DPRT;
len = sizeof(struct fc_fdmi_dprt);
size += len;
break;
case LPORT_ST_DHBA:
cmd = FC_FDMI_DHBA;
len = sizeof(struct fc_fdmi_dhba);
size += len;
break;
default:
fc_lport_error(lport, NULL);
return;
}
FC_LPORT_DBG(lport, "Cmd=0x%x Len %d size %d\n",
cmd, (int)len, size);
fp = fc_frame_alloc(lport, size);
if (!fp) {
fc_lport_error(lport, fp);
return;
}
if (!lport->tt.elsct_send(lport, FC_FID_MGMT_SERV, fp, cmd,
fc_lport_ms_resp,
lport, 3 * lport->r_a_tov))
fc_lport_error(lport, fp);
}
/**
* fc_lport_enter_fdmi() - Create a fc_rport for the management server
* @lport: The local port requesting a remote port for the management server
*/
static void fc_lport_enter_fdmi(struct fc_lport *lport)
{
struct fc_rport_priv *rdata;
lockdep_assert_held(&lport->lp_mutex);
FC_LPORT_DBG(lport, "Entered FDMI state from %s state\n",
fc_lport_state(lport));
fc_lport_state_enter(lport, LPORT_ST_FDMI);
mutex_lock(&lport->disc.disc_mutex);
rdata = fc_rport_create(lport, FC_FID_MGMT_SERV);
mutex_unlock(&lport->disc.disc_mutex);
if (!rdata)
goto err;
rdata->ops = &fc_lport_rport_ops;
fc_rport_login(rdata);
return;
err:
fc_lport_error(lport, NULL);
}
/**
* fc_lport_timeout() - Handler for the retry_work timer
* @work: The work struct of the local port
*/
static void fc_lport_timeout(struct work_struct *work)
{
struct fc_lport *lport =
container_of(work, struct fc_lport,
retry_work.work);
struct fc_host_attrs *fc_host = shost_to_fc_host(lport->host);
mutex_lock(&lport->lp_mutex);
switch (lport->state) {
case LPORT_ST_DISABLED:
break;
case LPORT_ST_READY:
break;
case LPORT_ST_RESET:
break;
case LPORT_ST_FLOGI:
fc_lport_enter_flogi(lport);
break;
case LPORT_ST_DNS:
fc_lport_enter_dns(lport);
break;
case LPORT_ST_RNN_ID:
case LPORT_ST_RSNN_NN:
case LPORT_ST_RSPN_ID:
case LPORT_ST_RFT_ID:
case LPORT_ST_RFF_ID:
fc_lport_enter_ns(lport, lport->state);
break;
case LPORT_ST_FDMI:
fc_lport_enter_fdmi(lport);
break;
case LPORT_ST_RHBA:
if (fc_host->fdmi_version == FDMI_V2) {
FC_LPORT_DBG(lport, "timeout for FDMI-V2 RHBA,fall back to FDMI-V1\n");
fc_host->fdmi_version = FDMI_V1;
fc_lport_enter_ms(lport, LPORT_ST_RHBA);
break;
}
fallthrough;
case LPORT_ST_RPA:
case LPORT_ST_DHBA:
case LPORT_ST_DPRT:
FC_LPORT_DBG(lport, "Skipping lport state %s to SCR\n",
fc_lport_state(lport));
fallthrough;
case LPORT_ST_SCR:
fc_lport_enter_scr(lport);
break;
case LPORT_ST_LOGO:
fc_lport_enter_logo(lport);
break;
}
mutex_unlock(&lport->lp_mutex);
}
/**
* fc_lport_logo_resp() - Handle response to LOGO request
* @sp: The sequence that the LOGO was on
* @fp: The LOGO frame
* @lp_arg: The local port that received the LOGO response
*
* Locking Note: This function will be called without the lport lock
* held, but it will lock, call an _enter_* function or fc_lport_error()
* and then unlock the lport.
*/
void fc_lport_logo_resp(struct fc_seq *sp, struct fc_frame *fp,
void *lp_arg)
{
struct fc_lport *lport = lp_arg;
u8 op;
FC_LPORT_DBG(lport, "Received a LOGO %s\n", fc_els_resp_type(fp));
if (fp == ERR_PTR(-FC_EX_CLOSED))
return;
mutex_lock(&lport->lp_mutex);
if (lport->state != LPORT_ST_LOGO) {
FC_LPORT_DBG(lport, "Received a LOGO response, but in state "
"%s\n", fc_lport_state(lport));
if (IS_ERR(fp))
goto err;
goto out;
}
if (IS_ERR(fp)) {
fc_lport_error(lport, fp);
goto err;
}
op = fc_frame_payload_op(fp);
if (op == ELS_LS_ACC)
fc_lport_enter_disabled(lport);
else
fc_lport_error(lport, fp);
out:
fc_frame_free(fp);
err:
mutex_unlock(&lport->lp_mutex);
}
EXPORT_SYMBOL(fc_lport_logo_resp);
/**
* fc_lport_enter_logo() - Logout of the fabric
* @lport: The local port to be logged out
*/
static void fc_lport_enter_logo(struct fc_lport *lport)
{
struct fc_frame *fp;
struct fc_els_logo *logo;
lockdep_assert_held(&lport->lp_mutex);
FC_LPORT_DBG(lport, "Entered LOGO state from %s state\n",
fc_lport_state(lport));
fc_lport_state_enter(lport, LPORT_ST_LOGO);
fc_vports_linkchange(lport);
fp = fc_frame_alloc(lport, sizeof(*logo));
if (!fp) {
fc_lport_error(lport, fp);
return;
}
if (!lport->tt.elsct_send(lport, FC_FID_FLOGI, fp, ELS_LOGO,
fc_lport_logo_resp, lport,
2 * lport->r_a_tov))
fc_lport_error(lport, NULL);
}
/**
* fc_lport_flogi_resp() - Handle response to FLOGI request
* @sp: The sequence that the FLOGI was on
* @fp: The FLOGI response frame
* @lp_arg: The local port that received the FLOGI response
*
* Locking Note: This function will be called without the lport lock
* held, but it will lock, call an _enter_* function or fc_lport_error()
* and then unlock the lport.
*/
void fc_lport_flogi_resp(struct fc_seq *sp, struct fc_frame *fp,
void *lp_arg)
{
struct fc_lport *lport = lp_arg;
struct fc_frame_header *fh;
struct fc_els_flogi *flp;
u32 did;
u16 csp_flags;
unsigned int r_a_tov;
unsigned int e_d_tov;
u16 mfs;
FC_LPORT_DBG(lport, "Received a FLOGI %s\n", fc_els_resp_type(fp));
if (fp == ERR_PTR(-FC_EX_CLOSED))
return;
mutex_lock(&lport->lp_mutex);
if (lport->state != LPORT_ST_FLOGI) {
FC_LPORT_DBG(lport, "Received a FLOGI response, but in state "
"%s\n", fc_lport_state(lport));
if (IS_ERR(fp))
goto err;
goto out;
}
if (IS_ERR(fp)) {
fc_lport_error(lport, fp);
goto err;
}
fh = fc_frame_header_get(fp);
did = fc_frame_did(fp);
if (fh->fh_r_ctl != FC_RCTL_ELS_REP || did == 0 ||
fc_frame_payload_op(fp) != ELS_LS_ACC) {
FC_LPORT_DBG(lport, "FLOGI not accepted or bad response\n");
fc_lport_error(lport, fp);
goto out;
}
flp = fc_frame_payload_get(fp, sizeof(*flp));
if (!flp) {
FC_LPORT_DBG(lport, "FLOGI bad response\n");
fc_lport_error(lport, fp);
goto out;
}
mfs = ntohs(flp->fl_csp.sp_bb_data) &
FC_SP_BB_DATA_MASK;
if (mfs < FC_SP_MIN_MAX_PAYLOAD || mfs > FC_SP_MAX_MAX_PAYLOAD) {
FC_LPORT_DBG(lport, "FLOGI bad mfs:%hu response, "
"lport->mfs:%u\n", mfs, lport->mfs);
fc_lport_error(lport, fp);
goto out;
}
if (mfs <= lport->mfs) {
lport->mfs = mfs;
fc_host_maxframe_size(lport->host) = mfs;
}
csp_flags = ntohs(flp->fl_csp.sp_features);
r_a_tov = ntohl(flp->fl_csp.sp_r_a_tov);
e_d_tov = ntohl(flp->fl_csp.sp_e_d_tov);
if (csp_flags & FC_SP_FT_EDTR)
e_d_tov /= 1000000;
lport->npiv_enabled = !!(csp_flags & FC_SP_FT_NPIV_ACC);
if ((csp_flags & FC_SP_FT_FPORT) == 0) {
if (e_d_tov > lport->e_d_tov)
lport->e_d_tov = e_d_tov;
lport->r_a_tov = 2 * lport->e_d_tov;
fc_lport_set_port_id(lport, did, fp);
printk(KERN_INFO "host%d: libfc: "
"Port (%6.6x) entered "
"point-to-point mode\n",
lport->host->host_no, did);
fc_lport_ptp_setup(lport, fc_frame_sid(fp),
get_unaligned_be64(
&flp->fl_wwpn),
get_unaligned_be64(
&flp->fl_wwnn));
} else {
if (e_d_tov > lport->e_d_tov)
lport->e_d_tov = e_d_tov;
if (r_a_tov > lport->r_a_tov)
lport->r_a_tov = r_a_tov;
fc_host_fabric_name(lport->host) =
get_unaligned_be64(&flp->fl_wwnn);
fc_lport_set_port_id(lport, did, fp);
fc_lport_enter_dns(lport);
}
out:
fc_frame_free(fp);
err:
mutex_unlock(&lport->lp_mutex);
}
EXPORT_SYMBOL(fc_lport_flogi_resp);
/**
* fc_lport_enter_flogi() - Send a FLOGI request to the fabric manager
* @lport: Fibre Channel local port to be logged in to the fabric
*/
static void fc_lport_enter_flogi(struct fc_lport *lport)
{
struct fc_frame *fp;
lockdep_assert_held(&lport->lp_mutex);
FC_LPORT_DBG(lport, "Entered FLOGI state from %s state\n",
fc_lport_state(lport));
fc_lport_state_enter(lport, LPORT_ST_FLOGI);
if (lport->point_to_multipoint) {
if (lport->port_id)
fc_lport_enter_ready(lport);
return;
}
fp = fc_frame_alloc(lport, sizeof(struct fc_els_flogi));
if (!fp)
return fc_lport_error(lport, fp);
if (!lport->tt.elsct_send(lport, FC_FID_FLOGI, fp,
lport->vport ? ELS_FDISC : ELS_FLOGI,
fc_lport_flogi_resp, lport,
lport->vport ? 2 * lport->r_a_tov :
lport->e_d_tov))
fc_lport_error(lport, NULL);
}
/**
* fc_lport_config() - Configure a fc_lport
* @lport: The local port to be configured
*/
int fc_lport_config(struct fc_lport *lport)
{
INIT_DELAYED_WORK(&lport->retry_work, fc_lport_timeout);
mutex_init(&lport->lp_mutex);
fc_lport_state_enter(lport, LPORT_ST_DISABLED);
fc_lport_add_fc4_type(lport, FC_TYPE_FCP);
fc_lport_add_fc4_type(lport, FC_TYPE_CT);
fc_fc4_conf_lport_params(lport, FC_TYPE_FCP);
return 0;
}
EXPORT_SYMBOL(fc_lport_config);
/**
* fc_lport_init() - Initialize the lport layer for a local port
* @lport: The local port to initialize the exchange layer for
*/
int fc_lport_init(struct fc_lport *lport)
{
struct fc_host_attrs *fc_host;
fc_host = shost_to_fc_host(lport->host);
/* Set FDMI version to FDMI-2 specification*/
fc_host->fdmi_version = FDMI_V2;
fc_host_port_type(lport->host) = FC_PORTTYPE_NPORT;
fc_host_node_name(lport->host) = lport->wwnn;
fc_host_port_name(lport->host) = lport->wwpn;
fc_host_supported_classes(lport->host) = FC_COS_CLASS3;
memset(fc_host_supported_fc4s(lport->host), 0,
sizeof(fc_host_supported_fc4s(lport->host)));
fc_host_supported_fc4s(lport->host)[2] = 1;
fc_host_supported_fc4s(lport->host)[7] = 1;
fc_host_num_discovered_ports(lport->host) = 4;
/* This value is also unchanging */
memset(fc_host_active_fc4s(lport->host), 0,
sizeof(fc_host_active_fc4s(lport->host)));
fc_host_active_fc4s(lport->host)[2] = 1;
fc_host_active_fc4s(lport->host)[7] = 1;
fc_host_maxframe_size(lport->host) = lport->mfs;
fc_host_supported_speeds(lport->host) = 0;
if (lport->link_supported_speeds & FC_PORTSPEED_1GBIT)
fc_host_supported_speeds(lport->host) |= FC_PORTSPEED_1GBIT;
if (lport->link_supported_speeds & FC_PORTSPEED_10GBIT)
fc_host_supported_speeds(lport->host) |= FC_PORTSPEED_10GBIT;
if (lport->link_supported_speeds & FC_PORTSPEED_40GBIT)
fc_host_supported_speeds(lport->host) |= FC_PORTSPEED_40GBIT;
if (lport->link_supported_speeds & FC_PORTSPEED_100GBIT)
fc_host_supported_speeds(lport->host) |= FC_PORTSPEED_100GBIT;
if (lport->link_supported_speeds & FC_PORTSPEED_25GBIT)
fc_host_supported_speeds(lport->host) |= FC_PORTSPEED_25GBIT;
if (lport->link_supported_speeds & FC_PORTSPEED_50GBIT)
fc_host_supported_speeds(lport->host) |= FC_PORTSPEED_50GBIT;
if (lport->link_supported_speeds & FC_PORTSPEED_100GBIT)
fc_host_supported_speeds(lport->host) |= FC_PORTSPEED_100GBIT;
fc_fc4_add_lport(lport);
fc_host_num_discovered_ports(lport->host) = DISCOVERED_PORTS;
fc_host_port_state(lport->host) = FC_PORTSTATE_ONLINE;
fc_host_max_ct_payload(lport->host) = MAX_CT_PAYLOAD;
fc_host_num_ports(lport->host) = NUMBER_OF_PORTS;
fc_host_bootbios_state(lport->host) = 0X00000000;
snprintf(fc_host_bootbios_version(lport->host),
FC_SYMBOLIC_NAME_SIZE, "%s", "Unknown");
return 0;
}
EXPORT_SYMBOL(fc_lport_init);
/**
* fc_lport_bsg_resp() - The common response handler for FC Passthrough requests
* @sp: The sequence for the FC Passthrough response
* @fp: The response frame
* @info_arg: The BSG info that the response is for
*/
static void fc_lport_bsg_resp(struct fc_seq *sp, struct fc_frame *fp,
void *info_arg)
{
struct fc_bsg_info *info = info_arg;
struct bsg_job *job = info->job;
struct fc_bsg_reply *bsg_reply = job->reply;
struct fc_lport *lport = info->lport;
struct fc_frame_header *fh;
size_t len;
void *buf;
if (IS_ERR(fp)) {
bsg_reply->result = (PTR_ERR(fp) == -FC_EX_CLOSED) ?
-ECONNABORTED : -ETIMEDOUT;
job->reply_len = sizeof(uint32_t);
bsg_job_done(job, bsg_reply->result,
bsg_reply->reply_payload_rcv_len);
kfree(info);
return;
}
mutex_lock(&lport->lp_mutex);
fh = fc_frame_header_get(fp);
len = fr_len(fp) - sizeof(*fh);
buf = fc_frame_payload_get(fp, 0);
if (fr_sof(fp) == FC_SOF_I3 && !ntohs(fh->fh_seq_cnt)) {
/* Get the response code from the first frame payload */
unsigned short cmd = (info->rsp_code == FC_FS_ACC) ?
ntohs(((struct fc_ct_hdr *)buf)->ct_cmd) :
(unsigned short)fc_frame_payload_op(fp);
/* Save the reply status of the job */
bsg_reply->reply_data.ctels_reply.status =
(cmd == info->rsp_code) ?
FC_CTELS_STATUS_OK : FC_CTELS_STATUS_REJECT;
}
bsg_reply->reply_payload_rcv_len +=
fc_copy_buffer_to_sglist(buf, len, info->sg, &info->nents,
&info->offset, NULL);
if (fr_eof(fp) == FC_EOF_T &&
(ntoh24(fh->fh_f_ctl) & (FC_FC_LAST_SEQ | FC_FC_END_SEQ)) ==
(FC_FC_LAST_SEQ | FC_FC_END_SEQ)) {
if (bsg_reply->reply_payload_rcv_len >
job->reply_payload.payload_len)
bsg_reply->reply_payload_rcv_len =
job->reply_payload.payload_len;
bsg_reply->result = 0;
bsg_job_done(job, bsg_reply->result,
bsg_reply->reply_payload_rcv_len);
kfree(info);
}
fc_frame_free(fp);
mutex_unlock(&lport->lp_mutex);
}
/**
* fc_lport_els_request() - Send ELS passthrough request
* @job: The BSG Passthrough job
* @lport: The local port sending the request
* @did: The destination port id
* @tov: The timeout period (in ms)
*/
static int fc_lport_els_request(struct bsg_job *job,
struct fc_lport *lport,
u32 did, u32 tov)
{
struct fc_bsg_info *info;
struct fc_frame *fp;
struct fc_frame_header *fh;
char *pp;
int len;
lockdep_assert_held(&lport->lp_mutex);
fp = fc_frame_alloc(lport, job->request_payload.payload_len);
if (!fp)
return -ENOMEM;
len = job->request_payload.payload_len;
pp = fc_frame_payload_get(fp, len);
sg_copy_to_buffer(job->request_payload.sg_list,
job->request_payload.sg_cnt,
pp, len);
fh = fc_frame_header_get(fp);
fh->fh_r_ctl = FC_RCTL_ELS_REQ;
hton24(fh->fh_d_id, did);
hton24(fh->fh_s_id, lport->port_id);
fh->fh_type = FC_TYPE_ELS;
hton24(fh->fh_f_ctl, FC_FCTL_REQ);
fh->fh_cs_ctl = 0;
fh->fh_df_ctl = 0;
fh->fh_parm_offset = 0;
info = kzalloc(sizeof(struct fc_bsg_info), GFP_KERNEL);
if (!info) {
fc_frame_free(fp);
return -ENOMEM;
}
info->job = job;
info->lport = lport;
info->rsp_code = ELS_LS_ACC;
info->nents = job->reply_payload.sg_cnt;
info->sg = job->reply_payload.sg_list;
if (!fc_exch_seq_send(lport, fp, fc_lport_bsg_resp,
NULL, info, tov)) {
kfree(info);
return -ECOMM;
}
return 0;
}
/**
* fc_lport_ct_request() - Send CT Passthrough request
* @job: The BSG Passthrough job
* @lport: The local port sending the request
* @did: The destination FC-ID
* @tov: The timeout period to wait for the response
*/
static int fc_lport_ct_request(struct bsg_job *job,
struct fc_lport *lport, u32 did, u32 tov)
{
struct fc_bsg_info *info;
struct fc_frame *fp;
struct fc_frame_header *fh;
struct fc_ct_req *ct;
size_t len;
lockdep_assert_held(&lport->lp_mutex);
fp = fc_frame_alloc(lport, sizeof(struct fc_ct_hdr) +
job->request_payload.payload_len);
if (!fp)
return -ENOMEM;
len = job->request_payload.payload_len;
ct = fc_frame_payload_get(fp, len);
sg_copy_to_buffer(job->request_payload.sg_list,
job->request_payload.sg_cnt,
ct, len);
fh = fc_frame_header_get(fp);
fh->fh_r_ctl = FC_RCTL_DD_UNSOL_CTL;
hton24(fh->fh_d_id, did);
hton24(fh->fh_s_id, lport->port_id);
fh->fh_type = FC_TYPE_CT;
hton24(fh->fh_f_ctl, FC_FCTL_REQ);
fh->fh_cs_ctl = 0;
fh->fh_df_ctl = 0;
fh->fh_parm_offset = 0;
info = kzalloc(sizeof(struct fc_bsg_info), GFP_KERNEL);
if (!info) {
fc_frame_free(fp);
return -ENOMEM;
}
info->job = job;
info->lport = lport;
info->rsp_code = FC_FS_ACC;
info->nents = job->reply_payload.sg_cnt;
info->sg = job->reply_payload.sg_list;
if (!fc_exch_seq_send(lport, fp, fc_lport_bsg_resp,
NULL, info, tov)) {
kfree(info);
return -ECOMM;
}
return 0;
}
/**
* fc_lport_bsg_request() - The common entry point for sending
* FC Passthrough requests
* @job: The BSG passthrough job
*/
int fc_lport_bsg_request(struct bsg_job *job)
{
struct fc_bsg_request *bsg_request = job->request;
struct fc_bsg_reply *bsg_reply = job->reply;
struct Scsi_Host *shost = fc_bsg_to_shost(job);
struct fc_lport *lport = shost_priv(shost);
struct fc_rport *rport;
struct fc_rport_priv *rdata;
int rc = -EINVAL;
u32 did, tov;
bsg_reply->reply_payload_rcv_len = 0;
mutex_lock(&lport->lp_mutex);
switch (bsg_request->msgcode) {
case FC_BSG_RPT_ELS:
rport = fc_bsg_to_rport(job);
if (!rport)
break;
rdata = rport->dd_data;
rc = fc_lport_els_request(job, lport, rport->port_id,
rdata->e_d_tov);
break;
case FC_BSG_RPT_CT:
rport = fc_bsg_to_rport(job);
if (!rport)
break;
rdata = rport->dd_data;
rc = fc_lport_ct_request(job, lport, rport->port_id,
rdata->e_d_tov);
break;
case FC_BSG_HST_CT:
did = ntoh24(bsg_request->rqst_data.h_ct.port_id);
if (did == FC_FID_DIR_SERV) {
rdata = lport->dns_rdata;
if (!rdata)
break;
tov = rdata->e_d_tov;
} else {
rdata = fc_rport_lookup(lport, did);
if (!rdata)
break;
tov = rdata->e_d_tov;
kref_put(&rdata->kref, fc_rport_destroy);
}
rc = fc_lport_ct_request(job, lport, did, tov);
break;
case FC_BSG_HST_ELS_NOLOGIN:
did = ntoh24(bsg_request->rqst_data.h_els.port_id);
rc = fc_lport_els_request(job, lport, did, lport->e_d_tov);
break;
}
mutex_unlock(&lport->lp_mutex);
return rc;
}
EXPORT_SYMBOL(fc_lport_bsg_request);
/* linux-master: drivers/scsi/libfc/fc_lport.c */
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright(c) 2007 - 2008 Intel Corporation. All rights reserved.
*
* Maintained at www.Open-FCoE.org
*/
/*
* Target Discovery
*
* This block discovers all FC-4 remote ports, including FCP initiators. It
* also handles RSCN events and re-discovery if necessary.
*/
/*
* DISC LOCKING
*
* The disc mutex can be locked when acquiring rport locks, but may not
* be held when acquiring the lport lock. Refer to fc_lport.c for more
* details.
*/
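/*
 * Illustrative sketch, hypothetical and not compiled: the ordering rule
 * above in code form. Holding disc_mutex while taking an rport lock is
 * fine; taking lp_mutex while disc_mutex is held is not, so the lport
 * lock, when needed at all, must come first.
 */
#if 0	/* example only */
static void example_disc_lock_order(struct fc_disc *disc,
				    struct fc_rport_priv *rdata)
{
	struct fc_lport *lport = fc_disc_lport(disc);

	/* Allowed: disc > rport. */
	mutex_lock(&disc->disc_mutex);
	mutex_lock(&rdata->rp_mutex);
	mutex_unlock(&rdata->rp_mutex);
	mutex_unlock(&disc->disc_mutex);

	/* Allowed: lport > disc; the reverse order would risk deadlock. */
	mutex_lock(&lport->lp_mutex);
	mutex_lock(&disc->disc_mutex);
	mutex_unlock(&disc->disc_mutex);
	mutex_unlock(&lport->lp_mutex);
}
#endif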
#include <linux/timer.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/export.h>
#include <linux/list.h>
#include <asm/unaligned.h>
#include <scsi/fc/fc_gs.h>
#include <scsi/libfc.h>
#include "fc_libfc.h"
#define FC_DISC_RETRY_LIMIT 3 /* max retries */
#define FC_DISC_RETRY_DELAY 500UL /* (msecs) delay */
static void fc_disc_gpn_ft_req(struct fc_disc *);
static void fc_disc_gpn_ft_resp(struct fc_seq *, struct fc_frame *, void *);
static void fc_disc_done(struct fc_disc *, enum fc_disc_event);
static void fc_disc_timeout(struct work_struct *);
static int fc_disc_single(struct fc_lport *, struct fc_disc_port *);
static void fc_disc_restart(struct fc_disc *);
/**
* fc_disc_stop_rports() - Delete all the remote ports associated with the lport
* @disc: The discovery job to stop remote ports on
*/
static void fc_disc_stop_rports(struct fc_disc *disc)
{
struct fc_rport_priv *rdata;
lockdep_assert_held(&disc->disc_mutex);
list_for_each_entry(rdata, &disc->rports, peers) {
if (kref_get_unless_zero(&rdata->kref)) {
fc_rport_logoff(rdata);
kref_put(&rdata->kref, fc_rport_destroy);
}
}
}
/**
* fc_disc_recv_rscn_req() - Handle Registered State Change Notification (RSCN)
* @disc: The discovery object to which the RSCN applies
* @fp: The RSCN frame
*/
static void fc_disc_recv_rscn_req(struct fc_disc *disc, struct fc_frame *fp)
{
struct fc_lport *lport;
struct fc_els_rscn *rp;
struct fc_els_rscn_page *pp;
struct fc_seq_els_data rjt_data;
unsigned int len;
int redisc = 0;
enum fc_els_rscn_addr_fmt fmt;
LIST_HEAD(disc_ports);
struct fc_disc_port *dp, *next;
lockdep_assert_held(&disc->disc_mutex);
lport = fc_disc_lport(disc);
FC_DISC_DBG(disc, "Received an RSCN event\n");
/* make sure the frame contains an RSCN message */
rp = fc_frame_payload_get(fp, sizeof(*rp));
if (!rp)
goto reject;
/* make sure the page length is as expected (4 bytes) */
if (rp->rscn_page_len != sizeof(*pp))
goto reject;
/* get the RSCN payload length */
len = ntohs(rp->rscn_plen);
if (len < sizeof(*rp))
goto reject;
/* make sure the frame contains the expected payload */
rp = fc_frame_payload_get(fp, len);
if (!rp)
goto reject;
/* payload must be a multiple of the RSCN page size */
len -= sizeof(*rp);
if (len % sizeof(*pp))
goto reject;
for (pp = (void *)(rp + 1); len > 0; len -= sizeof(*pp), pp++) {
fmt = pp->rscn_page_flags >> ELS_RSCN_ADDR_FMT_BIT;
fmt &= ELS_RSCN_ADDR_FMT_MASK;
/*
* if we get an address format other than port
* (area, domain, fabric), then do a full discovery
*/
switch (fmt) {
case ELS_ADDR_FMT_PORT:
FC_DISC_DBG(disc, "Port address format for port "
"(%6.6x)\n", ntoh24(pp->rscn_fid));
dp = kzalloc(sizeof(*dp), GFP_KERNEL);
if (!dp) {
redisc = 1;
break;
}
dp->lp = lport;
dp->port_id = ntoh24(pp->rscn_fid);
list_add_tail(&dp->peers, &disc_ports);
break;
case ELS_ADDR_FMT_AREA:
case ELS_ADDR_FMT_DOM:
case ELS_ADDR_FMT_FAB:
default:
FC_DISC_DBG(disc, "Address format is (%d)\n", fmt);
redisc = 1;
break;
}
}
fc_seq_els_rsp_send(fp, ELS_LS_ACC, NULL);
/*
* If not doing a complete rediscovery, do GPN_ID on
* the individual ports mentioned in the list.
* If any of these get an error, do a full rediscovery.
* In any case, go through the list and free the entries.
*/
list_for_each_entry_safe(dp, next, &disc_ports, peers) {
list_del(&dp->peers);
if (!redisc)
redisc = fc_disc_single(lport, dp);
kfree(dp);
}
if (redisc) {
FC_DISC_DBG(disc, "RSCN received: rediscovering\n");
fc_disc_restart(disc);
} else {
FC_DISC_DBG(disc, "RSCN received: not rediscovering. "
"redisc %d state %d in_prog %d\n",
redisc, lport->state, disc->pending);
}
fc_frame_free(fp);
return;
reject:
FC_DISC_DBG(disc, "Received a bad RSCN frame\n");
rjt_data.reason = ELS_RJT_LOGIC;
rjt_data.explan = ELS_EXPL_NONE;
fc_seq_els_rsp_send(fp, ELS_LS_RJT, &rjt_data);
fc_frame_free(fp);
}
/**
* fc_disc_recv_req() - Handle incoming requests
* @lport: The local port receiving the request
* @fp: The request frame
*
* Locking Note: This function is called from the EM and will lock
* the disc_mutex before calling the handler for the
* request.
*/
static void fc_disc_recv_req(struct fc_lport *lport, struct fc_frame *fp)
{
u8 op;
struct fc_disc *disc = &lport->disc;
op = fc_frame_payload_op(fp);
switch (op) {
case ELS_RSCN:
mutex_lock(&disc->disc_mutex);
fc_disc_recv_rscn_req(disc, fp);
mutex_unlock(&disc->disc_mutex);
break;
default:
FC_DISC_DBG(disc, "Received an unsupported request, "
"the opcode is (%x)\n", op);
fc_frame_free(fp);
break;
}
}
/**
* fc_disc_restart() - Restart discovery
* @disc: The discovery object to be restarted
*/
static void fc_disc_restart(struct fc_disc *disc)
{
lockdep_assert_held(&disc->disc_mutex);
if (!disc->disc_callback)
return;
FC_DISC_DBG(disc, "Restarting discovery\n");
disc->requested = 1;
if (disc->pending)
return;
/*
* Advance disc_id. This is an arbitrary non-zero number that will
* match the value in the fc_rport_priv after discovery for all
 * freshly-discovered remote ports. Adding 2 and OR-ing in the low bit keeps
 * the value odd, so it can never wrap back to zero.
 */
disc->disc_id = (disc->disc_id + 2) | 1;
disc->retry_count = 0;
fc_disc_gpn_ft_req(disc);
}
/**
* fc_disc_start() - Start discovery on a local port
* @lport: The local port to have discovery started on
* @disc_callback: Callback function to be called when discovery is complete
*/
static void fc_disc_start(void (*disc_callback)(struct fc_lport *,
enum fc_disc_event),
struct fc_lport *lport)
{
struct fc_disc *disc = &lport->disc;
/*
* At this point we may have a new disc job or an existing
* one. Either way, let's lock when we make changes to it
* and send the GPN_FT request.
*/
mutex_lock(&disc->disc_mutex);
disc->disc_callback = disc_callback;
fc_disc_restart(disc);
mutex_unlock(&disc->disc_mutex);
}
/**
* fc_disc_done() - Discovery has been completed
* @disc: The discovery context
* @event: The discovery completion status
*/
static void fc_disc_done(struct fc_disc *disc, enum fc_disc_event event)
{
struct fc_lport *lport = fc_disc_lport(disc);
struct fc_rport_priv *rdata;
lockdep_assert_held(&disc->disc_mutex);
FC_DISC_DBG(disc, "Discovery complete\n");
disc->pending = 0;
if (disc->requested) {
fc_disc_restart(disc);
return;
}
/*
* Go through all remote ports. If they were found in the latest
* discovery, reverify or log them in. Otherwise, log them out.
* Skip ports which were never discovered. These are the dNS port
* and ports which were created by PLOGI.
*
* We don't need to use the _rcu variant here as the rport list
* is protected by the disc mutex which is already held on entry.
*/
list_for_each_entry(rdata, &disc->rports, peers) {
if (!kref_get_unless_zero(&rdata->kref))
continue;
if (rdata->disc_id) {
if (rdata->disc_id == disc->disc_id)
fc_rport_login(rdata);
else
fc_rport_logoff(rdata);
}
kref_put(&rdata->kref, fc_rport_destroy);
}
mutex_unlock(&disc->disc_mutex);
disc->disc_callback(lport, event);
mutex_lock(&disc->disc_mutex);
}
/**
* fc_disc_error() - Handle error on dNS request
* @disc: The discovery context
* @fp: The error code encoded as a frame pointer
*/
static void fc_disc_error(struct fc_disc *disc, struct fc_frame *fp)
{
struct fc_lport *lport = fc_disc_lport(disc);
unsigned long delay = 0;
FC_DISC_DBG(disc, "Error %d, retries %d/%d\n",
PTR_ERR_OR_ZERO(fp), disc->retry_count,
FC_DISC_RETRY_LIMIT);
if (!fp || PTR_ERR(fp) == -FC_EX_TIMEOUT) {
/*
 * Memory allocation failure or the exchange timed out;
 * retry after a delay.
*/
if (disc->retry_count < FC_DISC_RETRY_LIMIT) {
/* go ahead and retry */
if (!fp)
delay = msecs_to_jiffies(FC_DISC_RETRY_DELAY);
else {
delay = msecs_to_jiffies(lport->e_d_tov);
/* timeout faster first time */
if (!disc->retry_count)
delay /= 4;
}
disc->retry_count++;
schedule_delayed_work(&disc->disc_work, delay);
} else
fc_disc_done(disc, DISC_EV_FAILED);
} else if (PTR_ERR(fp) == -FC_EX_CLOSED) {
/*
* if discovery fails due to lport reset, clear
* pending flag so that subsequent discovery can
* continue
*/
disc->pending = 0;
}
}
/**
* fc_disc_gpn_ft_req() - Send Get Port Names by FC-4 type (GPN_FT) request
* @disc: The discovery context
*/
static void fc_disc_gpn_ft_req(struct fc_disc *disc)
{
struct fc_frame *fp;
struct fc_lport *lport = fc_disc_lport(disc);
lockdep_assert_held(&disc->disc_mutex);
WARN_ON(!fc_lport_test_ready(lport));
disc->pending = 1;
disc->requested = 0;
disc->buf_len = 0;
disc->seq_count = 0;
fp = fc_frame_alloc(lport,
sizeof(struct fc_ct_hdr) +
sizeof(struct fc_ns_gid_ft));
if (!fp)
goto err;
if (lport->tt.elsct_send(lport, 0, fp,
FC_NS_GPN_FT,
fc_disc_gpn_ft_resp,
disc, 3 * lport->r_a_tov))
return;
err:
fc_disc_error(disc, NULL);
}
/**
* fc_disc_gpn_ft_parse() - Parse the body of the dNS GPN_FT response.
* @disc: The discovery context
* @buf: The GPN_FT response buffer
* @len: The size of response buffer
*
* Goes through the list of IDs and names resulting from a request.
*/
static int fc_disc_gpn_ft_parse(struct fc_disc *disc, void *buf, size_t len)
{
struct fc_lport *lport;
struct fc_gpn_ft_resp *np;
char *bp;
size_t plen;
size_t tlen;
int error = 0;
struct fc_rport_identifiers ids;
struct fc_rport_priv *rdata;
lport = fc_disc_lport(disc);
disc->seq_count++;
/*
* Handle partial name record left over from previous call.
*/
bp = buf;
plen = len;
np = (struct fc_gpn_ft_resp *)bp;
tlen = disc->buf_len;
disc->buf_len = 0;
if (tlen) {
WARN_ON(tlen >= sizeof(*np));
plen = sizeof(*np) - tlen;
WARN_ON(plen <= 0);
WARN_ON(plen >= sizeof(*np));
if (plen > len)
plen = len;
np = &disc->partial_buf;
memcpy((char *)np + tlen, bp, plen);
/*
* Set bp so that the loop below will advance it to the
* first valid full name element.
*/
bp -= tlen;
len += tlen;
plen += tlen;
disc->buf_len = (unsigned char) plen;
if (plen == sizeof(*np))
disc->buf_len = 0;
}
/*
* Handle full name records, including the one filled from above.
* Normally, np == bp and plen == len, but from the partial case above,
* bp, len describe the overall buffer, and np, plen describe the
 * partial buffer, which should now be full.
* After the first time through the loop, things return to "normal".
*/
while (plen >= sizeof(*np)) {
ids.port_id = ntoh24(np->fp_fid);
ids.port_name = ntohll(np->fp_wwpn);
if (ids.port_id != lport->port_id &&
ids.port_name != lport->wwpn) {
rdata = fc_rport_create(lport, ids.port_id);
if (rdata) {
rdata->ids.port_name = ids.port_name;
rdata->disc_id = disc->disc_id;
} else {
printk(KERN_WARNING "libfc: Failed to allocate "
"memory for the newly discovered port "
"(%6.6x)\n", ids.port_id);
error = -ENOMEM;
}
}
if (np->fp_flags & FC_NS_FID_LAST) {
fc_disc_done(disc, DISC_EV_SUCCESS);
len = 0;
break;
}
len -= sizeof(*np);
bp += sizeof(*np);
np = (struct fc_gpn_ft_resp *)bp;
plen = len;
}
/*
* Save any partial record at the end of the buffer for next time.
*/
if (error == 0 && len > 0 && len < sizeof(*np)) {
if (np != &disc->partial_buf) {
FC_DISC_DBG(disc, "Partial buffer remains "
"for discovery\n");
memcpy(&disc->partial_buf, np, len);
}
disc->buf_len = (unsigned char) len;
}
return error;
}
/**
* fc_disc_timeout() - Handler for discovery timeouts
* @work: Structure holding discovery context that needs to retry discovery
*/
static void fc_disc_timeout(struct work_struct *work)
{
struct fc_disc *disc = container_of(work,
struct fc_disc,
disc_work.work);
mutex_lock(&disc->disc_mutex);
fc_disc_gpn_ft_req(disc);
mutex_unlock(&disc->disc_mutex);
}
/**
* fc_disc_gpn_ft_resp() - Handle a response frame from Get Port Names (GPN_FT)
* @sp: The sequence that the GPN_FT response was received on
* @fp: The GPN_FT response frame
* @disc_arg: The discovery context
*
* Locking Note: This function is called without disc mutex held, and
* should do all its processing with the mutex held
*/
static void fc_disc_gpn_ft_resp(struct fc_seq *sp, struct fc_frame *fp,
void *disc_arg)
{
struct fc_disc *disc = disc_arg;
struct fc_ct_hdr *cp;
struct fc_frame_header *fh;
enum fc_disc_event event = DISC_EV_NONE;
unsigned int seq_cnt;
unsigned int len;
int error = 0;
mutex_lock(&disc->disc_mutex);
FC_DISC_DBG(disc, "Received a GPN_FT response\n");
if (IS_ERR(fp)) {
fc_disc_error(disc, fp);
mutex_unlock(&disc->disc_mutex);
return;
}
WARN_ON(!fc_frame_is_linear(fp)); /* buffer must be contiguous */
fh = fc_frame_header_get(fp);
len = fr_len(fp) - sizeof(*fh);
seq_cnt = ntohs(fh->fh_seq_cnt);
if (fr_sof(fp) == FC_SOF_I3 && seq_cnt == 0 && disc->seq_count == 0) {
cp = fc_frame_payload_get(fp, sizeof(*cp));
if (!cp) {
FC_DISC_DBG(disc, "GPN_FT response too short, len %d\n",
fr_len(fp));
event = DISC_EV_FAILED;
} else if (ntohs(cp->ct_cmd) == FC_FS_ACC) {
/* Accepted, parse the response. */
len -= sizeof(*cp);
error = fc_disc_gpn_ft_parse(disc, cp + 1, len);
} else if (ntohs(cp->ct_cmd) == FC_FS_RJT) {
FC_DISC_DBG(disc, "GPN_FT rejected reason %x exp %x "
"(check zoning)\n", cp->ct_reason,
cp->ct_explan);
event = DISC_EV_FAILED;
if (cp->ct_reason == FC_FS_RJT_UNABL &&
cp->ct_explan == FC_FS_EXP_FTNR)
event = DISC_EV_SUCCESS;
} else {
FC_DISC_DBG(disc, "GPN_FT unexpected response code "
"%x\n", ntohs(cp->ct_cmd));
event = DISC_EV_FAILED;
}
} else if (fr_sof(fp) == FC_SOF_N3 && seq_cnt == disc->seq_count) {
error = fc_disc_gpn_ft_parse(disc, fh + 1, len);
} else {
FC_DISC_DBG(disc, "GPN_FT unexpected frame - out of sequence? "
"seq_cnt %x expected %x sof %x eof %x\n",
seq_cnt, disc->seq_count, fr_sof(fp), fr_eof(fp));
event = DISC_EV_FAILED;
}
if (error)
fc_disc_error(disc, ERR_PTR(error));
else if (event != DISC_EV_NONE)
fc_disc_done(disc, event);
fc_frame_free(fp);
mutex_unlock(&disc->disc_mutex);
}
/**
 * fc_disc_gpn_id_resp() - Handle a response frame from Get Port Name by ID (GPN_ID)
* @sp: The sequence the GPN_ID is on
* @fp: The response frame
 * @rdata_arg: The remote port that the GPN_ID request was sent for
*
* Locking Note: This function is called without disc mutex held.
*/
static void fc_disc_gpn_id_resp(struct fc_seq *sp, struct fc_frame *fp,
void *rdata_arg)
{
struct fc_rport_priv *rdata = rdata_arg;
struct fc_rport_priv *new_rdata;
struct fc_lport *lport;
struct fc_disc *disc;
struct fc_ct_hdr *cp;
struct fc_ns_gid_pn *pn;
u64 port_name;
lport = rdata->local_port;
disc = &lport->disc;
if (PTR_ERR(fp) == -FC_EX_CLOSED)
goto out;
if (IS_ERR(fp)) {
mutex_lock(&disc->disc_mutex);
fc_disc_restart(disc);
mutex_unlock(&disc->disc_mutex);
goto out;
}
cp = fc_frame_payload_get(fp, sizeof(*cp));
if (!cp)
goto redisc;
if (ntohs(cp->ct_cmd) == FC_FS_ACC) {
if (fr_len(fp) < sizeof(struct fc_frame_header) +
sizeof(*cp) + sizeof(*pn))
goto redisc;
pn = (struct fc_ns_gid_pn *)(cp + 1);
port_name = get_unaligned_be64(&pn->fn_wwpn);
mutex_lock(&rdata->rp_mutex);
if (rdata->ids.port_name == -1)
rdata->ids.port_name = port_name;
else if (rdata->ids.port_name != port_name) {
FC_DISC_DBG(disc, "GPN_ID accepted. WWPN changed. "
"Port-id %6.6x wwpn %16.16llx\n",
rdata->ids.port_id, port_name);
mutex_unlock(&rdata->rp_mutex);
fc_rport_logoff(rdata);
mutex_lock(&lport->disc.disc_mutex);
new_rdata = fc_rport_create(lport, rdata->ids.port_id);
mutex_unlock(&lport->disc.disc_mutex);
if (new_rdata) {
new_rdata->disc_id = disc->disc_id;
fc_rport_login(new_rdata);
}
goto free_fp;
}
rdata->disc_id = disc->disc_id;
mutex_unlock(&rdata->rp_mutex);
fc_rport_login(rdata);
} else if (ntohs(cp->ct_cmd) == FC_FS_RJT) {
FC_DISC_DBG(disc, "GPN_ID rejected reason %x exp %x\n",
cp->ct_reason, cp->ct_explan);
fc_rport_logoff(rdata);
} else {
FC_DISC_DBG(disc, "GPN_ID unexpected response code %x\n",
ntohs(cp->ct_cmd));
redisc:
mutex_lock(&disc->disc_mutex);
fc_disc_restart(disc);
mutex_unlock(&disc->disc_mutex);
}
free_fp:
fc_frame_free(fp);
out:
kref_put(&rdata->kref, fc_rport_destroy);
}
/**
 * fc_disc_gpn_id_req() - Send a Get Port Name by ID (GPN_ID) request
* @lport: The local port to initiate discovery on
* @rdata: remote port private data
*
* On failure, an error code is returned.
*/
static int fc_disc_gpn_id_req(struct fc_lport *lport,
struct fc_rport_priv *rdata)
{
struct fc_frame *fp;
lockdep_assert_held(&lport->disc.disc_mutex);
fp = fc_frame_alloc(lport, sizeof(struct fc_ct_hdr) +
sizeof(struct fc_ns_fid));
if (!fp)
return -ENOMEM;
if (!lport->tt.elsct_send(lport, rdata->ids.port_id, fp, FC_NS_GPN_ID,
fc_disc_gpn_id_resp, rdata,
3 * lport->r_a_tov))
return -ENOMEM;
kref_get(&rdata->kref);
return 0;
}
/**
* fc_disc_single() - Discover the directory information for a single target
* @lport: The local port the remote port is associated with
* @dp: The port to rediscover
*/
static int fc_disc_single(struct fc_lport *lport, struct fc_disc_port *dp)
{
struct fc_rport_priv *rdata;
lockdep_assert_held(&lport->disc.disc_mutex);
rdata = fc_rport_create(lport, dp->port_id);
if (!rdata)
return -ENOMEM;
rdata->disc_id = 0;
return fc_disc_gpn_id_req(lport, rdata);
}
/**
* fc_disc_stop() - Stop discovery for a given lport
* @lport: The local port that discovery should stop on
*/
static void fc_disc_stop(struct fc_lport *lport)
{
struct fc_disc *disc = &lport->disc;
if (disc->pending)
cancel_delayed_work_sync(&disc->disc_work);
mutex_lock(&disc->disc_mutex);
fc_disc_stop_rports(disc);
mutex_unlock(&disc->disc_mutex);
}
/**
* fc_disc_stop_final() - Stop discovery for a given lport
* @lport: The lport that discovery should stop on
*
* This function will block until discovery has been
* completely stopped and all rports have been deleted.
*/
static void fc_disc_stop_final(struct fc_lport *lport)
{
fc_disc_stop(lport);
fc_rport_flush_queue();
}
/**
* fc_disc_config() - Configure the discovery layer for a local port
* @lport: The local port that needs the discovery layer to be configured
 * @priv: Private data structure for users of the discovery layer
*/
void fc_disc_config(struct fc_lport *lport, void *priv)
{
struct fc_disc *disc;
if (!lport->tt.disc_start)
lport->tt.disc_start = fc_disc_start;
if (!lport->tt.disc_stop)
lport->tt.disc_stop = fc_disc_stop;
if (!lport->tt.disc_stop_final)
lport->tt.disc_stop_final = fc_disc_stop_final;
if (!lport->tt.disc_recv_req)
lport->tt.disc_recv_req = fc_disc_recv_req;
disc = &lport->disc;
disc->priv = priv;
}
EXPORT_SYMBOL(fc_disc_config);
/**
* fc_disc_init() - Initialize the discovery layer for a local port
* @lport: The local port that needs the discovery layer to be initialized
*/
void fc_disc_init(struct fc_lport *lport)
{
struct fc_disc *disc = &lport->disc;
INIT_DELAYED_WORK(&disc->disc_work, fc_disc_timeout);
mutex_init(&disc->disc_mutex);
INIT_LIST_HEAD(&disc->rports);
}
EXPORT_SYMBOL(fc_disc_init);
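/*
 * Illustrative usage sketch for a hypothetical LLD (not part of this
 * file's logic; "my_priv" is a made-up example): discovery is
 * initialized once per lport and may be configured with driver-private
 * data before the lport is brought up.
 *
 *	fc_disc_init(lport);
 *	fc_disc_config(lport, my_priv);
 *
 * fc_disc_config() only fills in the disc_* template operations that
 * the LLD has not already overridden in lport->tt.
 */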
| linux-master | drivers/scsi/libfc/fc_disc.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright(c) 2009 Intel Corporation. All rights reserved.
*
* Maintained at www.Open-FCoE.org
*/
/*
* NPIV VN_Port helper functions for libfc
*/
#include <scsi/libfc.h>
#include <linux/export.h>
/**
* libfc_vport_create() - Create a new NPIV vport instance
* @vport: fc_vport structure from scsi_transport_fc
* @privsize: driver private data size to allocate along with the Scsi_Host
*/
struct fc_lport *libfc_vport_create(struct fc_vport *vport, int privsize)
{
struct Scsi_Host *shost = vport_to_shost(vport);
struct fc_lport *n_port = shost_priv(shost);
struct fc_lport *vn_port;
vn_port = libfc_host_alloc(shost->hostt, privsize);
if (!vn_port)
return vn_port;
vn_port->vport = vport;
vport->dd_data = vn_port;
mutex_lock(&n_port->lp_mutex);
list_add_tail(&vn_port->list, &n_port->vports);
mutex_unlock(&n_port->lp_mutex);
return vn_port;
}
EXPORT_SYMBOL(libfc_vport_create);
/**
 * fc_vport_id_lookup() - Find NPIV lport that matches a given fabric ID
* @n_port: Top level N_Port which may have multiple NPIV VN_Ports
* @port_id: Fabric ID to find a match for
*
* Returns: matching lport pointer or NULL if there is no match
*/
struct fc_lport *fc_vport_id_lookup(struct fc_lport *n_port, u32 port_id)
{
struct fc_lport *lport = NULL;
struct fc_lport *vn_port;
if (n_port->port_id == port_id)
return n_port;
if (port_id == FC_FID_FLOGI)
return n_port; /* for point-to-point */
mutex_lock(&n_port->lp_mutex);
list_for_each_entry(vn_port, &n_port->vports, list) {
if (vn_port->port_id == port_id) {
lport = vn_port;
break;
}
}
mutex_unlock(&n_port->lp_mutex);
return lport;
}
EXPORT_SYMBOL(fc_vport_id_lookup);
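/*
 * Illustrative sketch (an assumption about callers, not a call site in
 * this file): an NPIV-aware LLD typically demultiplexes received frames
 * to the correct VN_Port by destination ID before handing them to the
 * exchange manager:
 *
 *	lport = fc_vport_id_lookup(n_port, ntoh24(fh->fh_d_id));
 *	if (lport)
 *		fc_exch_recv(lport, fp);
 */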
/*
* When setting the link state of vports during an lport state change, it's
* necessary to hold the lp_mutex of both the N_Port and the VN_Port.
* This tells the lockdep engine to treat the nested locking of the VN_Port
* as a different lock class.
*/
enum libfc_lport_mutex_class {
LPORT_MUTEX_NORMAL = 0,
LPORT_MUTEX_VN_PORT = 1,
};
/**
* __fc_vport_setlink() - update link and status on a VN_Port
* @n_port: parent N_Port
* @vn_port: VN_Port to update
*
* Locking: must be called with both the N_Port and VN_Port lp_mutex held
*/
static void __fc_vport_setlink(struct fc_lport *n_port,
struct fc_lport *vn_port)
{
struct fc_vport *vport = vn_port->vport;
if (vn_port->state == LPORT_ST_DISABLED)
return;
if (n_port->state == LPORT_ST_READY) {
if (n_port->npiv_enabled) {
fc_vport_set_state(vport, FC_VPORT_INITIALIZING);
__fc_linkup(vn_port);
} else {
fc_vport_set_state(vport, FC_VPORT_NO_FABRIC_SUPP);
__fc_linkdown(vn_port);
}
} else {
fc_vport_set_state(vport, FC_VPORT_LINKDOWN);
__fc_linkdown(vn_port);
}
}
/**
* fc_vport_setlink() - update link and status on a VN_Port
* @vn_port: virtual port to update
*/
void fc_vport_setlink(struct fc_lport *vn_port)
{
struct fc_vport *vport = vn_port->vport;
struct Scsi_Host *shost = vport_to_shost(vport);
struct fc_lport *n_port = shost_priv(shost);
mutex_lock(&n_port->lp_mutex);
mutex_lock_nested(&vn_port->lp_mutex, LPORT_MUTEX_VN_PORT);
__fc_vport_setlink(n_port, vn_port);
mutex_unlock(&vn_port->lp_mutex);
mutex_unlock(&n_port->lp_mutex);
}
EXPORT_SYMBOL(fc_vport_setlink);
/**
* fc_vports_linkchange() - change the link state of all vports
* @n_port: Parent N_Port that has changed state
*
* Locking: called with the n_port lp_mutex held
*/
void fc_vports_linkchange(struct fc_lport *n_port)
{
struct fc_lport *vn_port;
list_for_each_entry(vn_port, &n_port->vports, list) {
mutex_lock_nested(&vn_port->lp_mutex, LPORT_MUTEX_VN_PORT);
__fc_vport_setlink(n_port, vn_port);
mutex_unlock(&vn_port->lp_mutex);
}
}
| linux-master | drivers/scsi/libfc/fc_npiv.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright(c) 2007 Intel Corporation. All rights reserved.
*
* Maintained at www.Open-FCoE.org
*/
/*
* Frame allocation.
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <linux/crc32.h>
#include <linux/gfp.h>
#include <scsi/fc_frame.h>
/*
* Check the CRC in a frame.
*/
u32 fc_frame_crc_check(struct fc_frame *fp)
{
u32 crc;
u32 error;
const u8 *bp;
unsigned int len;
WARN_ON(!fc_frame_is_linear(fp));
fr_flags(fp) &= ~FCPHF_CRC_UNCHECKED;
len = (fr_len(fp) + 3) & ~3; /* round up length to include fill */
bp = (const u8 *) fr_hdr(fp);
crc = ~crc32(~0, bp, len);
error = crc ^ fr_crc(fp);
return error;
}
EXPORT_SYMBOL(fc_frame_crc_check);
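/*
 * Illustrative sketch (an assumption about callers, not a call site in
 * this file): a caller that deferred CRC checking can validate a
 * received frame like this; a return value of 0 means the CRC matched.
 *
 *	if (fr_flags(fp) & FCPHF_CRC_UNCHECKED) {
 *		if (fc_frame_crc_check(fp))
 *			goto drop;	// CRC mismatch, discard the frame
 *	}
 */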
/*
* Allocate a frame intended to be sent.
* Get an sk_buff for the frame and set the length.
*/
struct fc_frame *_fc_frame_alloc(size_t len)
{
struct fc_frame *fp;
struct sk_buff *skb;
WARN_ON((len % sizeof(u32)) != 0);
len += sizeof(struct fc_frame_header);
skb = alloc_skb_fclone(len + FC_FRAME_HEADROOM + FC_FRAME_TAILROOM +
NET_SKB_PAD, GFP_ATOMIC);
if (!skb)
return NULL;
skb_reserve(skb, NET_SKB_PAD + FC_FRAME_HEADROOM);
fp = (struct fc_frame *) skb;
fc_frame_init(fp);
skb_put(skb, len);
return fp;
}
EXPORT_SYMBOL(_fc_frame_alloc);
struct fc_frame *fc_frame_alloc_fill(struct fc_lport *lp, size_t payload_len)
{
struct fc_frame *fp;
size_t fill;
fill = payload_len % 4;
if (fill != 0)
fill = 4 - fill;
fp = _fc_frame_alloc(payload_len + fill);
if (fp) {
memset((char *) fr_hdr(fp) + payload_len, 0, fill);
/* trim is OK, we just allocated it so there are no fragments */
skb_trim(fp_skb(fp),
payload_len + sizeof(struct fc_frame_header));
}
return fp;
}
EXPORT_SYMBOL(fc_frame_alloc_fill);
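/*
 * Illustrative sketch (an assumption about the fc_frame_alloc() inline
 * in fc_frame.h, not logic in this file): payload lengths that are not
 * a multiple of four are routed to fc_frame_alloc_fill() so the fill
 * bytes are zeroed but excluded from the skb length. For example, a
 * 13-byte payload allocates 16 payload bytes, clears the last 3, and
 * trims the skb back to 13 + sizeof(struct fc_frame_header).
 *
 *	fp = fc_frame_alloc(lport, 13);
 *	if (!fp)
 *		return -ENOMEM;
 */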
| linux-master | drivers/scsi/libfc/fc_frame.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright(c) 2007 - 2008 Intel Corporation. All rights reserved.
*
* Maintained at www.Open-FCoE.org
*/
/*
* RPORT GENERAL INFO
*
* This file contains all processing regarding fc_rports. It contains the
* rport state machine and does all rport interaction with the transport class.
* There should be no other places in libfc that interact directly with the
* transport class in regards to adding and deleting rports.
*
 * fc_rports represent remote N_Ports within the fabric.
*/
/*
* RPORT LOCKING
*
* The rport should never hold the rport mutex and then attempt to acquire
* either the lport or disc mutexes. The rport's mutex is considered lesser
* than both the lport's mutex and the disc mutex. Refer to fc_lport.c for
* more comments on the hierarchy.
*
* The locking strategy is similar to the lport's strategy. The lock protects
* the rport's states and is held and released by the entry points to the rport
* block. All _enter_* functions correspond to rport states and expect the rport
* mutex to be locked before calling them. This means that rports only handle
 * one request or response at a time. Since they are not critical for the I/O
 * path, this potential over-use of the mutex is acceptable.
*/
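/*
 * A minimal sketch of the entry-point pattern described above; the real
 * entry points such as fc_rport_login() below follow the same shape:
 *
 *	mutex_lock(&rdata->rp_mutex);
 *	fc_rport_enter_plogi(rdata);	// _enter_* expects rp_mutex held
 *	mutex_unlock(&rdata->rp_mutex);
 */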
/*
* RPORT REFERENCE COUNTING
*
 * An rport reference should be taken when:
* - an rport is allocated
* - a workqueue item is scheduled
 * - an ELS request is sent
* The reference should be dropped when:
* - the workqueue function has finished
* - the ELS response is handled
* - an rport is removed
*/
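/*
 * A minimal sketch of the reference pairing described above, as used
 * when sending an ELS request (see fc_rport_enter_plogi() and friends):
 *
 *	kref_get(&rdata->kref);		// held for the pending response
 *	if (!lport->tt.elsct_send(lport, rdata->ids.port_id, fp, ELS_PLOGI,
 *				  fc_rport_plogi_resp, rdata,
 *				  2 * lport->r_a_tov))
 *		kref_put(&rdata->kref, fc_rport_destroy);	// send failed
 *
 * On the success path the matching kref_put() is done by the response
 * handler once it has finished with rdata.
 */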
#include <linux/kernel.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/rcupdate.h>
#include <linux/timer.h>
#include <linux/workqueue.h>
#include <linux/export.h>
#include <linux/rculist.h>
#include <asm/unaligned.h>
#include <scsi/libfc.h>
#include "fc_encode.h"
#include "fc_libfc.h"
static struct workqueue_struct *rport_event_queue;
static void fc_rport_enter_flogi(struct fc_rport_priv *);
static void fc_rport_enter_plogi(struct fc_rport_priv *);
static void fc_rport_enter_prli(struct fc_rport_priv *);
static void fc_rport_enter_rtv(struct fc_rport_priv *);
static void fc_rport_enter_ready(struct fc_rport_priv *);
static void fc_rport_enter_logo(struct fc_rport_priv *);
static void fc_rport_enter_adisc(struct fc_rport_priv *);
static void fc_rport_recv_plogi_req(struct fc_lport *, struct fc_frame *);
static void fc_rport_recv_prli_req(struct fc_rport_priv *, struct fc_frame *);
static void fc_rport_recv_prlo_req(struct fc_rport_priv *, struct fc_frame *);
static void fc_rport_recv_logo_req(struct fc_lport *, struct fc_frame *);
static void fc_rport_timeout(struct work_struct *);
static void fc_rport_error(struct fc_rport_priv *, int);
static void fc_rport_error_retry(struct fc_rport_priv *, int);
static void fc_rport_work(struct work_struct *);
static const char *fc_rport_state_names[] = {
[RPORT_ST_INIT] = "Init",
[RPORT_ST_FLOGI] = "FLOGI",
[RPORT_ST_PLOGI_WAIT] = "PLOGI_WAIT",
[RPORT_ST_PLOGI] = "PLOGI",
[RPORT_ST_PRLI] = "PRLI",
[RPORT_ST_RTV] = "RTV",
[RPORT_ST_READY] = "Ready",
[RPORT_ST_ADISC] = "ADISC",
[RPORT_ST_DELETE] = "Delete",
};
/**
* fc_rport_lookup() - Lookup a remote port by port_id
* @lport: The local port to lookup the remote port on
* @port_id: The remote port ID to look up
*
 * If a matching remote port is found, the reference count of its
 * fc_rport_priv structure is increased by one before it is returned.
*/
struct fc_rport_priv *fc_rport_lookup(const struct fc_lport *lport,
u32 port_id)
{
struct fc_rport_priv *rdata = NULL, *tmp_rdata;
rcu_read_lock();
list_for_each_entry_rcu(tmp_rdata, &lport->disc.rports, peers)
if (tmp_rdata->ids.port_id == port_id &&
kref_get_unless_zero(&tmp_rdata->kref)) {
rdata = tmp_rdata;
break;
}
rcu_read_unlock();
return rdata;
}
EXPORT_SYMBOL(fc_rport_lookup);
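/*
 * Illustrative sketch (an assumption about callers, not a call site in
 * this file): every successful lookup returns a held reference that the
 * caller must drop when done.
 *
 *	rdata = fc_rport_lookup(lport, port_id);
 *	if (rdata) {
 *		...			// use rdata
 *		kref_put(&rdata->kref, fc_rport_destroy);
 *	}
 */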
/**
* fc_rport_create() - Create a new remote port
* @lport: The local port this remote port will be associated with
 * @port_id: The port ID for the new remote port
*
* The remote port will start in the INIT state.
*/
struct fc_rport_priv *fc_rport_create(struct fc_lport *lport, u32 port_id)
{
struct fc_rport_priv *rdata;
size_t rport_priv_size = sizeof(*rdata);
lockdep_assert_held(&lport->disc.disc_mutex);
rdata = fc_rport_lookup(lport, port_id);
if (rdata) {
kref_put(&rdata->kref, fc_rport_destroy);
return rdata;
}
if (lport->rport_priv_size > 0)
rport_priv_size = lport->rport_priv_size;
rdata = kzalloc(rport_priv_size, GFP_KERNEL);
if (!rdata)
return NULL;
rdata->ids.node_name = -1;
rdata->ids.port_name = -1;
rdata->ids.port_id = port_id;
rdata->ids.roles = FC_RPORT_ROLE_UNKNOWN;
kref_init(&rdata->kref);
mutex_init(&rdata->rp_mutex);
rdata->local_port = lport;
rdata->rp_state = RPORT_ST_INIT;
rdata->event = RPORT_EV_NONE;
rdata->flags = FC_RP_FLAGS_REC_SUPPORTED;
rdata->e_d_tov = lport->e_d_tov;
rdata->r_a_tov = lport->r_a_tov;
rdata->maxframe_size = FC_MIN_MAX_PAYLOAD;
INIT_DELAYED_WORK(&rdata->retry_work, fc_rport_timeout);
INIT_WORK(&rdata->event_work, fc_rport_work);
if (port_id != FC_FID_DIR_SERV) {
rdata->lld_event_callback = lport->tt.rport_event_callback;
list_add_rcu(&rdata->peers, &lport->disc.rports);
}
return rdata;
}
EXPORT_SYMBOL(fc_rport_create);
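/*
 * Illustrative sketch (an assumption about callers, not a call site in
 * this file): discovery code creates rports while holding the disc
 * mutex and then starts the login state machine, much as
 * fc_disc_gpn_id_resp() in fc_disc.c does:
 *
 *	mutex_lock(&lport->disc.disc_mutex);
 *	rdata = fc_rport_create(lport, port_id);
 *	mutex_unlock(&lport->disc.disc_mutex);
 *	if (rdata)
 *		fc_rport_login(rdata);
 */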
/**
* fc_rport_destroy() - Free a remote port after last reference is released
* @kref: The remote port's kref
*/
void fc_rport_destroy(struct kref *kref)
{
struct fc_rport_priv *rdata;
rdata = container_of(kref, struct fc_rport_priv, kref);
kfree_rcu(rdata, rcu);
}
EXPORT_SYMBOL(fc_rport_destroy);
/**
* fc_rport_state() - Return a string identifying the remote port's state
* @rdata: The remote port
*/
static const char *fc_rport_state(struct fc_rport_priv *rdata)
{
const char *cp;
cp = fc_rport_state_names[rdata->rp_state];
if (!cp)
cp = "Unknown";
return cp;
}
/**
* fc_set_rport_loss_tmo() - Set the remote port loss timeout
* @rport: The remote port that gets a new timeout value
* @timeout: The new timeout value (in seconds)
*/
void fc_set_rport_loss_tmo(struct fc_rport *rport, u32 timeout)
{
if (timeout)
rport->dev_loss_tmo = timeout;
else
rport->dev_loss_tmo = 1;
}
EXPORT_SYMBOL(fc_set_rport_loss_tmo);
/**
* fc_plogi_get_maxframe() - Get the maximum payload from the common service
* parameters in a FLOGI frame
* @flp: The FLOGI or PLOGI payload
* @maxval: The maximum frame size upper limit; this may be less than what
* is in the service parameters
*/
static unsigned int fc_plogi_get_maxframe(struct fc_els_flogi *flp,
unsigned int maxval)
{
unsigned int mfs;
/*
* Get max payload from the common service parameters and the
* class 3 receive data field size.
*/
mfs = ntohs(flp->fl_csp.sp_bb_data) & FC_SP_BB_DATA_MASK;
if (mfs >= FC_SP_MIN_MAX_PAYLOAD && mfs < maxval)
maxval = mfs;
mfs = ntohs(flp->fl_cssp[3 - 1].cp_rdfs);
if (mfs >= FC_SP_MIN_MAX_PAYLOAD && mfs < maxval)
maxval = mfs;
return maxval;
}
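/*
 * Worked example for fc_plogi_get_maxframe() with illustrative numbers:
 * if the peer advertises a common-services receive size of 2112 bytes
 * and a class 3 receive data field size of 2048 bytes while our own
 * limit (maxval) is 2048, the result is min(2112, 2048, 2048) = 2048.
 * Advertised values below FC_SP_MIN_MAX_PAYLOAD are ignored.
 */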
/**
* fc_rport_state_enter() - Change the state of a remote port
* @rdata: The remote port whose state should change
* @new: The new state
*/
static void fc_rport_state_enter(struct fc_rport_priv *rdata,
enum fc_rport_state new)
{
lockdep_assert_held(&rdata->rp_mutex);
if (rdata->rp_state != new)
rdata->retries = 0;
rdata->rp_state = new;
}
/**
* fc_rport_work() - Handler for remote port events in the rport_event_queue
* @work: Handle to the remote port being dequeued
*
* Reference counting: drops kref on return
*/
static void fc_rport_work(struct work_struct *work)
{
u32 port_id;
struct fc_rport_priv *rdata =
container_of(work, struct fc_rport_priv, event_work);
struct fc_rport_libfc_priv *rpriv;
enum fc_rport_event event;
struct fc_lport *lport = rdata->local_port;
struct fc_rport_operations *rport_ops;
struct fc_rport_identifiers ids;
struct fc_rport *rport;
struct fc4_prov *prov;
u8 type;
mutex_lock(&rdata->rp_mutex);
event = rdata->event;
rport_ops = rdata->ops;
rport = rdata->rport;
FC_RPORT_DBG(rdata, "work event %u\n", event);
switch (event) {
case RPORT_EV_READY:
ids = rdata->ids;
rdata->event = RPORT_EV_NONE;
rdata->major_retries = 0;
kref_get(&rdata->kref);
mutex_unlock(&rdata->rp_mutex);
if (!rport) {
FC_RPORT_DBG(rdata, "No rport!\n");
rport = fc_remote_port_add(lport->host, 0, &ids);
}
if (!rport) {
FC_RPORT_DBG(rdata, "Failed to add the rport\n");
fc_rport_logoff(rdata);
kref_put(&rdata->kref, fc_rport_destroy);
return;
}
mutex_lock(&rdata->rp_mutex);
if (rdata->rport)
FC_RPORT_DBG(rdata, "rport already allocated\n");
rdata->rport = rport;
rport->maxframe_size = rdata->maxframe_size;
rport->supported_classes = rdata->supported_classes;
rpriv = rport->dd_data;
rpriv->local_port = lport;
rpriv->rp_state = rdata->rp_state;
rpriv->flags = rdata->flags;
rpriv->e_d_tov = rdata->e_d_tov;
rpriv->r_a_tov = rdata->r_a_tov;
mutex_unlock(&rdata->rp_mutex);
if (rport_ops && rport_ops->event_callback) {
FC_RPORT_DBG(rdata, "callback ev %d\n", event);
rport_ops->event_callback(lport, rdata, event);
}
if (rdata->lld_event_callback) {
FC_RPORT_DBG(rdata, "lld callback ev %d\n", event);
rdata->lld_event_callback(lport, rdata, event);
}
kref_put(&rdata->kref, fc_rport_destroy);
break;
case RPORT_EV_FAILED:
case RPORT_EV_LOGO:
case RPORT_EV_STOP:
if (rdata->prli_count) {
mutex_lock(&fc_prov_mutex);
for (type = 1; type < FC_FC4_PROV_SIZE; type++) {
prov = fc_passive_prov[type];
if (prov && prov->prlo)
prov->prlo(rdata);
}
mutex_unlock(&fc_prov_mutex);
}
port_id = rdata->ids.port_id;
mutex_unlock(&rdata->rp_mutex);
if (rport_ops && rport_ops->event_callback) {
FC_RPORT_DBG(rdata, "callback ev %d\n", event);
rport_ops->event_callback(lport, rdata, event);
}
if (rdata->lld_event_callback) {
FC_RPORT_DBG(rdata, "lld callback ev %d\n", event);
rdata->lld_event_callback(lport, rdata, event);
}
if (cancel_delayed_work_sync(&rdata->retry_work))
kref_put(&rdata->kref, fc_rport_destroy);
/*
* Reset any outstanding exchanges before freeing rport.
*/
lport->tt.exch_mgr_reset(lport, 0, port_id);
lport->tt.exch_mgr_reset(lport, port_id, 0);
if (rport) {
rpriv = rport->dd_data;
rpriv->rp_state = RPORT_ST_DELETE;
mutex_lock(&rdata->rp_mutex);
rdata->rport = NULL;
mutex_unlock(&rdata->rp_mutex);
fc_remote_port_delete(rport);
}
mutex_lock(&rdata->rp_mutex);
if (rdata->rp_state == RPORT_ST_DELETE) {
if (port_id == FC_FID_DIR_SERV) {
rdata->event = RPORT_EV_NONE;
mutex_unlock(&rdata->rp_mutex);
kref_put(&rdata->kref, fc_rport_destroy);
} else if ((rdata->flags & FC_RP_STARTED) &&
rdata->major_retries <
lport->max_rport_retry_count) {
rdata->major_retries++;
rdata->event = RPORT_EV_NONE;
FC_RPORT_DBG(rdata, "work restart\n");
fc_rport_enter_flogi(rdata);
mutex_unlock(&rdata->rp_mutex);
} else {
mutex_unlock(&rdata->rp_mutex);
FC_RPORT_DBG(rdata, "work delete\n");
mutex_lock(&lport->disc.disc_mutex);
list_del_rcu(&rdata->peers);
mutex_unlock(&lport->disc.disc_mutex);
kref_put(&rdata->kref, fc_rport_destroy);
}
} else {
/*
* Re-open for events. Reissue READY event if ready.
*/
rdata->event = RPORT_EV_NONE;
if (rdata->rp_state == RPORT_ST_READY) {
FC_RPORT_DBG(rdata, "work reopen\n");
fc_rport_enter_ready(rdata);
}
mutex_unlock(&rdata->rp_mutex);
}
break;
default:
mutex_unlock(&rdata->rp_mutex);
break;
}
kref_put(&rdata->kref, fc_rport_destroy);
}
/**
* fc_rport_login() - Start the remote port login state machine
* @rdata: The remote port to be logged in to
*
* Initiates the RP state machine. It is called from the LP module.
* This function will issue the following commands to the N_Port
* identified by the FC ID provided.
*
* - PLOGI
* - PRLI
* - RTV
*
* Locking Note: Called without the rport lock held. This
* function will hold the rport lock, call an _enter_*
* function and then unlock the rport.
*
* This indicates the intent to be logged into the remote port.
* If it appears we are already logged in, ADISC is used to verify
* the setup.
*/
int fc_rport_login(struct fc_rport_priv *rdata)
{
mutex_lock(&rdata->rp_mutex);
if (rdata->flags & FC_RP_STARTED) {
FC_RPORT_DBG(rdata, "port already started\n");
mutex_unlock(&rdata->rp_mutex);
return 0;
}
rdata->flags |= FC_RP_STARTED;
switch (rdata->rp_state) {
case RPORT_ST_READY:
FC_RPORT_DBG(rdata, "ADISC port\n");
fc_rport_enter_adisc(rdata);
break;
case RPORT_ST_DELETE:
FC_RPORT_DBG(rdata, "Restart deleted port\n");
break;
case RPORT_ST_INIT:
FC_RPORT_DBG(rdata, "Login to port\n");
fc_rport_enter_flogi(rdata);
break;
default:
FC_RPORT_DBG(rdata, "Login in progress, state %s\n",
fc_rport_state(rdata));
break;
}
mutex_unlock(&rdata->rp_mutex);
return 0;
}
EXPORT_SYMBOL(fc_rport_login);
/**
* fc_rport_enter_delete() - Schedule a remote port to be deleted
* @rdata: The remote port to be deleted
* @event: The event to report as the reason for deletion
*
* Allow state change into DELETE only once.
*
* Call queue_work only if there's no event already pending.
* Set the new event so that the old pending event will not occur.
* Since we have the mutex, even if fc_rport_work() is already started,
* it'll see the new event.
*
* Reference counting: does not modify kref
*/
static void fc_rport_enter_delete(struct fc_rport_priv *rdata,
enum fc_rport_event event)
{
lockdep_assert_held(&rdata->rp_mutex);
if (rdata->rp_state == RPORT_ST_DELETE)
return;
FC_RPORT_DBG(rdata, "Delete port\n");
fc_rport_state_enter(rdata, RPORT_ST_DELETE);
if (rdata->event == RPORT_EV_NONE) {
kref_get(&rdata->kref);
if (!queue_work(rport_event_queue, &rdata->event_work))
kref_put(&rdata->kref, fc_rport_destroy);
}
rdata->event = event;
}
/**
* fc_rport_logoff() - Logoff and remove a remote port
* @rdata: The remote port to be logged off of
*
* Locking Note: Called without the rport lock held. This
* function will hold the rport lock, call an _enter_*
* function and then unlock the rport.
*/
int fc_rport_logoff(struct fc_rport_priv *rdata)
{
struct fc_lport *lport = rdata->local_port;
u32 port_id = rdata->ids.port_id;
mutex_lock(&rdata->rp_mutex);
FC_RPORT_DBG(rdata, "Remove port\n");
rdata->flags &= ~FC_RP_STARTED;
if (rdata->rp_state == RPORT_ST_DELETE) {
FC_RPORT_DBG(rdata, "Port in Delete state, not removing\n");
goto out;
}
/*
* FC-LS states:
* To explicitly Logout, the initiating Nx_Port shall terminate
* other open Sequences that it initiated with the destination
* Nx_Port prior to performing Logout.
*/
lport->tt.exch_mgr_reset(lport, 0, port_id);
lport->tt.exch_mgr_reset(lport, port_id, 0);
fc_rport_enter_logo(rdata);
/*
* Change the state to Delete so that we discard
* the response.
*/
fc_rport_enter_delete(rdata, RPORT_EV_STOP);
out:
mutex_unlock(&rdata->rp_mutex);
return 0;
}
EXPORT_SYMBOL(fc_rport_logoff);
/**
* fc_rport_enter_ready() - Transition to the RPORT_ST_READY state
* @rdata: The remote port that is ready
*
* Reference counting: schedules workqueue, does not modify kref
*/
static void fc_rport_enter_ready(struct fc_rport_priv *rdata)
{
lockdep_assert_held(&rdata->rp_mutex);
fc_rport_state_enter(rdata, RPORT_ST_READY);
FC_RPORT_DBG(rdata, "Port is Ready\n");
kref_get(&rdata->kref);
if (rdata->event == RPORT_EV_NONE &&
!queue_work(rport_event_queue, &rdata->event_work))
kref_put(&rdata->kref, fc_rport_destroy);
rdata->event = RPORT_EV_READY;
}
/**
* fc_rport_timeout() - Handler for the retry_work timer
* @work: Handle to the remote port that has timed out
*
* Locking Note: Called without the rport lock held. This
* function will hold the rport lock, call an _enter_*
* function and then unlock the rport.
*
* Reference counting: Drops kref on return.
*/
static void fc_rport_timeout(struct work_struct *work)
{
struct fc_rport_priv *rdata =
container_of(work, struct fc_rport_priv, retry_work.work);
mutex_lock(&rdata->rp_mutex);
FC_RPORT_DBG(rdata, "Port timeout, state %s\n", fc_rport_state(rdata));
switch (rdata->rp_state) {
case RPORT_ST_FLOGI:
fc_rport_enter_flogi(rdata);
break;
case RPORT_ST_PLOGI:
fc_rport_enter_plogi(rdata);
break;
case RPORT_ST_PRLI:
fc_rport_enter_prli(rdata);
break;
case RPORT_ST_RTV:
fc_rport_enter_rtv(rdata);
break;
case RPORT_ST_ADISC:
fc_rport_enter_adisc(rdata);
break;
case RPORT_ST_PLOGI_WAIT:
case RPORT_ST_READY:
case RPORT_ST_INIT:
case RPORT_ST_DELETE:
break;
}
mutex_unlock(&rdata->rp_mutex);
kref_put(&rdata->kref, fc_rport_destroy);
}
/**
* fc_rport_error() - Error handler, called once retries have been exhausted
 * @rdata: The remote port on which the error happened
* @err: The error code
*
* Reference counting: does not modify kref
*/
static void fc_rport_error(struct fc_rport_priv *rdata, int err)
{
struct fc_lport *lport = rdata->local_port;
lockdep_assert_held(&rdata->rp_mutex);
FC_RPORT_DBG(rdata, "Error %d in state %s, retries %d\n",
-err, fc_rport_state(rdata), rdata->retries);
switch (rdata->rp_state) {
case RPORT_ST_FLOGI:
rdata->flags &= ~FC_RP_STARTED;
fc_rport_enter_delete(rdata, RPORT_EV_FAILED);
break;
case RPORT_ST_PLOGI:
if (lport->point_to_multipoint) {
rdata->flags &= ~FC_RP_STARTED;
fc_rport_enter_delete(rdata, RPORT_EV_FAILED);
} else
fc_rport_enter_logo(rdata);
break;
case RPORT_ST_RTV:
fc_rport_enter_ready(rdata);
break;
case RPORT_ST_PRLI:
fc_rport_enter_plogi(rdata);
break;
case RPORT_ST_ADISC:
fc_rport_enter_logo(rdata);
break;
case RPORT_ST_PLOGI_WAIT:
case RPORT_ST_DELETE:
case RPORT_ST_READY:
case RPORT_ST_INIT:
break;
}
}
/**
* fc_rport_error_retry() - Handler for remote port state retries
* @rdata: The remote port whose state is to be retried
* @err: The error code
*
* If the error was an exchange timeout retry immediately,
* otherwise wait for E_D_TOV.
*
* Reference counting: increments kref when scheduling retry_work
*/
static void fc_rport_error_retry(struct fc_rport_priv *rdata, int err)
{
unsigned long delay = msecs_to_jiffies(rdata->e_d_tov);
lockdep_assert_held(&rdata->rp_mutex);
/* make sure this isn't an FC_EX_CLOSED error, never retry those */
if (err == -FC_EX_CLOSED)
goto out;
if (rdata->retries < rdata->local_port->max_rport_retry_count) {
FC_RPORT_DBG(rdata, "Error %d in state %s, retrying\n",
err, fc_rport_state(rdata));
rdata->retries++;
/* no additional delay on exchange timeouts */
if (err == -FC_EX_TIMEOUT)
delay = 0;
kref_get(&rdata->kref);
if (!schedule_delayed_work(&rdata->retry_work, delay))
kref_put(&rdata->kref, fc_rport_destroy);
return;
}
out:
fc_rport_error(rdata, err);
}
/**
* fc_rport_login_complete() - Handle parameters and completion of p-mp login.
* @rdata: The remote port which we logged into or which logged into us.
* @fp: The FLOGI or PLOGI request or response frame
*
* Returns non-zero error if a problem is detected with the frame.
* Does not free the frame.
*
* This is only used in point-to-multipoint mode for FIP currently.
*/
static int fc_rport_login_complete(struct fc_rport_priv *rdata,
struct fc_frame *fp)
{
struct fc_lport *lport = rdata->local_port;
struct fc_els_flogi *flogi;
unsigned int e_d_tov;
u16 csp_flags;
flogi = fc_frame_payload_get(fp, sizeof(*flogi));
if (!flogi)
return -EINVAL;
csp_flags = ntohs(flogi->fl_csp.sp_features);
if (fc_frame_payload_op(fp) == ELS_FLOGI) {
if (csp_flags & FC_SP_FT_FPORT) {
FC_RPORT_DBG(rdata, "Fabric bit set in FLOGI\n");
return -EINVAL;
}
} else {
/*
* E_D_TOV is not valid on an incoming FLOGI request.
*/
e_d_tov = ntohl(flogi->fl_csp.sp_e_d_tov);
if (csp_flags & FC_SP_FT_EDTR)
e_d_tov /= 1000000;
if (e_d_tov > rdata->e_d_tov)
rdata->e_d_tov = e_d_tov;
}
rdata->maxframe_size = fc_plogi_get_maxframe(flogi, lport->mfs);
return 0;
}
/**
* fc_rport_flogi_resp() - Handle response to FLOGI request for p-mp mode
* @sp: The sequence that the FLOGI was on
* @fp: The FLOGI response frame
* @rp_arg: The remote port that received the FLOGI response
*/
static void fc_rport_flogi_resp(struct fc_seq *sp, struct fc_frame *fp,
void *rp_arg)
{
struct fc_rport_priv *rdata = rp_arg;
struct fc_lport *lport = rdata->local_port;
struct fc_els_flogi *flogi;
unsigned int r_a_tov;
u8 opcode;
int err = 0;
FC_RPORT_DBG(rdata, "Received a FLOGI %s\n",
IS_ERR(fp) ? "error" : fc_els_resp_type(fp));
if (fp == ERR_PTR(-FC_EX_CLOSED))
goto put;
mutex_lock(&rdata->rp_mutex);
if (rdata->rp_state != RPORT_ST_FLOGI) {
FC_RPORT_DBG(rdata, "Received a FLOGI response, but in state "
"%s\n", fc_rport_state(rdata));
if (IS_ERR(fp))
goto err;
goto out;
}
if (IS_ERR(fp)) {
fc_rport_error(rdata, PTR_ERR(fp));
goto err;
}
opcode = fc_frame_payload_op(fp);
if (opcode == ELS_LS_RJT) {
struct fc_els_ls_rjt *rjt;
rjt = fc_frame_payload_get(fp, sizeof(*rjt));
FC_RPORT_DBG(rdata, "FLOGI ELS rejected, reason %x expl %x\n",
rjt->er_reason, rjt->er_explan);
err = -FC_EX_ELS_RJT;
goto bad;
} else if (opcode != ELS_LS_ACC) {
FC_RPORT_DBG(rdata, "FLOGI ELS invalid opcode %x\n", opcode);
err = -FC_EX_ELS_RJT;
goto bad;
}
if (fc_rport_login_complete(rdata, fp)) {
FC_RPORT_DBG(rdata, "FLOGI failed, no login\n");
err = -FC_EX_INV_LOGIN;
goto bad;
}
flogi = fc_frame_payload_get(fp, sizeof(*flogi));
if (!flogi) {
err = -FC_EX_ALLOC_ERR;
goto bad;
}
r_a_tov = ntohl(flogi->fl_csp.sp_r_a_tov);
if (r_a_tov > rdata->r_a_tov)
rdata->r_a_tov = r_a_tov;
if (rdata->ids.port_name < lport->wwpn)
fc_rport_enter_plogi(rdata);
else
fc_rport_state_enter(rdata, RPORT_ST_PLOGI_WAIT);
out:
fc_frame_free(fp);
err:
mutex_unlock(&rdata->rp_mutex);
put:
kref_put(&rdata->kref, fc_rport_destroy);
return;
bad:
FC_RPORT_DBG(rdata, "Bad FLOGI response\n");
fc_rport_error_retry(rdata, err);
goto out;
}
/**
* fc_rport_enter_flogi() - Send a FLOGI request to the remote port for p-mp
* @rdata: The remote port to send a FLOGI to
*
* Reference counting: increments kref when sending ELS
*/
static void fc_rport_enter_flogi(struct fc_rport_priv *rdata)
{
struct fc_lport *lport = rdata->local_port;
struct fc_frame *fp;
lockdep_assert_held(&rdata->rp_mutex);
if (!lport->point_to_multipoint)
return fc_rport_enter_plogi(rdata);
FC_RPORT_DBG(rdata, "Entered FLOGI state from %s state\n",
fc_rport_state(rdata));
fc_rport_state_enter(rdata, RPORT_ST_FLOGI);
fp = fc_frame_alloc(lport, sizeof(struct fc_els_flogi));
if (!fp)
return fc_rport_error_retry(rdata, -FC_EX_ALLOC_ERR);
kref_get(&rdata->kref);
if (!lport->tt.elsct_send(lport, rdata->ids.port_id, fp, ELS_FLOGI,
fc_rport_flogi_resp, rdata,
2 * lport->r_a_tov)) {
fc_rport_error_retry(rdata, -FC_EX_XMIT_ERR);
kref_put(&rdata->kref, fc_rport_destroy);
}
}
/**
* fc_rport_recv_flogi_req() - Handle Fabric Login (FLOGI) request in p-mp mode
 * @lport: The local port that received the FLOGI request
 * @rx_fp: The FLOGI request frame
*
* Reference counting: drops kref on return
*/
static void fc_rport_recv_flogi_req(struct fc_lport *lport,
struct fc_frame *rx_fp)
{
struct fc_els_flogi *flp;
struct fc_rport_priv *rdata;
struct fc_frame *fp = rx_fp;
struct fc_seq_els_data rjt_data;
u32 sid;
sid = fc_frame_sid(fp);
FC_RPORT_ID_DBG(lport, sid, "Received FLOGI request\n");
if (!lport->point_to_multipoint) {
rjt_data.reason = ELS_RJT_UNSUP;
rjt_data.explan = ELS_EXPL_NONE;
goto reject;
}
flp = fc_frame_payload_get(fp, sizeof(*flp));
if (!flp) {
rjt_data.reason = ELS_RJT_LOGIC;
rjt_data.explan = ELS_EXPL_INV_LEN;
goto reject;
}
rdata = fc_rport_lookup(lport, sid);
if (!rdata) {
rjt_data.reason = ELS_RJT_FIP;
rjt_data.explan = ELS_EXPL_NOT_NEIGHBOR;
goto reject;
}
mutex_lock(&rdata->rp_mutex);
FC_RPORT_DBG(rdata, "Received FLOGI in %s state\n",
fc_rport_state(rdata));
switch (rdata->rp_state) {
case RPORT_ST_INIT:
/*
 * If the FLOGI request was received while the rport is still in the
 * INIT state (i.e. it has not transitioned to FLOGI because the
 * fc_rport timeout has not fired and this end has not yet received a
 * beacon from the other end), allow the rport state machine to
 * continue. Otherwise fall through, which causes a reject response
 * to be sent.
 * NOTE: There is no need to check the FIP state (such as VNMP_UP or
 * VNMP_CLAIM) here; if the FIP state were anything else, the rport
 * would not have been created and fc_rport_lookup() above would have
 * failed anyway.
*/
break;
case RPORT_ST_DELETE:
mutex_unlock(&rdata->rp_mutex);
rjt_data.reason = ELS_RJT_FIP;
rjt_data.explan = ELS_EXPL_NOT_NEIGHBOR;
goto reject_put;
case RPORT_ST_FLOGI:
case RPORT_ST_PLOGI_WAIT:
case RPORT_ST_PLOGI:
break;
case RPORT_ST_PRLI:
case RPORT_ST_RTV:
case RPORT_ST_READY:
case RPORT_ST_ADISC:
/*
* Set the remote port to be deleted and to then restart.
* This queues work to be sure exchanges are reset.
*/
fc_rport_enter_delete(rdata, RPORT_EV_LOGO);
mutex_unlock(&rdata->rp_mutex);
rjt_data.reason = ELS_RJT_BUSY;
rjt_data.explan = ELS_EXPL_NONE;
goto reject_put;
}
if (fc_rport_login_complete(rdata, fp)) {
mutex_unlock(&rdata->rp_mutex);
rjt_data.reason = ELS_RJT_LOGIC;
rjt_data.explan = ELS_EXPL_NONE;
goto reject_put;
}
fp = fc_frame_alloc(lport, sizeof(*flp));
if (!fp)
goto out;
fc_flogi_fill(lport, fp);
flp = fc_frame_payload_get(fp, sizeof(*flp));
flp->fl_cmd = ELS_LS_ACC;
fc_fill_reply_hdr(fp, rx_fp, FC_RCTL_ELS_REP, 0);
lport->tt.frame_send(lport, fp);
/*
* Do not proceed with the state machine if our
* FLOGI has crossed with an FLOGI from the
* remote port; wait for the FLOGI response instead.
*/
if (rdata->rp_state != RPORT_ST_FLOGI) {
if (rdata->ids.port_name < lport->wwpn)
fc_rport_enter_plogi(rdata);
else
fc_rport_state_enter(rdata, RPORT_ST_PLOGI_WAIT);
}
out:
mutex_unlock(&rdata->rp_mutex);
kref_put(&rdata->kref, fc_rport_destroy);
fc_frame_free(rx_fp);
return;
reject_put:
kref_put(&rdata->kref, fc_rport_destroy);
reject:
fc_seq_els_rsp_send(rx_fp, ELS_LS_RJT, &rjt_data);
fc_frame_free(rx_fp);
}
/**
* fc_rport_plogi_resp() - Handler for ELS PLOGI responses
* @sp: The sequence the PLOGI is on
* @fp: The PLOGI response frame
* @rdata_arg: The remote port that sent the PLOGI response
*
* Locking Note: This function will be called without the rport lock
* held, but it will lock, call an _enter_* function or fc_rport_error
* and then unlock the rport.
*/
static void fc_rport_plogi_resp(struct fc_seq *sp, struct fc_frame *fp,
void *rdata_arg)
{
struct fc_rport_priv *rdata = rdata_arg;
struct fc_lport *lport = rdata->local_port;
struct fc_els_flogi *plp = NULL;
u16 csp_seq;
u16 cssp_seq;
u8 op;
FC_RPORT_DBG(rdata, "Received a PLOGI %s\n", fc_els_resp_type(fp));
if (fp == ERR_PTR(-FC_EX_CLOSED))
goto put;
mutex_lock(&rdata->rp_mutex);
if (rdata->rp_state != RPORT_ST_PLOGI) {
FC_RPORT_DBG(rdata, "Received a PLOGI response, but in state "
"%s\n", fc_rport_state(rdata));
if (IS_ERR(fp))
goto err;
goto out;
}
if (IS_ERR(fp)) {
fc_rport_error_retry(rdata, PTR_ERR(fp));
goto err;
}
op = fc_frame_payload_op(fp);
if (op == ELS_LS_ACC &&
(plp = fc_frame_payload_get(fp, sizeof(*plp))) != NULL) {
rdata->ids.port_name = get_unaligned_be64(&plp->fl_wwpn);
rdata->ids.node_name = get_unaligned_be64(&plp->fl_wwnn);
/* save plogi response sp_features for further reference */
rdata->sp_features = ntohs(plp->fl_csp.sp_features);
if (lport->point_to_multipoint)
fc_rport_login_complete(rdata, fp);
csp_seq = ntohs(plp->fl_csp.sp_tot_seq);
cssp_seq = ntohs(plp->fl_cssp[3 - 1].cp_con_seq);
if (cssp_seq < csp_seq)
csp_seq = cssp_seq;
rdata->max_seq = csp_seq;
rdata->maxframe_size = fc_plogi_get_maxframe(plp, lport->mfs);
fc_rport_enter_prli(rdata);
} else {
struct fc_els_ls_rjt *rjt;
rjt = fc_frame_payload_get(fp, sizeof(*rjt));
if (!rjt)
FC_RPORT_DBG(rdata, "PLOGI bad response\n");
else
FC_RPORT_DBG(rdata, "PLOGI ELS rejected, reason %x expl %x\n",
rjt->er_reason, rjt->er_explan);
fc_rport_error_retry(rdata, -FC_EX_ELS_RJT);
}
out:
fc_frame_free(fp);
err:
mutex_unlock(&rdata->rp_mutex);
put:
kref_put(&rdata->kref, fc_rport_destroy);
}
static bool
fc_rport_compatible_roles(struct fc_lport *lport, struct fc_rport_priv *rdata)
{
if (rdata->ids.roles == FC_PORT_ROLE_UNKNOWN)
return true;
if ((rdata->ids.roles & FC_PORT_ROLE_FCP_TARGET) &&
(lport->service_params & FCP_SPPF_INIT_FCN))
return true;
if ((rdata->ids.roles & FC_PORT_ROLE_FCP_INITIATOR) &&
(lport->service_params & FCP_SPPF_TARG_FCN))
return true;
return false;
}
/**
* fc_rport_enter_plogi() - Send Port Login (PLOGI) request
* @rdata: The remote port to send a PLOGI to
*
* Reference counting: increments kref when sending ELS
*/
static void fc_rport_enter_plogi(struct fc_rport_priv *rdata)
{
struct fc_lport *lport = rdata->local_port;
struct fc_frame *fp;
lockdep_assert_held(&rdata->rp_mutex);
if (!fc_rport_compatible_roles(lport, rdata)) {
FC_RPORT_DBG(rdata, "PLOGI suppressed for incompatible role\n");
fc_rport_state_enter(rdata, RPORT_ST_PLOGI_WAIT);
return;
}
FC_RPORT_DBG(rdata, "Port entered PLOGI state from %s state\n",
fc_rport_state(rdata));
fc_rport_state_enter(rdata, RPORT_ST_PLOGI);
rdata->maxframe_size = FC_MIN_MAX_PAYLOAD;
fp = fc_frame_alloc(lport, sizeof(struct fc_els_flogi));
if (!fp) {
FC_RPORT_DBG(rdata, "%s frame alloc failed\n", __func__);
fc_rport_error_retry(rdata, -FC_EX_ALLOC_ERR);
return;
}
rdata->e_d_tov = lport->e_d_tov;
kref_get(&rdata->kref);
if (!lport->tt.elsct_send(lport, rdata->ids.port_id, fp, ELS_PLOGI,
fc_rport_plogi_resp, rdata,
2 * lport->r_a_tov)) {
fc_rport_error_retry(rdata, -FC_EX_XMIT_ERR);
kref_put(&rdata->kref, fc_rport_destroy);
}
}
/**
* fc_rport_prli_resp() - Process Login (PRLI) response handler
* @sp: The sequence the PRLI response was on
* @fp: The PRLI response frame
* @rdata_arg: The remote port that sent the PRLI response
*
* Locking Note: This function will be called without the rport lock
* held, but it will lock, call an _enter_* function or fc_rport_error
* and then unlock the rport.
*/
static void fc_rport_prli_resp(struct fc_seq *sp, struct fc_frame *fp,
void *rdata_arg)
{
struct fc_rport_priv *rdata = rdata_arg;
struct {
struct fc_els_prli prli;
struct fc_els_spp spp;
} *pp;
struct fc_els_spp temp_spp;
struct fc_els_ls_rjt *rjt;
struct fc4_prov *prov;
u32 roles = FC_RPORT_ROLE_UNKNOWN;
u32 fcp_parm = 0;
u8 op;
enum fc_els_spp_resp resp_code;
FC_RPORT_DBG(rdata, "Received a PRLI %s\n", fc_els_resp_type(fp));
if (fp == ERR_PTR(-FC_EX_CLOSED))
goto put;
mutex_lock(&rdata->rp_mutex);
if (rdata->rp_state != RPORT_ST_PRLI) {
FC_RPORT_DBG(rdata, "Received a PRLI response, but in state "
"%s\n", fc_rport_state(rdata));
if (IS_ERR(fp))
goto err;
goto out;
}
if (IS_ERR(fp)) {
fc_rport_error_retry(rdata, PTR_ERR(fp));
goto err;
}
/* reinitialize remote port roles */
rdata->ids.roles = FC_RPORT_ROLE_UNKNOWN;
op = fc_frame_payload_op(fp);
if (op == ELS_LS_ACC) {
pp = fc_frame_payload_get(fp, sizeof(*pp));
if (!pp) {
fc_rport_error_retry(rdata, -FC_EX_SEQ_ERR);
goto out;
}
resp_code = (pp->spp.spp_flags & FC_SPP_RESP_MASK);
FC_RPORT_DBG(rdata, "PRLI spp_flags = 0x%x spp_type 0x%x\n",
pp->spp.spp_flags, pp->spp.spp_type);
rdata->spp_type = pp->spp.spp_type;
if (resp_code != FC_SPP_RESP_ACK) {
if (resp_code == FC_SPP_RESP_CONF)
fc_rport_error(rdata, -FC_EX_SEQ_ERR);
else
fc_rport_error_retry(rdata, -FC_EX_SEQ_ERR);
goto out;
}
if (pp->prli.prli_spp_len < sizeof(pp->spp)) {
fc_rport_error_retry(rdata, -FC_EX_SEQ_ERR);
goto out;
}
fcp_parm = ntohl(pp->spp.spp_params);
if (fcp_parm & FCP_SPPF_RETRY)
rdata->flags |= FC_RP_FLAGS_RETRY;
if (fcp_parm & FCP_SPPF_CONF_COMPL)
rdata->flags |= FC_RP_FLAGS_CONF_REQ;
/*
* Call prli provider if we should act as a target
*/
if (rdata->spp_type < FC_FC4_PROV_SIZE) {
prov = fc_passive_prov[rdata->spp_type];
if (prov) {
memset(&temp_spp, 0, sizeof(temp_spp));
prov->prli(rdata, pp->prli.prli_spp_len,
&pp->spp, &temp_spp);
}
}
/*
* Check if the image pair could be established
*/
if (rdata->spp_type != FC_TYPE_FCP ||
!(pp->spp.spp_flags & FC_SPP_EST_IMG_PAIR)) {
/*
* Nope; we can't use this port as a target.
*/
fcp_parm &= ~FCP_SPPF_TARG_FCN;
}
rdata->supported_classes = FC_COS_CLASS3;
if (fcp_parm & FCP_SPPF_INIT_FCN)
roles |= FC_RPORT_ROLE_FCP_INITIATOR;
if (fcp_parm & FCP_SPPF_TARG_FCN)
roles |= FC_RPORT_ROLE_FCP_TARGET;
rdata->ids.roles = roles;
fc_rport_enter_rtv(rdata);
} else {
rjt = fc_frame_payload_get(fp, sizeof(*rjt));
if (!rjt)
FC_RPORT_DBG(rdata, "PRLI bad response\n");
else {
FC_RPORT_DBG(rdata, "PRLI ELS rejected, reason %x expl %x\n",
rjt->er_reason, rjt->er_explan);
if (rjt->er_reason == ELS_RJT_UNAB &&
rjt->er_explan == ELS_EXPL_PLOGI_REQD) {
fc_rport_enter_plogi(rdata);
goto out;
}
}
		fc_rport_error_retry(rdata, -FC_EX_ELS_RJT);
}
out:
fc_frame_free(fp);
err:
mutex_unlock(&rdata->rp_mutex);
put:
kref_put(&rdata->kref, fc_rport_destroy);
}
/**
* fc_rport_enter_prli() - Send Process Login (PRLI) request
* @rdata: The remote port to send the PRLI request to
*
* Reference counting: increments kref when sending ELS
*/
static void fc_rport_enter_prli(struct fc_rport_priv *rdata)
{
struct fc_lport *lport = rdata->local_port;
struct {
struct fc_els_prli prli;
struct fc_els_spp spp;
} *pp;
struct fc_frame *fp;
struct fc4_prov *prov;
lockdep_assert_held(&rdata->rp_mutex);
/*
* If the rport is one of the well known addresses
* we skip PRLI and RTV and go straight to READY.
*/
if (rdata->ids.port_id >= FC_FID_DOM_MGR) {
fc_rport_enter_ready(rdata);
return;
}
/*
* And if the local port does not support the initiator function
* there's no need to send a PRLI, either.
*/
if (!(lport->service_params & FCP_SPPF_INIT_FCN)) {
fc_rport_enter_ready(rdata);
return;
}
FC_RPORT_DBG(rdata, "Port entered PRLI state from %s state\n",
fc_rport_state(rdata));
fc_rport_state_enter(rdata, RPORT_ST_PRLI);
fp = fc_frame_alloc(lport, sizeof(*pp));
if (!fp) {
fc_rport_error_retry(rdata, -FC_EX_ALLOC_ERR);
return;
}
fc_prli_fill(lport, fp);
prov = fc_passive_prov[FC_TYPE_FCP];
if (prov) {
pp = fc_frame_payload_get(fp, sizeof(*pp));
prov->prli(rdata, sizeof(pp->spp), NULL, &pp->spp);
}
fc_fill_fc_hdr(fp, FC_RCTL_ELS_REQ, rdata->ids.port_id,
fc_host_port_id(lport->host), FC_TYPE_ELS,
FC_FC_FIRST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT, 0);
kref_get(&rdata->kref);
if (!fc_exch_seq_send(lport, fp, fc_rport_prli_resp,
NULL, rdata, 2 * lport->r_a_tov)) {
fc_rport_error_retry(rdata, -FC_EX_XMIT_ERR);
kref_put(&rdata->kref, fc_rport_destroy);
}
}
/**
 * fc_rport_rtv_resp() - Handler for Read Timeout Value (RTV) responses
* @sp: The sequence the RTV was on
* @fp: The RTV response frame
* @rdata_arg: The remote port that sent the RTV response
*
* Many targets don't seem to support this.
*
* Locking Note: This function will be called without the rport lock
* held, but it will lock, call an _enter_* function or fc_rport_error
* and then unlock the rport.
*/
static void fc_rport_rtv_resp(struct fc_seq *sp, struct fc_frame *fp,
void *rdata_arg)
{
struct fc_rport_priv *rdata = rdata_arg;
u8 op;
FC_RPORT_DBG(rdata, "Received a RTV %s\n", fc_els_resp_type(fp));
if (fp == ERR_PTR(-FC_EX_CLOSED))
goto put;
mutex_lock(&rdata->rp_mutex);
if (rdata->rp_state != RPORT_ST_RTV) {
FC_RPORT_DBG(rdata, "Received a RTV response, but in state "
"%s\n", fc_rport_state(rdata));
if (IS_ERR(fp))
goto err;
goto out;
}
if (IS_ERR(fp)) {
fc_rport_error(rdata, PTR_ERR(fp));
goto err;
}
op = fc_frame_payload_op(fp);
if (op == ELS_LS_ACC) {
struct fc_els_rtv_acc *rtv;
u32 toq;
u32 tov;
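		/*
		 * R_A_TOV is reported in milliseconds.  E_D_TOV is also in
		 * milliseconds unless the FC_ELS_RTV_EDRES bit is set in the
		 * timeout qualifier, in which case it is in nanoseconds and
		 * is converted below.
		 */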
rtv = fc_frame_payload_get(fp, sizeof(*rtv));
if (rtv) {
toq = ntohl(rtv->rtv_toq);
tov = ntohl(rtv->rtv_r_a_tov);
if (tov == 0)
tov = 1;
if (tov > rdata->r_a_tov)
rdata->r_a_tov = tov;
tov = ntohl(rtv->rtv_e_d_tov);
if (toq & FC_ELS_RTV_EDRES)
tov /= 1000000;
if (tov == 0)
tov = 1;
if (tov > rdata->e_d_tov)
rdata->e_d_tov = tov;
}
}
fc_rport_enter_ready(rdata);
out:
fc_frame_free(fp);
err:
mutex_unlock(&rdata->rp_mutex);
put:
kref_put(&rdata->kref, fc_rport_destroy);
}
/**
 * fc_rport_enter_rtv() - Send Read Timeout Value (RTV) request
* @rdata: The remote port to send the RTV request to
*
* Reference counting: increments kref when sending ELS
*/
static void fc_rport_enter_rtv(struct fc_rport_priv *rdata)
{
struct fc_frame *fp;
struct fc_lport *lport = rdata->local_port;
lockdep_assert_held(&rdata->rp_mutex);
FC_RPORT_DBG(rdata, "Port entered RTV state from %s state\n",
fc_rport_state(rdata));
fc_rport_state_enter(rdata, RPORT_ST_RTV);
fp = fc_frame_alloc(lport, sizeof(struct fc_els_rtv));
if (!fp) {
fc_rport_error_retry(rdata, -FC_EX_ALLOC_ERR);
return;
}
kref_get(&rdata->kref);
if (!lport->tt.elsct_send(lport, rdata->ids.port_id, fp, ELS_RTV,
fc_rport_rtv_resp, rdata,
2 * lport->r_a_tov)) {
fc_rport_error_retry(rdata, -FC_EX_XMIT_ERR);
kref_put(&rdata->kref, fc_rport_destroy);
}
}
/**
* fc_rport_recv_rtv_req() - Handler for Read Timeout Value (RTV) requests
* @rdata: The remote port that sent the RTV request
* @in_fp: The RTV request frame
*/
static void fc_rport_recv_rtv_req(struct fc_rport_priv *rdata,
struct fc_frame *in_fp)
{
struct fc_lport *lport = rdata->local_port;
struct fc_frame *fp;
struct fc_els_rtv_acc *rtv;
struct fc_seq_els_data rjt_data;
lockdep_assert_held(&rdata->rp_mutex);
lockdep_assert_held(&lport->lp_mutex);
FC_RPORT_DBG(rdata, "Received RTV request\n");
fp = fc_frame_alloc(lport, sizeof(*rtv));
if (!fp) {
rjt_data.reason = ELS_RJT_UNAB;
rjt_data.explan = ELS_EXPL_INSUF_RES;
fc_seq_els_rsp_send(in_fp, ELS_LS_RJT, &rjt_data);
goto drop;
}
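	/*
	 * Accept and report the local port's timeout values; rtv_toq is left
	 * at zero, so E_D_TOV is reported in the default millisecond
	 * resolution.
	 */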
rtv = fc_frame_payload_get(fp, sizeof(*rtv));
rtv->rtv_cmd = ELS_LS_ACC;
rtv->rtv_r_a_tov = htonl(lport->r_a_tov);
rtv->rtv_e_d_tov = htonl(lport->e_d_tov);
rtv->rtv_toq = 0;
fc_fill_reply_hdr(fp, in_fp, FC_RCTL_ELS_REP, 0);
lport->tt.frame_send(lport, fp);
drop:
fc_frame_free(in_fp);
}
/**
* fc_rport_logo_resp() - Handler for logout (LOGO) responses
* @sp: The sequence the LOGO was on
* @fp: The LOGO response frame
* @rdata_arg: The remote port
*/
static void fc_rport_logo_resp(struct fc_seq *sp, struct fc_frame *fp,
void *rdata_arg)
{
struct fc_rport_priv *rdata = rdata_arg;
struct fc_lport *lport = rdata->local_port;
FC_RPORT_ID_DBG(lport, fc_seq_exch(sp)->did,
"Received a LOGO %s\n", fc_els_resp_type(fp));
if (!IS_ERR(fp))
fc_frame_free(fp);
kref_put(&rdata->kref, fc_rport_destroy);
}
/**
* fc_rport_enter_logo() - Send a logout (LOGO) request
* @rdata: The remote port to send the LOGO request to
*
* Reference counting: increments kref when sending ELS
*/
static void fc_rport_enter_logo(struct fc_rport_priv *rdata)
{
struct fc_lport *lport = rdata->local_port;
struct fc_frame *fp;
lockdep_assert_held(&rdata->rp_mutex);
FC_RPORT_DBG(rdata, "Port sending LOGO from %s state\n",
fc_rport_state(rdata));
fp = fc_frame_alloc(lport, sizeof(struct fc_els_logo));
if (!fp)
return;
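	/*
	 * LOGO is sent without an exchange timer and without changing the
	 * rport state; the reference taken here is dropped either by
	 * fc_rport_logo_resp() or below if the send fails.
	 */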
kref_get(&rdata->kref);
if (!lport->tt.elsct_send(lport, rdata->ids.port_id, fp, ELS_LOGO,
fc_rport_logo_resp, rdata, 0))
kref_put(&rdata->kref, fc_rport_destroy);
}
/**
* fc_rport_adisc_resp() - Handler for Address Discovery (ADISC) responses
* @sp: The sequence the ADISC response was on
* @fp: The ADISC response frame
* @rdata_arg: The remote port that sent the ADISC response
*
* Locking Note: This function will be called without the rport lock
* held, but it will lock, call an _enter_* function or fc_rport_error
* and then unlock the rport.
*/
static void fc_rport_adisc_resp(struct fc_seq *sp, struct fc_frame *fp,
void *rdata_arg)
{
struct fc_rport_priv *rdata = rdata_arg;
struct fc_els_adisc *adisc;
u8 op;
FC_RPORT_DBG(rdata, "Received a ADISC response\n");
if (fp == ERR_PTR(-FC_EX_CLOSED))
goto put;
mutex_lock(&rdata->rp_mutex);
if (rdata->rp_state != RPORT_ST_ADISC) {
FC_RPORT_DBG(rdata, "Received a ADISC resp but in state %s\n",
fc_rport_state(rdata));
if (IS_ERR(fp))
goto err;
goto out;
}
if (IS_ERR(fp)) {
fc_rport_error(rdata, PTR_ERR(fp));
goto err;
}
/*
	 * If address verification failed, consider us logged out of the rport.
	 * Since the rport is still in discovery, we want to be logged in, so
	 * restart the login sequence (FLOGI/PLOGI). Otherwise, go back to READY.
*/
op = fc_frame_payload_op(fp);
adisc = fc_frame_payload_get(fp, sizeof(*adisc));
if (op != ELS_LS_ACC || !adisc ||
ntoh24(adisc->adisc_port_id) != rdata->ids.port_id ||
get_unaligned_be64(&adisc->adisc_wwpn) != rdata->ids.port_name ||
get_unaligned_be64(&adisc->adisc_wwnn) != rdata->ids.node_name) {
FC_RPORT_DBG(rdata, "ADISC error or mismatch\n");
fc_rport_enter_flogi(rdata);
} else {
FC_RPORT_DBG(rdata, "ADISC OK\n");
fc_rport_enter_ready(rdata);
}
out:
fc_frame_free(fp);
err:
mutex_unlock(&rdata->rp_mutex);
put:
kref_put(&rdata->kref, fc_rport_destroy);
}
/**
* fc_rport_enter_adisc() - Send Address Discover (ADISC) request
* @rdata: The remote port to send the ADISC request to
*
* Reference counting: increments kref when sending ELS
*/
static void fc_rport_enter_adisc(struct fc_rport_priv *rdata)
{
struct fc_lport *lport = rdata->local_port;
struct fc_frame *fp;
lockdep_assert_held(&rdata->rp_mutex);
FC_RPORT_DBG(rdata, "sending ADISC from %s state\n",
fc_rport_state(rdata));
fc_rport_state_enter(rdata, RPORT_ST_ADISC);
fp = fc_frame_alloc(lport, sizeof(struct fc_els_adisc));
if (!fp) {
fc_rport_error_retry(rdata, -FC_EX_ALLOC_ERR);
return;
}
kref_get(&rdata->kref);
if (!lport->tt.elsct_send(lport, rdata->ids.port_id, fp, ELS_ADISC,
fc_rport_adisc_resp, rdata,
2 * lport->r_a_tov)) {
fc_rport_error_retry(rdata, -FC_EX_XMIT_ERR);
kref_put(&rdata->kref, fc_rport_destroy);
}
}
/**
* fc_rport_recv_adisc_req() - Handler for Address Discovery (ADISC) requests
* @rdata: The remote port that sent the ADISC request
* @in_fp: The ADISC request frame
*/
static void fc_rport_recv_adisc_req(struct fc_rport_priv *rdata,
struct fc_frame *in_fp)
{
struct fc_lport *lport = rdata->local_port;
struct fc_frame *fp;
struct fc_els_adisc *adisc;
struct fc_seq_els_data rjt_data;
lockdep_assert_held(&rdata->rp_mutex);
lockdep_assert_held(&lport->lp_mutex);
FC_RPORT_DBG(rdata, "Received ADISC request\n");
adisc = fc_frame_payload_get(in_fp, sizeof(*adisc));
if (!adisc) {
rjt_data.reason = ELS_RJT_PROT;
rjt_data.explan = ELS_EXPL_INV_LEN;
fc_seq_els_rsp_send(in_fp, ELS_LS_RJT, &rjt_data);
goto drop;
}
fp = fc_frame_alloc(lport, sizeof(*adisc));
if (!fp)
goto drop;
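	/*
	 * fc_adisc_fill() builds the payload with the local port's IDs; only
	 * the command code needs to be changed to LS_ACC for the reply.
	 */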
fc_adisc_fill(lport, fp);
adisc = fc_frame_payload_get(fp, sizeof(*adisc));
adisc->adisc_cmd = ELS_LS_ACC;
fc_fill_reply_hdr(fp, in_fp, FC_RCTL_ELS_REP, 0);
lport->tt.frame_send(lport, fp);
drop:
fc_frame_free(in_fp);
}
/**
* fc_rport_recv_rls_req() - Handle received Read Link Status request
* @rdata: The remote port that sent the RLS request
 * @rx_fp: The RLS request frame
*/
static void fc_rport_recv_rls_req(struct fc_rport_priv *rdata,
struct fc_frame *rx_fp)
{
struct fc_lport *lport = rdata->local_port;
struct fc_frame *fp;
struct fc_els_rls *rls;
struct fc_els_rls_resp *rsp;
struct fc_els_lesb *lesb;
struct fc_seq_els_data rjt_data;
struct fc_host_statistics *hst;
lockdep_assert_held(&rdata->rp_mutex);
FC_RPORT_DBG(rdata, "Received RLS request while in state %s\n",
fc_rport_state(rdata));
rls = fc_frame_payload_get(rx_fp, sizeof(*rls));
if (!rls) {
rjt_data.reason = ELS_RJT_PROT;
rjt_data.explan = ELS_EXPL_INV_LEN;
goto out_rjt;
}
fp = fc_frame_alloc(lport, sizeof(*rsp));
if (!fp) {
rjt_data.reason = ELS_RJT_UNAB;
rjt_data.explan = ELS_EXPL_INSUF_RES;
goto out_rjt;
}
rsp = fc_frame_payload_get(fp, sizeof(*rsp));
memset(rsp, 0, sizeof(*rsp));
rsp->rls_cmd = ELS_LS_ACC;
lesb = &rsp->rls_lesb;
if (lport->tt.get_lesb) {
/* get LESB from LLD if it supports it */
lport->tt.get_lesb(lport, lesb);
} else {
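		/* no LLD LESB support; build the LESB from cached host stats */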
fc_get_host_stats(lport->host);
hst = &lport->host_stats;
lesb->lesb_link_fail = htonl(hst->link_failure_count);
lesb->lesb_sync_loss = htonl(hst->loss_of_sync_count);
lesb->lesb_sig_loss = htonl(hst->loss_of_signal_count);
lesb->lesb_prim_err = htonl(hst->prim_seq_protocol_err_count);
lesb->lesb_inv_word = htonl(hst->invalid_tx_word_count);
lesb->lesb_inv_crc = htonl(hst->invalid_crc_count);
}
fc_fill_reply_hdr(fp, rx_fp, FC_RCTL_ELS_REP, 0);
lport->tt.frame_send(lport, fp);
goto out;
out_rjt:
fc_seq_els_rsp_send(rx_fp, ELS_LS_RJT, &rjt_data);
out:
fc_frame_free(rx_fp);
}
/**
* fc_rport_recv_els_req() - Handler for validated ELS requests
* @lport: The local port that received the ELS request
* @fp: The ELS request frame
*
* Handle incoming ELS requests that require port login.
* The ELS opcode has already been validated by the caller.
*
* Reference counting: does not modify kref
*/
static void fc_rport_recv_els_req(struct fc_lport *lport, struct fc_frame *fp)
{
struct fc_rport_priv *rdata;
struct fc_seq_els_data els_data;
lockdep_assert_held(&lport->lp_mutex);
rdata = fc_rport_lookup(lport, fc_frame_sid(fp));
if (!rdata) {
FC_RPORT_ID_DBG(lport, fc_frame_sid(fp),
"Received ELS 0x%02x from non-logged-in port\n",
fc_frame_payload_op(fp));
goto reject;
}
mutex_lock(&rdata->rp_mutex);
switch (rdata->rp_state) {
case RPORT_ST_PRLI:
case RPORT_ST_RTV:
case RPORT_ST_READY:
case RPORT_ST_ADISC:
break;
case RPORT_ST_PLOGI:
if (fc_frame_payload_op(fp) == ELS_PRLI) {
FC_RPORT_DBG(rdata, "Reject ELS PRLI "
"while in state %s\n",
fc_rport_state(rdata));
mutex_unlock(&rdata->rp_mutex);
kref_put(&rdata->kref, fc_rport_destroy);
goto busy;
}
fallthrough;
default:
FC_RPORT_DBG(rdata,
"Reject ELS 0x%02x while in state %s\n",
fc_frame_payload_op(fp), fc_rport_state(rdata));
mutex_unlock(&rdata->rp_mutex);
kref_put(&rdata->kref, fc_rport_destroy);
goto reject;
}
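	/* The rport is in a logged-in state; dispatch on the ELS opcode. */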
switch (fc_frame_payload_op(fp)) {
case ELS_PRLI:
fc_rport_recv_prli_req(rdata, fp);
break;
case ELS_PRLO:
fc_rport_recv_prlo_req(rdata, fp);
break;
case ELS_ADISC:
fc_rport_recv_adisc_req(rdata, fp);
break;
case ELS_RRQ:
fc_seq_els_rsp_send(fp, ELS_RRQ, NULL);
fc_frame_free(fp);
break;
case ELS_REC:
fc_seq_els_rsp_send(fp, ELS_REC, NULL);
fc_frame_free(fp);
break;
case ELS_RLS:
fc_rport_recv_rls_req(rdata, fp);
break;
case ELS_RTV:
fc_rport_recv_rtv_req(rdata, fp);
break;
default:
fc_frame_free(fp); /* can't happen */
break;
}
mutex_unlock(&rdata->rp_mutex);
kref_put(&rdata->kref, fc_rport_destroy);
return;
reject:
els_data.reason = ELS_RJT_UNAB;
els_data.explan = ELS_EXPL_PLOGI_REQD;
fc_seq_els_rsp_send(fp, ELS_LS_RJT, &els_data);
fc_frame_free(fp);
return;
busy:
els_data.reason = ELS_RJT_BUSY;
els_data.explan = ELS_EXPL_NONE;
fc_seq_els_rsp_send(fp, ELS_LS_RJT, &els_data);
fc_frame_free(fp);
return;
}
/**
* fc_rport_recv_req() - Handler for requests
* @lport: The local port that received the request
* @fp: The request frame
*
* Reference counting: does not modify kref
*/
void fc_rport_recv_req(struct fc_lport *lport, struct fc_frame *fp)
{
struct fc_seq_els_data els_data;
lockdep_assert_held(&lport->lp_mutex);
/*
* Handle FLOGI, PLOGI and LOGO requests separately, since they
* don't require prior login.
* Check for unsupported opcodes first and reject them.
* For some ops, it would be incorrect to reject with "PLOGI required".
*/
switch (fc_frame_payload_op(fp)) {
case ELS_FLOGI:
fc_rport_recv_flogi_req(lport, fp);
break;
case ELS_PLOGI:
fc_rport_recv_plogi_req(lport, fp);
break;
case ELS_LOGO:
fc_rport_recv_logo_req(lport, fp);
break;
case ELS_PRLI:
case ELS_PRLO:
case ELS_ADISC:
case ELS_RRQ:
case ELS_REC:
case ELS_RLS:
case ELS_RTV:
fc_rport_recv_els_req(lport, fp);
break;
default:
els_data.reason = ELS_RJT_UNSUP;
els_data.explan = ELS_EXPL_NONE;
fc_seq_els_rsp_send(fp, ELS_LS_RJT, &els_data);
fc_frame_free(fp);
break;
}
}
EXPORT_SYMBOL(fc_rport_recv_req);
/**
* fc_rport_recv_plogi_req() - Handler for Port Login (PLOGI) requests
* @lport: The local port that received the PLOGI request
* @rx_fp: The PLOGI request frame
*
* Reference counting: increments kref on return
*/
static void fc_rport_recv_plogi_req(struct fc_lport *lport,
struct fc_frame *rx_fp)
{
struct fc_disc *disc;
struct fc_rport_priv *rdata;
struct fc_frame *fp = rx_fp;
struct fc_els_flogi *pl;
struct fc_seq_els_data rjt_data;
u32 sid;
lockdep_assert_held(&lport->lp_mutex);
sid = fc_frame_sid(fp);
FC_RPORT_ID_DBG(lport, sid, "Received PLOGI request\n");
pl = fc_frame_payload_get(fp, sizeof(*pl));
if (!pl) {
FC_RPORT_ID_DBG(lport, sid, "Received PLOGI too short\n");
rjt_data.reason = ELS_RJT_PROT;
rjt_data.explan = ELS_EXPL_INV_LEN;
goto reject;
}
disc = &lport->disc;
mutex_lock(&disc->disc_mutex);
rdata = fc_rport_create(lport, sid);
if (!rdata) {
mutex_unlock(&disc->disc_mutex);
rjt_data.reason = ELS_RJT_UNAB;
rjt_data.explan = ELS_EXPL_INSUF_RES;
goto reject;
}
mutex_lock(&rdata->rp_mutex);
mutex_unlock(&disc->disc_mutex);
rdata->ids.port_name = get_unaligned_be64(&pl->fl_wwpn);
rdata->ids.node_name = get_unaligned_be64(&pl->fl_wwnn);
/*
* If the rport was just created, possibly due to the incoming PLOGI,
* set the state appropriately and accept the PLOGI.
*
* If we had also sent a PLOGI, and if the received PLOGI is from a
* higher WWPN, we accept it, otherwise an LS_RJT is sent with reason
* "command already in progress".
*
* XXX TBD: If the session was ready before, the PLOGI should result in
* all outstanding exchanges being reset.
*/
switch (rdata->rp_state) {
case RPORT_ST_INIT:
FC_RPORT_DBG(rdata, "Received PLOGI in INIT state\n");
break;
case RPORT_ST_PLOGI_WAIT:
FC_RPORT_DBG(rdata, "Received PLOGI in PLOGI_WAIT state\n");
break;
case RPORT_ST_PLOGI:
FC_RPORT_DBG(rdata, "Received PLOGI in PLOGI state\n");
if (rdata->ids.port_name < lport->wwpn) {
mutex_unlock(&rdata->rp_mutex);
rjt_data.reason = ELS_RJT_INPROG;
rjt_data.explan = ELS_EXPL_NONE;
goto reject;
}
break;
case RPORT_ST_PRLI:
case RPORT_ST_RTV:
case RPORT_ST_READY:
case RPORT_ST_ADISC:
FC_RPORT_DBG(rdata, "Received PLOGI in logged-in state %d "
"- ignored for now\n", rdata->rp_state);
/* XXX TBD - should reset */
break;
case RPORT_ST_FLOGI:
case RPORT_ST_DELETE:
FC_RPORT_DBG(rdata, "Received PLOGI in state %s - send busy\n",
fc_rport_state(rdata));
mutex_unlock(&rdata->rp_mutex);
rjt_data.reason = ELS_RJT_BUSY;
rjt_data.explan = ELS_EXPL_NONE;
goto reject;
}
if (!fc_rport_compatible_roles(lport, rdata)) {
FC_RPORT_DBG(rdata, "Received PLOGI for incompatible role\n");
mutex_unlock(&rdata->rp_mutex);
rjt_data.reason = ELS_RJT_LOGIC;
rjt_data.explan = ELS_EXPL_NONE;
goto reject;
}
/*
* Get session payload size from incoming PLOGI.
*/
rdata->maxframe_size = fc_plogi_get_maxframe(pl, lport->mfs);
/*
* Send LS_ACC. If this fails, the originator should retry.
*/
fp = fc_frame_alloc(lport, sizeof(*pl));
if (!fp)
goto out;
fc_plogi_fill(lport, fp, ELS_LS_ACC);
fc_fill_reply_hdr(fp, rx_fp, FC_RCTL_ELS_REP, 0);
lport->tt.frame_send(lport, fp);
fc_rport_enter_prli(rdata);
out:
mutex_unlock(&rdata->rp_mutex);
fc_frame_free(rx_fp);
return;
reject:
fc_seq_els_rsp_send(fp, ELS_LS_RJT, &rjt_data);
fc_frame_free(fp);
}
/**
* fc_rport_recv_prli_req() - Handler for process login (PRLI) requests
* @rdata: The remote port that sent the PRLI request
* @rx_fp: The PRLI request frame
*/
static void fc_rport_recv_prli_req(struct fc_rport_priv *rdata,
struct fc_frame *rx_fp)
{
struct fc_lport *lport = rdata->local_port;
struct fc_frame *fp;
struct {
struct fc_els_prli prli;
struct fc_els_spp spp;
} *pp;
struct fc_els_spp *rspp; /* request service param page */
struct fc_els_spp *spp; /* response spp */
unsigned int len;
unsigned int plen;
enum fc_els_spp_resp resp;
struct fc_seq_els_data rjt_data;
struct fc4_prov *prov;
lockdep_assert_held(&rdata->rp_mutex);
FC_RPORT_DBG(rdata, "Received PRLI request while in state %s\n",
fc_rport_state(rdata));
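	/*
	 * Validate the lengths: prli_len covers the whole PRLI payload and
	 * prli_spp_len is the size of each service parameter page.  Both
	 * must be multiples of 4 and must fit within the received frame.
	 */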
len = fr_len(rx_fp) - sizeof(struct fc_frame_header);
pp = fc_frame_payload_get(rx_fp, sizeof(*pp));
if (!pp)
goto reject_len;
plen = ntohs(pp->prli.prli_len);
if ((plen % 4) != 0 || plen > len || plen < 16)
goto reject_len;
if (plen < len)
len = plen;
plen = pp->prli.prli_spp_len;
if ((plen % 4) != 0 || plen < sizeof(*spp) ||
plen > len || len < sizeof(*pp) || plen < 12)
goto reject_len;
rspp = &pp->spp;
fp = fc_frame_alloc(lport, len);
if (!fp) {
rjt_data.reason = ELS_RJT_UNAB;
rjt_data.explan = ELS_EXPL_INSUF_RES;
goto reject;
}
pp = fc_frame_payload_get(fp, len);
WARN_ON(!pp);
memset(pp, 0, len);
pp->prli.prli_cmd = ELS_LS_ACC;
pp->prli.prli_spp_len = plen;
pp->prli.prli_len = htons(len);
len -= sizeof(struct fc_els_prli);
/*
* Go through all the service parameter pages and build
* response. If plen indicates longer SPP than standard,
* use that. The entire response has been pre-cleared above.
*/
spp = &pp->spp;
mutex_lock(&fc_prov_mutex);
while (len >= plen) {
rdata->spp_type = rspp->spp_type;
spp->spp_type = rspp->spp_type;
spp->spp_type_ext = rspp->spp_type_ext;
resp = 0;
if (rspp->spp_type < FC_FC4_PROV_SIZE) {
enum fc_els_spp_resp active = 0, passive = 0;
prov = fc_active_prov[rspp->spp_type];
if (prov)
active = prov->prli(rdata, plen, rspp, spp);
prov = fc_passive_prov[rspp->spp_type];
if (prov)
passive = prov->prli(rdata, plen, rspp, spp);
if (!active || passive == FC_SPP_RESP_ACK)
resp = passive;
else
resp = active;
FC_RPORT_DBG(rdata, "PRLI rspp type %x "
"active %x passive %x\n",
rspp->spp_type, active, passive);
}
if (!resp) {
if (spp->spp_flags & FC_SPP_EST_IMG_PAIR)
resp |= FC_SPP_RESP_CONF;
else
resp |= FC_SPP_RESP_INVL;
}
spp->spp_flags |= resp;
len -= plen;
rspp = (struct fc_els_spp *)((char *)rspp + plen);
spp = (struct fc_els_spp *)((char *)spp + plen);
}
mutex_unlock(&fc_prov_mutex);
/*
* Send LS_ACC. If this fails, the originator should retry.
*/
fc_fill_reply_hdr(fp, rx_fp, FC_RCTL_ELS_REP, 0);
lport->tt.frame_send(lport, fp);
goto drop;
reject_len:
rjt_data.reason = ELS_RJT_PROT;
rjt_data.explan = ELS_EXPL_INV_LEN;
reject:
fc_seq_els_rsp_send(rx_fp, ELS_LS_RJT, &rjt_data);
drop:
fc_frame_free(rx_fp);
}
/**
* fc_rport_recv_prlo_req() - Handler for process logout (PRLO) requests
* @rdata: The remote port that sent the PRLO request
* @rx_fp: The PRLO request frame
*/
static void fc_rport_recv_prlo_req(struct fc_rport_priv *rdata,
struct fc_frame *rx_fp)
{
struct fc_lport *lport = rdata->local_port;
struct fc_frame *fp;
struct {
struct fc_els_prlo prlo;
struct fc_els_spp spp;
} *pp;
struct fc_els_spp *rspp; /* request service param page */
struct fc_els_spp *spp; /* response spp */
unsigned int len;
unsigned int plen;
struct fc_seq_els_data rjt_data;
lockdep_assert_held(&rdata->rp_mutex);
FC_RPORT_DBG(rdata, "Received PRLO request while in state %s\n",
fc_rport_state(rdata));
len = fr_len(rx_fp) - sizeof(struct fc_frame_header);
pp = fc_frame_payload_get(rx_fp, sizeof(*pp));
if (!pp)
goto reject_len;
plen = ntohs(pp->prlo.prlo_len);
if (plen != 20)
goto reject_len;
if (plen < len)
len = plen;
rspp = &pp->spp;
fp = fc_frame_alloc(lport, len);
if (!fp) {
rjt_data.reason = ELS_RJT_UNAB;
rjt_data.explan = ELS_EXPL_INSUF_RES;
goto reject;
}
pp = fc_frame_payload_get(fp, len);
WARN_ON(!pp);
memset(pp, 0, len);
pp->prlo.prlo_cmd = ELS_LS_ACC;
pp->prlo.prlo_obs = 0x10;
pp->prlo.prlo_len = htons(len);
spp = &pp->spp;
spp->spp_type = rspp->spp_type;
spp->spp_type_ext = rspp->spp_type_ext;
spp->spp_flags = FC_SPP_RESP_ACK;
fc_rport_enter_prli(rdata);
fc_fill_reply_hdr(fp, rx_fp, FC_RCTL_ELS_REP, 0);
lport->tt.frame_send(lport, fp);
goto drop;
reject_len:
rjt_data.reason = ELS_RJT_PROT;
rjt_data.explan = ELS_EXPL_INV_LEN;
reject:
fc_seq_els_rsp_send(rx_fp, ELS_LS_RJT, &rjt_data);
drop:
fc_frame_free(rx_fp);
}
/**
* fc_rport_recv_logo_req() - Handler for logout (LOGO) requests
* @lport: The local port that received the LOGO request
* @fp: The LOGO request frame
*
* Reference counting: drops kref on return
*/
static void fc_rport_recv_logo_req(struct fc_lport *lport, struct fc_frame *fp)
{
struct fc_rport_priv *rdata;
u32 sid;
lockdep_assert_held(&lport->lp_mutex);
fc_seq_els_rsp_send(fp, ELS_LS_ACC, NULL);
sid = fc_frame_sid(fp);
rdata = fc_rport_lookup(lport, sid);
if (rdata) {
mutex_lock(&rdata->rp_mutex);
FC_RPORT_DBG(rdata, "Received LOGO request while in state %s\n",
fc_rport_state(rdata));
fc_rport_enter_delete(rdata, RPORT_EV_STOP);
mutex_unlock(&rdata->rp_mutex);
kref_put(&rdata->kref, fc_rport_destroy);
} else
FC_RPORT_ID_DBG(lport, sid,
"Received LOGO from non-logged-in port\n");
fc_frame_free(fp);
}
/**
* fc_rport_flush_queue() - Flush the rport_event_queue
*/
void fc_rport_flush_queue(void)
{
flush_workqueue(rport_event_queue);
}
EXPORT_SYMBOL(fc_rport_flush_queue);
/**
* fc_rport_fcp_prli() - Handle incoming PRLI for the FCP initiator.
* @rdata: remote port private
* @spp_len: service parameter page length
* @rspp: received service parameter page
* @spp: response service parameter page
*
* Returns the value for the response code to be placed in spp_flags;
* Returns 0 if not an initiator.
*/
static int fc_rport_fcp_prli(struct fc_rport_priv *rdata, u32 spp_len,
const struct fc_els_spp *rspp,
struct fc_els_spp *spp)
{
struct fc_lport *lport = rdata->local_port;
u32 fcp_parm;
fcp_parm = ntohl(rspp->spp_params);
rdata->ids.roles = FC_RPORT_ROLE_UNKNOWN;
if (fcp_parm & FCP_SPPF_INIT_FCN)
rdata->ids.roles |= FC_RPORT_ROLE_FCP_INITIATOR;
if (fcp_parm & FCP_SPPF_TARG_FCN)
rdata->ids.roles |= FC_RPORT_ROLE_FCP_TARGET;
if (fcp_parm & FCP_SPPF_RETRY)
rdata->flags |= FC_RP_FLAGS_RETRY;
rdata->supported_classes = FC_COS_CLASS3;
if (!(lport->service_params & FCP_SPPF_INIT_FCN))
return 0;
spp->spp_flags |= rspp->spp_flags & FC_SPP_EST_IMG_PAIR;
/*
* OR in our service parameters with other providers (target), if any.
*/
fcp_parm = ntohl(spp->spp_params);
spp->spp_params = htonl(fcp_parm | lport->service_params);
return FC_SPP_RESP_ACK;
}
/*
* FC-4 provider ops for FCP initiator.
*/
struct fc4_prov fc_rport_fcp_init = {
.prli = fc_rport_fcp_prli,
};
/**
* fc_rport_t0_prli() - Handle incoming PRLI parameters for type 0
* @rdata: remote port private
* @spp_len: service parameter page length
* @rspp: received service parameter page
* @spp: response service parameter page
*/
static int fc_rport_t0_prli(struct fc_rport_priv *rdata, u32 spp_len,
const struct fc_els_spp *rspp,
struct fc_els_spp *spp)
{
if (rspp->spp_flags & FC_SPP_EST_IMG_PAIR)
return FC_SPP_RESP_INVL;
return FC_SPP_RESP_ACK;
}
/*
* FC-4 provider ops for type 0 service parameters.
*
* This handles the special case of type 0 which is always successful
* but doesn't do anything otherwise.
*/
struct fc4_prov fc_rport_t0_prov = {
.prli = fc_rport_t0_prli,
};
/**
* fc_setup_rport() - Initialize the rport_event_queue
*/
int fc_setup_rport(void)
{
rport_event_queue = create_singlethread_workqueue("fc_rport_eq");
if (!rport_event_queue)
return -ENOMEM;
return 0;
}
/**
* fc_destroy_rport() - Destroy the rport_event_queue
*/
void fc_destroy_rport(void)
{
destroy_workqueue(rport_event_queue);
}
/**
* fc_rport_terminate_io() - Stop all outstanding I/O on a remote port
* @rport: The remote port whose I/O should be terminated
*/
void fc_rport_terminate_io(struct fc_rport *rport)
{
struct fc_rport_libfc_priv *rpriv = rport->dd_data;
struct fc_lport *lport = rpriv->local_port;
lport->tt.exch_mgr_reset(lport, 0, rport->port_id);
lport->tt.exch_mgr_reset(lport, rport->port_id, 0);
}
EXPORT_SYMBOL(fc_rport_terminate_io);
| linux-master | drivers/scsi/libfc/fc_rport.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright(c) 2007 Intel Corporation. All rights reserved.
* Copyright(c) 2008 Red Hat, Inc. All rights reserved.
* Copyright(c) 2008 Mike Christie
*
* Maintained at www.Open-FCoE.org
*/
/*
* Fibre Channel exchange and sequence handling.
*/
#include <linux/timer.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/export.h>
#include <linux/log2.h>
#include <scsi/fc/fc_fc2.h>
#include <scsi/libfc.h>
#include "fc_libfc.h"
u16 fc_cpu_mask; /* cpu mask for possible cpus */
EXPORT_SYMBOL(fc_cpu_mask);
static u16	fc_cpu_order;	/* shift count: log2 of total possible cpus, rounded up */
static struct kmem_cache *fc_em_cachep; /* cache for exchanges */
static struct workqueue_struct *fc_exch_workqueue;
/*
* Structure and function definitions for managing Fibre Channel Exchanges
* and Sequences.
*
* The three primary structures used here are fc_exch_mgr, fc_exch, and fc_seq.
*
* fc_exch_mgr holds the exchange state for an N port
*
* fc_exch holds state for one exchange and links to its active sequence.
*
* fc_seq holds the state for an individual sequence.
*/
/**
* struct fc_exch_pool - Per cpu exchange pool
* @next_index: Next possible free exchange index
* @total_exches: Total allocated exchanges
* @lock: Exch pool lock
* @ex_list: List of exchanges
* @left: Cache of free slot in exch array
* @right: Cache of free slot in exch array
*
 * This structure manages per-CPU exchanges via an array of exchange pointers.
 * The array is allocated immediately after the struct fc_exch_pool memory,
 * covering the range of exchanges assigned to the per-CPU pool.
*/
struct fc_exch_pool {
spinlock_t lock;
struct list_head ex_list;
u16 next_index;
u16 total_exches;
u16 left;
u16 right;
} ____cacheline_aligned_in_smp;
/**
* struct fc_exch_mgr - The Exchange Manager (EM).
* @class: Default class for new sequences
* @kref: Reference counter
* @min_xid: Minimum exchange ID
* @max_xid: Maximum exchange ID
* @ep_pool: Reserved exchange pointers
* @pool_max_index: Max exch array index in exch pool
* @pool: Per cpu exch pool
* @lport: Local exchange port
* @stats: Statistics structure
*
* This structure is the center for creating exchanges and sequences.
* It manages the allocation of exchange IDs.
*/
struct fc_exch_mgr {
struct fc_exch_pool __percpu *pool;
mempool_t *ep_pool;
struct fc_lport *lport;
enum fc_class class;
struct kref kref;
u16 min_xid;
u16 max_xid;
u16 pool_max_index;
struct {
atomic_t no_free_exch;
atomic_t no_free_exch_xid;
atomic_t xid_not_found;
atomic_t xid_busy;
atomic_t seq_not_found;
atomic_t non_bls_resp;
} stats;
};
/**
* struct fc_exch_mgr_anchor - primary structure for list of EMs
* @ema_list: Exchange Manager Anchor list
* @mp: Exchange Manager associated with this anchor
* @match: Routine to determine if this anchor's EM should be used
*
* When walking the list of anchors the match routine will be called
* for each anchor to determine if that EM should be used. The last
* anchor in the list will always match to handle any exchanges not
* handled by other EMs. The non-default EMs would be added to the
* anchor list by HW that provides offloads.
*/
struct fc_exch_mgr_anchor {
struct list_head ema_list;
struct fc_exch_mgr *mp;
bool (*match)(struct fc_frame *);
};
static void fc_exch_rrq(struct fc_exch *);
static void fc_seq_ls_acc(struct fc_frame *);
static void fc_seq_ls_rjt(struct fc_frame *, enum fc_els_rjt_reason,
enum fc_els_rjt_explan);
static void fc_exch_els_rec(struct fc_frame *);
static void fc_exch_els_rrq(struct fc_frame *);
/*
* Internal implementation notes.
*
 * There is one exchange manager by default in libfc, but an LLD may choose
 * to have one per CPU. There is one sequence manager per exchange manager,
 * and the two are currently never separated.
*
* Section 9.8 in FC-FS-2 specifies: "The SEQ_ID is a one-byte field
* assigned by the Sequence Initiator that shall be unique for a specific
* D_ID and S_ID pair while the Sequence is open." Note that it isn't
* qualified by exchange ID, which one might think it would be.
* In practice this limits the number of open sequences and exchanges to 256
* per session. For most targets we could treat this limit as per exchange.
*
* The exchange and its sequence are freed when the last sequence is received.
* It's possible for the remote port to leave an exchange open without
* sending any sequences.
*
* Notes on reference counts:
*
* Exchanges are reference counted and exchange gets freed when the reference
* count becomes zero.
*
* Timeouts:
* Sequences are timed out for E_D_TOV and R_A_TOV.
*
* Sequence event handling:
*
* The following events may occur on initiator sequences:
*
* Send.
* For now, the whole thing is sent.
* Receive ACK
* This applies only to class F.
* The sequence is marked complete.
* ULP completion.
* The upper layer calls fc_exch_done() when done
* with exchange and sequence tuple.
* RX-inferred completion.
* When we receive the next sequence on the same exchange, we can
* retire the previous sequence ID. (XXX not implemented).
* Timeout.
* R_A_TOV frees the sequence ID. If we're waiting for ACK,
* E_D_TOV causes abort and calls upper layer response handler
* with FC_EX_TIMEOUT error.
* Receive RJT
* XXX defer.
* Send ABTS
* On timeout.
*
* The following events may occur on recipient sequences:
*
* Receive
* Allocate sequence for first frame received.
* Hold during receive handler.
* Release when final frame received.
* Keep status of last N of these for the ELS RES command. XXX TBD.
* Receive ABTS
* Deallocate sequence
* Send RJT
* Deallocate
*
* For now, we neglect conditions where only part of a sequence was
* received or transmitted, or where out-of-order receipt is detected.
*/
/*
* Locking notes:
*
 * The EM code runs in a per-CPU worker thread.
 *
 * To protect against concurrency between worker thread code and timers,
 * sequence allocation and deallocation must be locked.
 *  - The exchange refcnt can be manipulated atomically without locks.
 *  - Sequence allocation must be protected by the exch lock.
* - If the EM pool lock and ex_lock must be taken at the same time, then the
* EM pool lock must be taken before the ex_lock.
*/
/*
* opcode names for debugging.
*/
static char *fc_exch_rctl_names[] = FC_RCTL_NAMES_INIT;
/**
* fc_exch_name_lookup() - Lookup name by opcode
* @op: Opcode to be looked up
* @table: Opcode/name table
* @max_index: Index not to be exceeded
*
* This routine is used to determine a human-readable string identifying
 * an R_CTL opcode.
*/
static inline const char *fc_exch_name_lookup(unsigned int op, char **table,
unsigned int max_index)
{
const char *name = NULL;
if (op < max_index)
name = table[op];
if (!name)
name = "unknown";
return name;
}
/**
* fc_exch_rctl_name() - Wrapper routine for fc_exch_name_lookup()
* @op: The opcode to be looked up
*/
static const char *fc_exch_rctl_name(unsigned int op)
{
return fc_exch_name_lookup(op, fc_exch_rctl_names,
ARRAY_SIZE(fc_exch_rctl_names));
}
/**
* fc_exch_hold() - Increment an exchange's reference count
 * @ep: Exchange to be held
*/
static inline void fc_exch_hold(struct fc_exch *ep)
{
atomic_inc(&ep->ex_refcnt);
}
/**
 * fc_exch_setup_hdr() - Initialize an FC header by initializing some fields
 *			 and determining the SOF and EOF.
 * @ep: The exchange that will use the header
* @fp: The frame whose header is to be modified
* @f_ctl: F_CTL bits that will be used for the frame header
*
* The fields initialized by this routine are: fh_ox_id, fh_rx_id,
* fh_seq_id, fh_seq_cnt and the SOF and EOF.
*/
static void fc_exch_setup_hdr(struct fc_exch *ep, struct fc_frame *fp,
u32 f_ctl)
{
struct fc_frame_header *fh = fc_frame_header_get(fp);
u16 fill;
fr_sof(fp) = ep->class;
if (ep->seq.cnt)
fr_sof(fp) = fc_sof_normal(ep->class);
if (f_ctl & FC_FC_END_SEQ) {
fr_eof(fp) = FC_EOF_T;
if (fc_sof_needs_ack((enum fc_sof)ep->class))
fr_eof(fp) = FC_EOF_N;
/*
* From F_CTL.
* The number of fill bytes to make the length a 4-byte
* multiple is the low order 2-bits of the f_ctl.
* The fill itself will have been cleared by the frame
* allocation.
* After this, the length will be even, as expected by
* the transport.
*/
fill = fr_len(fp) & 3;
if (fill) {
fill = 4 - fill;
/* TODO, this may be a problem with fragmented skb */
skb_put(fp_skb(fp), fill);
hton24(fh->fh_f_ctl, f_ctl | fill);
}
} else {
WARN_ON(fr_len(fp) % 4 != 0); /* no pad to non last frame */
fr_eof(fp) = FC_EOF_N;
}
/* Initialize remaining fh fields from fc_fill_fc_hdr */
fh->fh_ox_id = htons(ep->oxid);
fh->fh_rx_id = htons(ep->rxid);
fh->fh_seq_id = ep->seq.id;
fh->fh_seq_cnt = htons(ep->seq.cnt);
}
/**
* fc_exch_release() - Decrement an exchange's reference count
* @ep: Exchange to be released
*
* If the reference count reaches zero and the exchange is complete,
* it is freed.
*/
static void fc_exch_release(struct fc_exch *ep)
{
struct fc_exch_mgr *mp;
if (atomic_dec_and_test(&ep->ex_refcnt)) {
mp = ep->em;
if (ep->destructor)
ep->destructor(&ep->seq, ep->arg);
WARN_ON(!(ep->esb_stat & ESB_ST_COMPLETE));
mempool_free(ep, mp->ep_pool);
}
}
/**
* fc_exch_timer_cancel() - cancel exch timer
* @ep: The exchange whose timer to be canceled
*/
static inline void fc_exch_timer_cancel(struct fc_exch *ep)
{
if (cancel_delayed_work(&ep->timeout_work)) {
FC_EXCH_DBG(ep, "Exchange timer canceled\n");
atomic_dec(&ep->ex_refcnt); /* drop hold for timer */
}
}
/**
 * fc_exch_timer_set_locked() - Start a timer for an exchange with the
 *				exchange lock held
* @ep: The exchange whose timer will start
* @timer_msec: The timeout period
*
* Used for upper level protocols to time out the exchange.
* The timer is cancelled when it fires or when the exchange completes.
*/
static inline void fc_exch_timer_set_locked(struct fc_exch *ep,
unsigned int timer_msec)
{
if (ep->state & (FC_EX_RST_CLEANUP | FC_EX_DONE))
return;
FC_EXCH_DBG(ep, "Exchange timer armed : %d msecs\n", timer_msec);
fc_exch_hold(ep); /* hold for timer */
if (!queue_delayed_work(fc_exch_workqueue, &ep->timeout_work,
msecs_to_jiffies(timer_msec))) {
FC_EXCH_DBG(ep, "Exchange already queued\n");
fc_exch_release(ep);
}
}
/**
* fc_exch_timer_set() - Lock the exchange and set the timer
* @ep: The exchange whose timer will start
* @timer_msec: The timeout period
*/
static void fc_exch_timer_set(struct fc_exch *ep, unsigned int timer_msec)
{
spin_lock_bh(&ep->ex_lock);
fc_exch_timer_set_locked(ep, timer_msec);
spin_unlock_bh(&ep->ex_lock);
}
/**
* fc_exch_done_locked() - Complete an exchange with the exchange lock held
* @ep: The exchange that is complete
*
* Note: May sleep if invoked from outside a response handler.
*/
static int fc_exch_done_locked(struct fc_exch *ep)
{
int rc = 1;
/*
* We must check for completion in case there are two threads
	 * trying to complete this. But the rrq code will reuse the
* ep, and in that case we only clear the resp and set it as
* complete, so it can be reused by the timer to send the rrq.
*/
if (ep->state & FC_EX_DONE)
return rc;
ep->esb_stat |= ESB_ST_COMPLETE;
if (!(ep->esb_stat & ESB_ST_REC_QUAL)) {
ep->state |= FC_EX_DONE;
fc_exch_timer_cancel(ep);
rc = 0;
}
return rc;
}
static struct fc_exch fc_quarantine_exch;
/**
* fc_exch_ptr_get() - Return an exchange from an exchange pool
* @pool: Exchange Pool to get an exchange from
* @index: Index of the exchange within the pool
*
* Use the index to get an exchange from within an exchange pool. exches
* will point to an array of exchange pointers. The index will select
* the exchange within the array.
*/
static inline struct fc_exch *fc_exch_ptr_get(struct fc_exch_pool *pool,
u16 index)
{
struct fc_exch **exches = (struct fc_exch **)(pool + 1);
return exches[index];
}
/**
* fc_exch_ptr_set() - Assign an exchange to a slot in an exchange pool
* @pool: The pool to assign the exchange to
* @index: The index in the pool where the exchange will be assigned
* @ep: The exchange to assign to the pool
*/
static inline void fc_exch_ptr_set(struct fc_exch_pool *pool, u16 index,
struct fc_exch *ep)
{
((struct fc_exch **)(pool + 1))[index] = ep;
}
/**
* fc_exch_delete() - Delete an exchange
* @ep: The exchange to be deleted
*/
static void fc_exch_delete(struct fc_exch *ep)
{
struct fc_exch_pool *pool;
u16 index;
pool = ep->pool;
spin_lock_bh(&pool->lock);
WARN_ON(pool->total_exches <= 0);
pool->total_exches--;
/* update cache of free slot */
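	/*
	 * The xid was allocated as (index << fc_cpu_order | cpu) + min_xid,
	 * so shifting out the CPU bits recovers the pool index.
	 */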
index = (ep->xid - ep->em->min_xid) >> fc_cpu_order;
if (!(ep->state & FC_EX_QUARANTINE)) {
if (pool->left == FC_XID_UNKNOWN)
pool->left = index;
else if (pool->right == FC_XID_UNKNOWN)
pool->right = index;
else
pool->next_index = index;
fc_exch_ptr_set(pool, index, NULL);
} else {
fc_exch_ptr_set(pool, index, &fc_quarantine_exch);
}
list_del(&ep->ex_list);
spin_unlock_bh(&pool->lock);
fc_exch_release(ep); /* drop hold for exch in mp */
}
static int fc_seq_send_locked(struct fc_lport *lport, struct fc_seq *sp,
struct fc_frame *fp)
{
struct fc_exch *ep;
struct fc_frame_header *fh = fc_frame_header_get(fp);
int error = -ENXIO;
u32 f_ctl;
u8 fh_type = fh->fh_type;
ep = fc_seq_exch(sp);
if (ep->esb_stat & (ESB_ST_COMPLETE | ESB_ST_ABNORMAL)) {
fc_frame_free(fp);
goto out;
}
WARN_ON(!(ep->esb_stat & ESB_ST_SEQ_INIT));
f_ctl = ntoh24(fh->fh_f_ctl);
fc_exch_setup_hdr(ep, fp, f_ctl);
fr_encaps(fp) = ep->encaps;
/*
* update sequence count if this frame is carrying
* multiple FC frames when sequence offload is enabled
* by LLD.
*/
if (fr_max_payload(fp))
sp->cnt += DIV_ROUND_UP((fr_len(fp) - sizeof(*fh)),
fr_max_payload(fp));
else
sp->cnt++;
/*
* Send the frame.
*/
error = lport->tt.frame_send(lport, fp);
if (fh_type == FC_TYPE_BLS)
goto out;
/*
* Update the exchange and sequence flags,
* assuming all frames for the sequence have been sent.
* We can only be called to send once for each sequence.
*/
ep->f_ctl = f_ctl & ~FC_FC_FIRST_SEQ; /* not first seq */
if (f_ctl & FC_FC_SEQ_INIT)
ep->esb_stat &= ~ESB_ST_SEQ_INIT;
out:
return error;
}
/**
* fc_seq_send() - Send a frame using existing sequence/exchange pair
* @lport: The local port that the exchange will be sent on
* @sp: The sequence to be sent
* @fp: The frame to be sent on the exchange
*
* Note: The frame will be freed either by a direct call to fc_frame_free(fp)
* or indirectly by calling libfc_function_template.frame_send().
*/
int fc_seq_send(struct fc_lport *lport, struct fc_seq *sp, struct fc_frame *fp)
{
struct fc_exch *ep;
int error;
ep = fc_seq_exch(sp);
spin_lock_bh(&ep->ex_lock);
error = fc_seq_send_locked(lport, sp, fp);
spin_unlock_bh(&ep->ex_lock);
return error;
}
EXPORT_SYMBOL(fc_seq_send);
/**
* fc_seq_alloc() - Allocate a sequence for a given exchange
* @ep: The exchange to allocate a new sequence for
* @seq_id: The sequence ID to be used
*
* We don't support multiple originated sequences on the same exchange.
* By implication, any previously originated sequence on this exchange
* is complete, and we reallocate the same sequence.
*/
static struct fc_seq *fc_seq_alloc(struct fc_exch *ep, u8 seq_id)
{
struct fc_seq *sp;
sp = &ep->seq;
sp->ssb_stat = 0;
sp->cnt = 0;
sp->id = seq_id;
return sp;
}
/**
* fc_seq_start_next_locked() - Allocate a new sequence on the same
* exchange as the supplied sequence
* @sp: The sequence/exchange to get a new sequence for
*/
static struct fc_seq *fc_seq_start_next_locked(struct fc_seq *sp)
{
struct fc_exch *ep = fc_seq_exch(sp);
sp = fc_seq_alloc(ep, ep->seq_id++);
FC_EXCH_DBG(ep, "f_ctl %6x seq %2x\n",
ep->f_ctl, sp->id);
return sp;
}
/**
* fc_seq_start_next() - Lock the exchange and get a new sequence
* for a given sequence/exchange pair
* @sp: The sequence/exchange to get a new exchange for
*/
struct fc_seq *fc_seq_start_next(struct fc_seq *sp)
{
struct fc_exch *ep = fc_seq_exch(sp);
spin_lock_bh(&ep->ex_lock);
sp = fc_seq_start_next_locked(sp);
spin_unlock_bh(&ep->ex_lock);
return sp;
}
EXPORT_SYMBOL(fc_seq_start_next);
/*
* Set the response handler for the exchange associated with a sequence.
*
* Note: May sleep if invoked from outside a response handler.
*/
void fc_seq_set_resp(struct fc_seq *sp,
void (*resp)(struct fc_seq *, struct fc_frame *, void *),
void *arg)
{
struct fc_exch *ep = fc_seq_exch(sp);
DEFINE_WAIT(wait);
spin_lock_bh(&ep->ex_lock);
while (ep->resp_active && ep->resp_task != current) {
prepare_to_wait(&ep->resp_wq, &wait, TASK_UNINTERRUPTIBLE);
spin_unlock_bh(&ep->ex_lock);
schedule();
spin_lock_bh(&ep->ex_lock);
}
finish_wait(&ep->resp_wq, &wait);
ep->resp = resp;
ep->arg = arg;
spin_unlock_bh(&ep->ex_lock);
}
EXPORT_SYMBOL(fc_seq_set_resp);
/**
* fc_exch_abort_locked() - Abort an exchange
* @ep: The exchange to be aborted
* @timer_msec: The period of time to wait before aborting
*
* Abort an exchange and sequence. Generally called because of a
* exchange timeout or an abort from the upper layer.
*
* A timer_msec can be specified for abort timeout, if non-zero
* timer_msec value is specified then exchange resp handler
* will be called with timeout error if no response to abort.
*
* Locking notes: Called with exch lock held
*
* Return value: 0 on success else error code
*/
static int fc_exch_abort_locked(struct fc_exch *ep,
unsigned int timer_msec)
{
struct fc_seq *sp;
struct fc_frame *fp;
int error;
FC_EXCH_DBG(ep, "exch: abort, time %d msecs\n", timer_msec);
if (ep->esb_stat & (ESB_ST_COMPLETE | ESB_ST_ABNORMAL) ||
ep->state & (FC_EX_DONE | FC_EX_RST_CLEANUP)) {
FC_EXCH_DBG(ep, "exch: already completed esb %x state %x\n",
ep->esb_stat, ep->state);
return -ENXIO;
}
/*
* Send the abort on a new sequence if possible.
*/
sp = fc_seq_start_next_locked(&ep->seq);
if (!sp)
return -ENOMEM;
if (timer_msec)
fc_exch_timer_set_locked(ep, timer_msec);
if (ep->sid) {
/*
* Send an abort for the sequence that timed out.
*/
fp = fc_frame_alloc(ep->lp, 0);
if (fp) {
ep->esb_stat |= ESB_ST_SEQ_INIT;
fc_fill_fc_hdr(fp, FC_RCTL_BA_ABTS, ep->did, ep->sid,
FC_TYPE_BLS, FC_FC_END_SEQ |
FC_FC_SEQ_INIT, 0);
error = fc_seq_send_locked(ep->lp, sp, fp);
} else {
error = -ENOBUFS;
}
} else {
/*
* If not logged into the fabric, don't send ABTS but leave
* sequence active until next timeout.
*/
error = 0;
}
ep->esb_stat |= ESB_ST_ABNORMAL;
return error;
}
/**
* fc_seq_exch_abort() - Abort an exchange and sequence
* @req_sp: The sequence to be aborted
* @timer_msec: The period of time to wait before aborting
*
* Generally called because of a timeout or an abort from the upper layer.
*
* Return value: 0 on success else error code
*/
int fc_seq_exch_abort(const struct fc_seq *req_sp, unsigned int timer_msec)
{
struct fc_exch *ep;
int error;
ep = fc_seq_exch(req_sp);
spin_lock_bh(&ep->ex_lock);
error = fc_exch_abort_locked(ep, timer_msec);
spin_unlock_bh(&ep->ex_lock);
return error;
}
/**
* fc_invoke_resp() - invoke ep->resp()
* @ep: The exchange to be operated on
* @fp: The frame pointer to pass through to ->resp()
* @sp: The sequence pointer to pass through to ->resp()
*
* Notes:
* It is assumed that after initialization finished (this means the
* first unlock of ex_lock after fc_exch_alloc()) ep->resp and ep->arg are
 * modified only via fc_seq_set_resp(). This guarantees that neither of these
 * two variables changes while ep->resp_active > 0.
*
* If an fc_seq_set_resp() call is busy modifying ep->resp and ep->arg when
* this function is invoked, the first spin_lock_bh() call in this function
* will wait until fc_seq_set_resp() has finished modifying these variables.
*
 * Since fc_exch_done() invokes fc_seq_set_resp(), it is guaranteed that
 * ep->resp() won't be invoked after fc_exch_done() has returned.
*
* The response handler itself may invoke fc_exch_done(), which will clear the
* ep->resp pointer.
*
* Return value:
* Returns true if and only if ep->resp has been invoked.
*/
static bool fc_invoke_resp(struct fc_exch *ep, struct fc_seq *sp,
struct fc_frame *fp)
{
void (*resp)(struct fc_seq *, struct fc_frame *fp, void *arg);
void *arg;
bool res = false;
spin_lock_bh(&ep->ex_lock);
ep->resp_active++;
if (ep->resp_task != current)
ep->resp_task = !ep->resp_task ? current : NULL;
resp = ep->resp;
arg = ep->arg;
spin_unlock_bh(&ep->ex_lock);
if (resp) {
resp(sp, fp, arg);
res = true;
}
spin_lock_bh(&ep->ex_lock);
if (--ep->resp_active == 0)
ep->resp_task = NULL;
spin_unlock_bh(&ep->ex_lock);
if (ep->resp_active == 0)
wake_up(&ep->resp_wq);
return res;
}
/**
* fc_exch_timeout() - Handle exchange timer expiration
* @work: The work_struct identifying the exchange that timed out
*/
static void fc_exch_timeout(struct work_struct *work)
{
struct fc_exch *ep = container_of(work, struct fc_exch,
timeout_work.work);
struct fc_seq *sp = &ep->seq;
u32 e_stat;
int rc = 1;
FC_EXCH_DBG(ep, "Exchange timed out state %x\n", ep->state);
spin_lock_bh(&ep->ex_lock);
if (ep->state & (FC_EX_RST_CLEANUP | FC_EX_DONE))
goto unlock;
e_stat = ep->esb_stat;
if (e_stat & ESB_ST_COMPLETE) {
ep->esb_stat = e_stat & ~ESB_ST_REC_QUAL;
spin_unlock_bh(&ep->ex_lock);
if (e_stat & ESB_ST_REC_QUAL)
fc_exch_rrq(ep);
goto done;
} else {
if (e_stat & ESB_ST_ABNORMAL)
rc = fc_exch_done_locked(ep);
spin_unlock_bh(&ep->ex_lock);
if (!rc)
fc_exch_delete(ep);
fc_invoke_resp(ep, sp, ERR_PTR(-FC_EX_TIMEOUT));
fc_seq_set_resp(sp, NULL, ep->arg);
fc_seq_exch_abort(sp, 2 * ep->r_a_tov);
goto done;
}
unlock:
spin_unlock_bh(&ep->ex_lock);
done:
/*
* This release matches the hold taken when the timer was set.
*/
fc_exch_release(ep);
}
/**
* fc_exch_em_alloc() - Allocate an exchange from a specified EM.
* @lport: The local port that the exchange is for
* @mp: The exchange manager that will allocate the exchange
*
* Returns pointer to allocated fc_exch with exch lock held.
*/
static struct fc_exch *fc_exch_em_alloc(struct fc_lport *lport,
struct fc_exch_mgr *mp)
{
struct fc_exch *ep;
unsigned int cpu;
u16 index;
struct fc_exch_pool *pool;
/* allocate memory for exchange */
ep = mempool_alloc(mp->ep_pool, GFP_ATOMIC);
if (!ep) {
atomic_inc(&mp->stats.no_free_exch);
goto out;
}
memset(ep, 0, sizeof(*ep));
cpu = raw_smp_processor_id();
pool = per_cpu_ptr(mp->pool, cpu);
spin_lock_bh(&pool->lock);
/* peek cache of free slot */
if (pool->left != FC_XID_UNKNOWN) {
if (!WARN_ON(fc_exch_ptr_get(pool, pool->left))) {
index = pool->left;
pool->left = FC_XID_UNKNOWN;
goto hit;
}
}
if (pool->right != FC_XID_UNKNOWN) {
if (!WARN_ON(fc_exch_ptr_get(pool, pool->right))) {
index = pool->right;
pool->right = FC_XID_UNKNOWN;
goto hit;
}
}
index = pool->next_index;
/* allocate new exch from pool */
while (fc_exch_ptr_get(pool, index)) {
index = index == mp->pool_max_index ? 0 : index + 1;
if (index == pool->next_index)
goto err;
}
pool->next_index = index == mp->pool_max_index ? 0 : index + 1;
hit:
fc_exch_hold(ep); /* hold for exch in mp */
spin_lock_init(&ep->ex_lock);
/*
* Hold exch lock for caller to prevent fc_exch_reset()
* from releasing exch while fc_exch_alloc() caller is
* still working on exch.
*/
spin_lock_bh(&ep->ex_lock);
fc_exch_ptr_set(pool, index, ep);
list_add_tail(&ep->ex_list, &pool->ex_list);
fc_seq_alloc(ep, ep->seq_id++);
pool->total_exches++;
spin_unlock_bh(&pool->lock);
/*
* update exchange
*/
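	/*
	 * Encode both the pool slot and the allocating CPU in the XID.
	 * For example (assuming fc_cpu_order == 2, i.e. up to 4 CPUs),
	 * slot 5 on CPU 3 gives xid = (5 << 2 | 3) + min_xid.
	 */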
ep->oxid = ep->xid = (index << fc_cpu_order | cpu) + mp->min_xid;
ep->em = mp;
ep->pool = pool;
ep->lp = lport;
ep->f_ctl = FC_FC_FIRST_SEQ; /* next seq is first seq */
ep->rxid = FC_XID_UNKNOWN;
ep->class = mp->class;
ep->resp_active = 0;
init_waitqueue_head(&ep->resp_wq);
INIT_DELAYED_WORK(&ep->timeout_work, fc_exch_timeout);
out:
return ep;
err:
spin_unlock_bh(&pool->lock);
atomic_inc(&mp->stats.no_free_exch_xid);
mempool_free(ep, mp->ep_pool);
return NULL;
}
/**
* fc_exch_alloc() - Allocate an exchange from an EM on a
* local port's list of EMs.
* @lport: The local port that will own the exchange
* @fp: The FC frame that the exchange will be for
*
 * This function walks the list of exchange manager (EM)
* anchors to select an EM for a new exchange allocation. The
* EM is selected when a NULL match function pointer is encountered
* or when a call to a match function returns true.
*/
static struct fc_exch *fc_exch_alloc(struct fc_lport *lport,
struct fc_frame *fp)
{
struct fc_exch_mgr_anchor *ema;
struct fc_exch *ep;
list_for_each_entry(ema, &lport->ema_list, ema_list) {
if (!ema->match || ema->match(fp)) {
ep = fc_exch_em_alloc(lport, ema->mp);
if (ep)
return ep;
}
}
return NULL;
}
/**
* fc_exch_find() - Lookup and hold an exchange
* @mp: The exchange manager to lookup the exchange from
* @xid: The XID of the exchange to look up
*/
static struct fc_exch *fc_exch_find(struct fc_exch_mgr *mp, u16 xid)
{
struct fc_lport *lport = mp->lport;
struct fc_exch_pool *pool;
struct fc_exch *ep = NULL;
u16 cpu = xid & fc_cpu_mask;
if (xid == FC_XID_UNKNOWN)
return NULL;
if (cpu >= nr_cpu_ids || !cpu_possible(cpu)) {
pr_err("host%u: lport %6.6x: xid %d invalid CPU %d\n:",
lport->host->host_no, lport->port_id, xid, cpu);
return NULL;
}
if ((xid >= mp->min_xid) && (xid <= mp->max_xid)) {
pool = per_cpu_ptr(mp->pool, cpu);
spin_lock_bh(&pool->lock);
ep = fc_exch_ptr_get(pool, (xid - mp->min_xid) >> fc_cpu_order);
if (ep == &fc_quarantine_exch) {
FC_LPORT_DBG(lport, "xid %x quarantined\n", xid);
ep = NULL;
}
if (ep) {
WARN_ON(ep->xid != xid);
fc_exch_hold(ep);
}
spin_unlock_bh(&pool->lock);
}
return ep;
}
/**
* fc_exch_done() - Indicate that an exchange/sequence tuple is complete and
* the memory allocated for the related objects may be freed.
* @sp: The sequence that has completed
*
* Note: May sleep if invoked from outside a response handler.
*/
void fc_exch_done(struct fc_seq *sp)
{
struct fc_exch *ep = fc_seq_exch(sp);
int rc;
spin_lock_bh(&ep->ex_lock);
rc = fc_exch_done_locked(ep);
spin_unlock_bh(&ep->ex_lock);
fc_seq_set_resp(sp, NULL, ep->arg);
if (!rc)
fc_exch_delete(ep);
}
EXPORT_SYMBOL(fc_exch_done);
/**
* fc_exch_resp() - Allocate a new exchange for a response frame
* @lport: The local port that the exchange was for
* @mp: The exchange manager to allocate the exchange from
* @fp: The response frame
*
* Sets the responder ID in the frame header.
*/
static struct fc_exch *fc_exch_resp(struct fc_lport *lport,
struct fc_exch_mgr *mp,
struct fc_frame *fp)
{
struct fc_exch *ep;
struct fc_frame_header *fh;
ep = fc_exch_alloc(lport, fp);
if (ep) {
ep->class = fc_frame_class(fp);
/*
* Set EX_CTX indicating we're responding on this exchange.
*/
ep->f_ctl |= FC_FC_EX_CTX; /* we're responding */
ep->f_ctl &= ~FC_FC_FIRST_SEQ; /* not new */
fh = fc_frame_header_get(fp);
ep->sid = ntoh24(fh->fh_d_id);
ep->did = ntoh24(fh->fh_s_id);
ep->oid = ep->did;
/*
* Allocated exchange has placed the XID in the
* originator field. Move it to the responder field,
* and set the originator XID from the frame.
*/
ep->rxid = ep->xid;
ep->oxid = ntohs(fh->fh_ox_id);
ep->esb_stat |= ESB_ST_RESP | ESB_ST_SEQ_INIT;
if ((ntoh24(fh->fh_f_ctl) & FC_FC_SEQ_INIT) == 0)
ep->esb_stat &= ~ESB_ST_SEQ_INIT;
fc_exch_hold(ep); /* hold for caller */
spin_unlock_bh(&ep->ex_lock); /* lock from fc_exch_alloc */
}
return ep;
}
/**
* fc_seq_lookup_recip() - Find a sequence where the other end
* originated the sequence
* @lport: The local port that the frame was sent to
* @mp: The Exchange Manager to lookup the exchange from
* @fp: The frame associated with the sequence we're looking for
*
* If fc_pf_rjt_reason is FC_RJT_NONE then this function will have a hold
* on the ep that should be released by the caller.
*/
static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
struct fc_exch_mgr *mp,
struct fc_frame *fp)
{
struct fc_frame_header *fh = fc_frame_header_get(fp);
struct fc_exch *ep = NULL;
struct fc_seq *sp = NULL;
enum fc_pf_rjt_reason reject = FC_RJT_NONE;
u32 f_ctl;
u16 xid;
f_ctl = ntoh24(fh->fh_f_ctl);
WARN_ON((f_ctl & FC_FC_SEQ_CTX) != 0);
/*
* Lookup or create the exchange if we will be creating the sequence.
*/
if (f_ctl & FC_FC_EX_CTX) {
xid = ntohs(fh->fh_ox_id); /* we originated exch */
ep = fc_exch_find(mp, xid);
if (!ep) {
atomic_inc(&mp->stats.xid_not_found);
reject = FC_RJT_OX_ID;
goto out;
}
if (ep->rxid == FC_XID_UNKNOWN)
ep->rxid = ntohs(fh->fh_rx_id);
else if (ep->rxid != ntohs(fh->fh_rx_id)) {
reject = FC_RJT_OX_ID;
goto rel;
}
} else {
xid = ntohs(fh->fh_rx_id); /* we are the responder */
/*
* Special case for MDS issuing an ELS TEST with a
* bad rxid of 0.
* XXX take this out once we do the proper reject.
*/
if (xid == 0 && fh->fh_r_ctl == FC_RCTL_ELS_REQ &&
fc_frame_payload_op(fp) == ELS_TEST) {
fh->fh_rx_id = htons(FC_XID_UNKNOWN);
xid = FC_XID_UNKNOWN;
}
/*
* new sequence - find the exchange
*/
ep = fc_exch_find(mp, xid);
if ((f_ctl & FC_FC_FIRST_SEQ) && fc_sof_is_init(fr_sof(fp))) {
if (ep) {
atomic_inc(&mp->stats.xid_busy);
reject = FC_RJT_RX_ID;
goto rel;
}
ep = fc_exch_resp(lport, mp, fp);
if (!ep) {
reject = FC_RJT_EXCH_EST; /* XXX */
goto out;
}
xid = ep->xid; /* get our XID */
} else if (!ep) {
atomic_inc(&mp->stats.xid_not_found);
reject = FC_RJT_RX_ID; /* XID not found */
goto out;
}
}
spin_lock_bh(&ep->ex_lock);
/*
* At this point, we have the exchange held.
* Find or create the sequence.
*/
if (fc_sof_is_init(fr_sof(fp))) {
sp = &ep->seq;
sp->ssb_stat |= SSB_ST_RESP;
sp->id = fh->fh_seq_id;
} else {
sp = &ep->seq;
if (sp->id != fh->fh_seq_id) {
atomic_inc(&mp->stats.seq_not_found);
if (f_ctl & FC_FC_END_SEQ) {
/*
				 * Update the sequence ID from the incoming
				 * last frame of the sequence.  This is needed
				 * for FC targets using DDP, where the stack is
				 * only told about the last frame's header (the
				 * payload header).  The seq_id in that frame
				 * header was allocated by the initiator and is
				 * completely different from the seq_id that
				 * was allocated when the target sent XFER_RDY.
				 * Without this update the lookup would fail,
				 * the RSP would never be sent, and the write
				 * request on the other end would never finish.
*/
sp->ssb_stat |= SSB_ST_RESP;
sp->id = fh->fh_seq_id;
} else {
spin_unlock_bh(&ep->ex_lock);
/* sequence/exch should exist */
reject = FC_RJT_SEQ_ID;
goto rel;
}
}
}
WARN_ON(ep != fc_seq_exch(sp));
if (f_ctl & FC_FC_SEQ_INIT)
ep->esb_stat |= ESB_ST_SEQ_INIT;
spin_unlock_bh(&ep->ex_lock);
fr_seq(fp) = sp;
out:
return reject;
rel:
fc_exch_done(&ep->seq);
fc_exch_release(ep); /* hold from fc_exch_find/fc_exch_resp */
return reject;
}
/**
* fc_seq_lookup_orig() - Find a sequence where this end
* originated the sequence
* @mp: The Exchange Manager to lookup the exchange from
* @fp: The frame associated with the sequence we're looking for
*
* Does not hold the sequence for the caller.
*/
static struct fc_seq *fc_seq_lookup_orig(struct fc_exch_mgr *mp,
struct fc_frame *fp)
{
struct fc_frame_header *fh = fc_frame_header_get(fp);
struct fc_exch *ep;
struct fc_seq *sp = NULL;
u32 f_ctl;
u16 xid;
f_ctl = ntoh24(fh->fh_f_ctl);
WARN_ON((f_ctl & FC_FC_SEQ_CTX) != FC_FC_SEQ_CTX);
xid = ntohs((f_ctl & FC_FC_EX_CTX) ? fh->fh_ox_id : fh->fh_rx_id);
ep = fc_exch_find(mp, xid);
if (!ep)
return NULL;
if (ep->seq.id == fh->fh_seq_id) {
/*
* Save the RX_ID if we didn't previously know it.
*/
sp = &ep->seq;
if ((f_ctl & FC_FC_EX_CTX) != 0 &&
ep->rxid == FC_XID_UNKNOWN) {
ep->rxid = ntohs(fh->fh_rx_id);
}
}
fc_exch_release(ep);
return sp;
}
/**
* fc_exch_set_addr() - Set the source and destination IDs for an exchange
* @ep: The exchange to set the addresses for
* @orig_id: The originator's ID
* @resp_id: The responder's ID
*
* Note this must be done before the first sequence of the exchange is sent.
*/
static void fc_exch_set_addr(struct fc_exch *ep,
u32 orig_id, u32 resp_id)
{
ep->oid = orig_id;
if (ep->esb_stat & ESB_ST_RESP) {
ep->sid = resp_id;
ep->did = orig_id;
} else {
ep->sid = orig_id;
ep->did = resp_id;
}
}
/**
* fc_seq_els_rsp_send() - Send an ELS response using information from
* the existing sequence/exchange.
* @fp: The received frame
* @els_cmd: The ELS command to be sent
* @els_data: The ELS data to be sent
*
* The received frame is not freed.
*/
void fc_seq_els_rsp_send(struct fc_frame *fp, enum fc_els_cmd els_cmd,
struct fc_seq_els_data *els_data)
{
switch (els_cmd) {
case ELS_LS_RJT:
fc_seq_ls_rjt(fp, els_data->reason, els_data->explan);
break;
case ELS_LS_ACC:
fc_seq_ls_acc(fp);
break;
case ELS_RRQ:
fc_exch_els_rrq(fp);
break;
case ELS_REC:
fc_exch_els_rec(fp);
break;
default:
FC_LPORT_DBG(fr_dev(fp), "Invalid ELS CMD:%x\n", els_cmd);
}
}
EXPORT_SYMBOL_GPL(fc_seq_els_rsp_send);
/**
* fc_seq_send_last() - Send a sequence that is the last in the exchange
* @sp: The sequence that is to be sent
* @fp: The frame that will be sent on the sequence
* @rctl: The R_CTL information to be sent
* @fh_type: The frame header type
*/
static void fc_seq_send_last(struct fc_seq *sp, struct fc_frame *fp,
enum fc_rctl rctl, enum fc_fh_type fh_type)
{
u32 f_ctl;
struct fc_exch *ep = fc_seq_exch(sp);
f_ctl = FC_FC_LAST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT;
f_ctl |= ep->f_ctl;
fc_fill_fc_hdr(fp, rctl, ep->did, ep->sid, fh_type, f_ctl, 0);
fc_seq_send_locked(ep->lp, sp, fp);
}
/**
* fc_seq_send_ack() - Send an acknowledgement that we've received a frame
* @sp: The sequence to send the ACK on
* @rx_fp: The received frame that is being acknowledged
*
* Send ACK_1 (or equiv.) indicating we received something.
*/
static void fc_seq_send_ack(struct fc_seq *sp, const struct fc_frame *rx_fp)
{
struct fc_frame *fp;
struct fc_frame_header *rx_fh;
struct fc_frame_header *fh;
struct fc_exch *ep = fc_seq_exch(sp);
struct fc_lport *lport = ep->lp;
unsigned int f_ctl;
/*
* Don't send ACKs for class 3.
*/
if (fc_sof_needs_ack(fr_sof(rx_fp))) {
fp = fc_frame_alloc(lport, 0);
if (!fp) {
FC_EXCH_DBG(ep, "Drop ACK request, out of memory\n");
return;
}
fh = fc_frame_header_get(fp);
fh->fh_r_ctl = FC_RCTL_ACK_1;
fh->fh_type = FC_TYPE_BLS;
/*
* Form f_ctl by inverting EX_CTX and SEQ_CTX (bits 23, 22).
* Echo FIRST_SEQ, LAST_SEQ, END_SEQ, END_CONN, SEQ_INIT.
* Bits 9-8 are meaningful (retransmitted or unidirectional).
* Last ACK uses bits 7-6 (continue sequence),
* bits 5-4 are meaningful (what kind of ACK to use).
*/
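/*
 * Illustrative example (added note, not in the original code): if the
 * received frame came from the sequence initiator with neither EX_CTX
 * nor SEQ_CTX set, the XOR below sets both bits in the ACK, marking us
 * as the exchange responder and sequence recipient.
 */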
rx_fh = fc_frame_header_get(rx_fp);
f_ctl = ntoh24(rx_fh->fh_f_ctl);
f_ctl &= FC_FC_EX_CTX | FC_FC_SEQ_CTX |
FC_FC_FIRST_SEQ | FC_FC_LAST_SEQ |
FC_FC_END_SEQ | FC_FC_END_CONN | FC_FC_SEQ_INIT |
FC_FC_RETX_SEQ | FC_FC_UNI_TX;
f_ctl ^= FC_FC_EX_CTX | FC_FC_SEQ_CTX;
hton24(fh->fh_f_ctl, f_ctl);
fc_exch_setup_hdr(ep, fp, f_ctl);
fh->fh_seq_id = rx_fh->fh_seq_id;
fh->fh_seq_cnt = rx_fh->fh_seq_cnt;
fh->fh_parm_offset = htonl(1); /* ack single frame */
fr_sof(fp) = fr_sof(rx_fp);
if (f_ctl & FC_FC_END_SEQ)
fr_eof(fp) = FC_EOF_T;
else
fr_eof(fp) = FC_EOF_N;
lport->tt.frame_send(lport, fp);
}
}
/**
* fc_exch_send_ba_rjt() - Send BLS Reject
* @rx_fp: The frame being rejected
* @reason: The reason the frame is being rejected
* @explan: The explanation for the rejection
*
* This is for rejecting BA_ABTS only.
*/
static void fc_exch_send_ba_rjt(struct fc_frame *rx_fp,
enum fc_ba_rjt_reason reason,
enum fc_ba_rjt_explan explan)
{
struct fc_frame *fp;
struct fc_frame_header *rx_fh;
struct fc_frame_header *fh;
struct fc_ba_rjt *rp;
struct fc_seq *sp;
struct fc_lport *lport;
unsigned int f_ctl;
lport = fr_dev(rx_fp);
sp = fr_seq(rx_fp);
fp = fc_frame_alloc(lport, sizeof(*rp));
if (!fp) {
FC_EXCH_DBG(fc_seq_exch(sp),
"Drop BA_RJT request, out of memory\n");
return;
}
fh = fc_frame_header_get(fp);
rx_fh = fc_frame_header_get(rx_fp);
memset(fh, 0, sizeof(*fh) + sizeof(*rp));
rp = fc_frame_payload_get(fp, sizeof(*rp));
rp->br_reason = reason;
rp->br_explan = explan;
/*
* seq_id, cs_ctl, df_ctl and param/offset are zero.
*/
memcpy(fh->fh_s_id, rx_fh->fh_d_id, 3);
memcpy(fh->fh_d_id, rx_fh->fh_s_id, 3);
fh->fh_ox_id = rx_fh->fh_ox_id;
fh->fh_rx_id = rx_fh->fh_rx_id;
fh->fh_seq_cnt = rx_fh->fh_seq_cnt;
fh->fh_r_ctl = FC_RCTL_BA_RJT;
fh->fh_type = FC_TYPE_BLS;
/*
* Form f_ctl by inverting EX_CTX and SEQ_CTX (bits 23, 22).
* Echo FIRST_SEQ, LAST_SEQ, END_SEQ, END_CONN, SEQ_INIT.
* Bits 9-8 are meaningful (retransmitted or unidirectional).
* Last ACK uses bits 7-6 (continue sequence),
* bits 5-4 are meaningful (what kind of ACK to use).
* Always set LAST_SEQ, END_SEQ.
*/
f_ctl = ntoh24(rx_fh->fh_f_ctl);
f_ctl &= FC_FC_EX_CTX | FC_FC_SEQ_CTX |
FC_FC_END_CONN | FC_FC_SEQ_INIT |
FC_FC_RETX_SEQ | FC_FC_UNI_TX;
f_ctl ^= FC_FC_EX_CTX | FC_FC_SEQ_CTX;
f_ctl |= FC_FC_LAST_SEQ | FC_FC_END_SEQ;
f_ctl &= ~FC_FC_FIRST_SEQ;
hton24(fh->fh_f_ctl, f_ctl);
fr_sof(fp) = fc_sof_class(fr_sof(rx_fp));
fr_eof(fp) = FC_EOF_T;
if (fc_sof_needs_ack(fr_sof(fp)))
fr_eof(fp) = FC_EOF_N;
lport->tt.frame_send(lport, fp);
}
/**
* fc_exch_recv_abts() - Handle an incoming ABTS
* @ep: The exchange the abort was on
* @rx_fp: The ABTS frame
*
* This would be for target mode usually, but could be due to lost
* FCP transfer ready, confirm or RRQ. We always handle this as an
* exchange abort, ignoring the parameter.
*/
static void fc_exch_recv_abts(struct fc_exch *ep, struct fc_frame *rx_fp)
{
struct fc_frame *fp;
struct fc_ba_acc *ap;
struct fc_frame_header *fh;
struct fc_seq *sp;
if (!ep)
goto reject;
FC_EXCH_DBG(ep, "exch: ABTS received\n");
fp = fc_frame_alloc(ep->lp, sizeof(*ap));
if (!fp) {
FC_EXCH_DBG(ep, "Drop ABTS request, out of memory\n");
goto free;
}
spin_lock_bh(&ep->ex_lock);
if (ep->esb_stat & ESB_ST_COMPLETE) {
spin_unlock_bh(&ep->ex_lock);
FC_EXCH_DBG(ep, "exch: ABTS rejected, exchange complete\n");
fc_frame_free(fp);
goto reject;
}
if (!(ep->esb_stat & ESB_ST_REC_QUAL)) {
ep->esb_stat |= ESB_ST_REC_QUAL;
fc_exch_hold(ep); /* hold for REC_QUAL */
}
fc_exch_timer_set_locked(ep, ep->r_a_tov);
fh = fc_frame_header_get(fp);
ap = fc_frame_payload_get(fp, sizeof(*ap));
memset(ap, 0, sizeof(*ap));
sp = &ep->seq;
ap->ba_high_seq_cnt = htons(0xffff);
if (sp->ssb_stat & SSB_ST_RESP) {
ap->ba_seq_id = sp->id;
ap->ba_seq_id_val = FC_BA_SEQ_ID_VAL;
ap->ba_high_seq_cnt = fh->fh_seq_cnt;
ap->ba_low_seq_cnt = htons(sp->cnt);
}
sp = fc_seq_start_next_locked(sp);
fc_seq_send_last(sp, fp, FC_RCTL_BA_ACC, FC_TYPE_BLS);
ep->esb_stat |= ESB_ST_ABNORMAL;
spin_unlock_bh(&ep->ex_lock);
free:
fc_frame_free(rx_fp);
return;
reject:
fc_exch_send_ba_rjt(rx_fp, FC_BA_RJT_UNABLE, FC_BA_RJT_INV_XID);
goto free;
}
/**
* fc_seq_assign() - Assign exchange and sequence for incoming request
* @lport: The local port that received the request
* @fp: The request frame
*
* On success, the sequence pointer will be returned and also in fr_seq(@fp).
* A reference will be held on the exchange/sequence for the caller, which
* must call fc_seq_release().
*/
struct fc_seq *fc_seq_assign(struct fc_lport *lport, struct fc_frame *fp)
{
struct fc_exch_mgr_anchor *ema;
WARN_ON(lport != fr_dev(fp));
WARN_ON(fr_seq(fp));
fr_seq(fp) = NULL;
list_for_each_entry(ema, &lport->ema_list, ema_list)
if ((!ema->match || ema->match(fp)) &&
fc_seq_lookup_recip(lport, ema->mp, fp) == FC_RJT_NONE)
break;
return fr_seq(fp);
}
EXPORT_SYMBOL(fc_seq_assign);
/**
* fc_seq_release() - Release the hold
* @sp: The sequence.
*/
void fc_seq_release(struct fc_seq *sp)
{
fc_exch_release(fc_seq_exch(sp));
}
EXPORT_SYMBOL(fc_seq_release);
/**
* fc_exch_recv_req() - Handler for an incoming request
* @lport: The local port that received the request
* @mp: The EM that the exchange is on
* @fp: The request frame
*
* This is used when the other end is originating the exchange
* and the sequence.
*/
static void fc_exch_recv_req(struct fc_lport *lport, struct fc_exch_mgr *mp,
struct fc_frame *fp)
{
struct fc_frame_header *fh = fc_frame_header_get(fp);
struct fc_seq *sp = NULL;
struct fc_exch *ep = NULL;
enum fc_pf_rjt_reason reject;
/* We can have the wrong fc_lport at this point with NPIV, which is a
* problem now that we know a new exchange needs to be allocated
*/
lport = fc_vport_id_lookup(lport, ntoh24(fh->fh_d_id));
if (!lport) {
fc_frame_free(fp);
return;
}
fr_dev(fp) = lport;
BUG_ON(fr_seq(fp)); /* XXX remove later */
/*
* If the RX_ID is 0xffff, don't allocate an exchange.
* The upper-level protocol may request one later, if needed.
*/
if (fh->fh_rx_id == htons(FC_XID_UNKNOWN))
return fc_lport_recv(lport, fp);
reject = fc_seq_lookup_recip(lport, mp, fp);
if (reject == FC_RJT_NONE) {
sp = fr_seq(fp); /* sequence will be held */
ep = fc_seq_exch(sp);
fc_seq_send_ack(sp, fp);
ep->encaps = fr_encaps(fp);
/*
* Call the receive function.
*
* The receive function may allocate a new sequence
* over the old one, so we shouldn't change the
* sequence after this.
*
* The frame will be freed by the receive function.
* If a new exchange response handler has been set, call that first.
*/
if (!fc_invoke_resp(ep, sp, fp))
fc_lport_recv(lport, fp);
fc_exch_release(ep); /* release from lookup */
} else {
FC_LPORT_DBG(lport, "exch/seq lookup failed: reject %x\n",
reject);
fc_frame_free(fp);
}
}
/**
* fc_exch_recv_seq_resp() - Handler for an incoming response where the other
* end is the originator of the sequence that is a
* response to our initial exchange
* @mp: The EM that the exchange is on
* @fp: The response frame
*/
static void fc_exch_recv_seq_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
{
struct fc_frame_header *fh = fc_frame_header_get(fp);
struct fc_seq *sp;
struct fc_exch *ep;
enum fc_sof sof;
u32 f_ctl;
int rc;
ep = fc_exch_find(mp, ntohs(fh->fh_ox_id));
if (!ep) {
atomic_inc(&mp->stats.xid_not_found);
goto out;
}
if (ep->esb_stat & ESB_ST_COMPLETE) {
atomic_inc(&mp->stats.xid_not_found);
goto rel;
}
if (ep->rxid == FC_XID_UNKNOWN)
ep->rxid = ntohs(fh->fh_rx_id);
if (ep->sid != 0 && ep->sid != ntoh24(fh->fh_d_id)) {
atomic_inc(&mp->stats.xid_not_found);
goto rel;
}
if (ep->did != ntoh24(fh->fh_s_id) &&
ep->did != FC_FID_FLOGI) {
atomic_inc(&mp->stats.xid_not_found);
goto rel;
}
sof = fr_sof(fp);
sp = &ep->seq;
if (fc_sof_is_init(sof)) {
sp->ssb_stat |= SSB_ST_RESP;
sp->id = fh->fh_seq_id;
}
f_ctl = ntoh24(fh->fh_f_ctl);
fr_seq(fp) = sp;
spin_lock_bh(&ep->ex_lock);
if (f_ctl & FC_FC_SEQ_INIT)
ep->esb_stat |= ESB_ST_SEQ_INIT;
spin_unlock_bh(&ep->ex_lock);
if (fc_sof_needs_ack(sof))
fc_seq_send_ack(sp, fp);
if (fh->fh_type != FC_TYPE_FCP && fr_eof(fp) == FC_EOF_T &&
(f_ctl & (FC_FC_LAST_SEQ | FC_FC_END_SEQ)) ==
(FC_FC_LAST_SEQ | FC_FC_END_SEQ)) {
spin_lock_bh(&ep->ex_lock);
rc = fc_exch_done_locked(ep);
WARN_ON(fc_seq_exch(sp) != ep);
spin_unlock_bh(&ep->ex_lock);
if (!rc) {
fc_exch_delete(ep);
} else {
FC_EXCH_DBG(ep, "ep is completed already, "
"hence skip calling the resp\n");
goto skip_resp;
}
}
/*
* Call the receive function.
* The sequence is held (has a refcnt) for us,
* but not for the receive function.
*
* The receive function may allocate a new sequence
* over the old one, so we shouldn't change the
* sequence after this.
*
* The frame will be freed by the receive function.
* If a new exchange response handler has been set, call that first.
*/
if (!fc_invoke_resp(ep, sp, fp))
fc_frame_free(fp);
skip_resp:
fc_exch_release(ep);
return;
rel:
fc_exch_release(ep);
out:
fc_frame_free(fp);
}
/**
* fc_exch_recv_resp() - Handler for a sequence where other end is
* responding to our sequence
* @mp: The EM that the exchange is on
* @fp: The response frame
*/
static void fc_exch_recv_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
{
struct fc_seq *sp;
sp = fc_seq_lookup_orig(mp, fp); /* doesn't hold sequence */
if (!sp)
atomic_inc(&mp->stats.xid_not_found);
else
atomic_inc(&mp->stats.non_bls_resp);
fc_frame_free(fp);
}
/**
* fc_exch_abts_resp() - Handler for a response to an ABTS
* @ep: The exchange that the frame is on
* @fp: The response frame
*
* This response would be to an ABTS cancelling an exchange or sequence.
* The response can be either BA_ACC or BA_RJT.
*/
static void fc_exch_abts_resp(struct fc_exch *ep, struct fc_frame *fp)
{
struct fc_frame_header *fh;
struct fc_ba_acc *ap;
struct fc_seq *sp;
u16 low;
u16 high;
int rc = 1, has_rec = 0;
fh = fc_frame_header_get(fp);
FC_EXCH_DBG(ep, "exch: BLS rctl %x - %s\n", fh->fh_r_ctl,
fc_exch_rctl_name(fh->fh_r_ctl));
if (cancel_delayed_work_sync(&ep->timeout_work)) {
FC_EXCH_DBG(ep, "Exchange timer canceled due to ABTS response\n");
fc_exch_release(ep); /* release from pending timer hold */
return;
}
spin_lock_bh(&ep->ex_lock);
switch (fh->fh_r_ctl) {
case FC_RCTL_BA_ACC:
ap = fc_frame_payload_get(fp, sizeof(*ap));
if (!ap)
break;
/*
* Decide whether to establish a Recovery Qualifier.
* We do this if there is a non-empty SEQ_CNT range and
* SEQ_ID is the same as the one we aborted.
*/
low = ntohs(ap->ba_low_seq_cnt);
high = ntohs(ap->ba_high_seq_cnt);
if ((ep->esb_stat & ESB_ST_REC_QUAL) == 0 &&
(ap->ba_seq_id_val != FC_BA_SEQ_ID_VAL ||
ap->ba_seq_id == ep->seq_id) && low != high) {
ep->esb_stat |= ESB_ST_REC_QUAL;
fc_exch_hold(ep); /* hold for recovery qualifier */
has_rec = 1;
}
break;
case FC_RCTL_BA_RJT:
break;
default:
break;
}
/*
 * TODO: do we need any additional checks here? Can we reuse more of
 * fc_exch_recv_seq_resp()?
 */
sp = &ep->seq;
/*
* do we want to check END_SEQ as well as LAST_SEQ here?
*/
if (ep->fh_type != FC_TYPE_FCP &&
ntoh24(fh->fh_f_ctl) & FC_FC_LAST_SEQ)
rc = fc_exch_done_locked(ep);
spin_unlock_bh(&ep->ex_lock);
fc_exch_hold(ep);
if (!rc)
fc_exch_delete(ep);
if (!fc_invoke_resp(ep, sp, fp))
fc_frame_free(fp);
if (has_rec)
fc_exch_timer_set(ep, ep->r_a_tov);
fc_exch_release(ep);
}
/**
* fc_exch_recv_bls() - Handler for a BLS sequence
* @mp: The EM that the exchange is on
* @fp: The request frame
*
* The BLS frame is always a sequence initiated by the remote side.
* We may be either the originator or recipient of the exchange.
*/
static void fc_exch_recv_bls(struct fc_exch_mgr *mp, struct fc_frame *fp)
{
struct fc_frame_header *fh;
struct fc_exch *ep;
u32 f_ctl;
fh = fc_frame_header_get(fp);
f_ctl = ntoh24(fh->fh_f_ctl);
fr_seq(fp) = NULL;
ep = fc_exch_find(mp, (f_ctl & FC_FC_EX_CTX) ?
ntohs(fh->fh_ox_id) : ntohs(fh->fh_rx_id));
if (ep && (f_ctl & FC_FC_SEQ_INIT)) {
spin_lock_bh(&ep->ex_lock);
ep->esb_stat |= ESB_ST_SEQ_INIT;
spin_unlock_bh(&ep->ex_lock);
}
if (f_ctl & FC_FC_SEQ_CTX) {
/*
* A response to a sequence we initiated.
* This should only be ACKs for class 2 or F.
*/
switch (fh->fh_r_ctl) {
case FC_RCTL_ACK_1:
case FC_RCTL_ACK_0:
break;
default:
if (ep)
FC_EXCH_DBG(ep, "BLS rctl %x - %s received\n",
fh->fh_r_ctl,
fc_exch_rctl_name(fh->fh_r_ctl));
break;
}
fc_frame_free(fp);
} else {
switch (fh->fh_r_ctl) {
case FC_RCTL_BA_RJT:
case FC_RCTL_BA_ACC:
if (ep)
fc_exch_abts_resp(ep, fp);
else
fc_frame_free(fp);
break;
case FC_RCTL_BA_ABTS:
if (ep)
fc_exch_recv_abts(ep, fp);
else
fc_frame_free(fp);
break;
default: /* ignore junk */
fc_frame_free(fp);
break;
}
}
if (ep)
fc_exch_release(ep); /* release hold taken by fc_exch_find */
}
/**
* fc_seq_ls_acc() - Accept sequence with LS_ACC
* @rx_fp: The received frame, not freed here.
*
* If this fails due to allocation or transmit congestion, assume the
* originator will repeat the sequence.
*/
static void fc_seq_ls_acc(struct fc_frame *rx_fp)
{
struct fc_lport *lport;
struct fc_els_ls_acc *acc;
struct fc_frame *fp;
struct fc_seq *sp;
lport = fr_dev(rx_fp);
sp = fr_seq(rx_fp);
fp = fc_frame_alloc(lport, sizeof(*acc));
if (!fp) {
FC_EXCH_DBG(fc_seq_exch(sp),
"exch: drop LS_ACC, out of memory\n");
return;
}
acc = fc_frame_payload_get(fp, sizeof(*acc));
memset(acc, 0, sizeof(*acc));
acc->la_cmd = ELS_LS_ACC;
fc_fill_reply_hdr(fp, rx_fp, FC_RCTL_ELS_REP, 0);
lport->tt.frame_send(lport, fp);
}
/**
* fc_seq_ls_rjt() - Reject a sequence with ELS LS_RJT
* @rx_fp: The received frame, not freed here.
* @reason: The reason the sequence is being rejected
* @explan: The explanation for the rejection
*
* If this fails due to allocation or transmit congestion, assume the
* originator will repeat the sequence.
*/
static void fc_seq_ls_rjt(struct fc_frame *rx_fp, enum fc_els_rjt_reason reason,
enum fc_els_rjt_explan explan)
{
struct fc_lport *lport;
struct fc_els_ls_rjt *rjt;
struct fc_frame *fp;
struct fc_seq *sp;
lport = fr_dev(rx_fp);
sp = fr_seq(rx_fp);
fp = fc_frame_alloc(lport, sizeof(*rjt));
if (!fp) {
FC_EXCH_DBG(fc_seq_exch(sp),
"exch: drop LS_ACC, out of memory\n");
return;
}
rjt = fc_frame_payload_get(fp, sizeof(*rjt));
memset(rjt, 0, sizeof(*rjt));
rjt->er_cmd = ELS_LS_RJT;
rjt->er_reason = reason;
rjt->er_explan = explan;
fc_fill_reply_hdr(fp, rx_fp, FC_RCTL_ELS_REP, 0);
lport->tt.frame_send(lport, fp);
}
/**
* fc_exch_reset() - Reset an exchange
* @ep: The exchange to be reset
*
* Note: May sleep if invoked from outside a response handler.
*/
static void fc_exch_reset(struct fc_exch *ep)
{
struct fc_seq *sp;
int rc = 1;
spin_lock_bh(&ep->ex_lock);
ep->state |= FC_EX_RST_CLEANUP;
fc_exch_timer_cancel(ep);
if (ep->esb_stat & ESB_ST_REC_QUAL)
atomic_dec(&ep->ex_refcnt); /* drop hold for rec_qual */
ep->esb_stat &= ~ESB_ST_REC_QUAL;
sp = &ep->seq;
rc = fc_exch_done_locked(ep);
spin_unlock_bh(&ep->ex_lock);
fc_exch_hold(ep);
if (!rc) {
fc_exch_delete(ep);
} else {
FC_EXCH_DBG(ep, "ep is completed already, "
"hence skip calling the resp\n");
goto skip_resp;
}
fc_invoke_resp(ep, sp, ERR_PTR(-FC_EX_CLOSED));
skip_resp:
fc_seq_set_resp(sp, NULL, ep->arg);
fc_exch_release(ep);
}
/**
* fc_exch_pool_reset() - Reset a per cpu exchange pool
* @lport: The local port that the exchange pool is on
* @pool: The exchange pool to be reset
* @sid: The source ID
* @did: The destination ID
*
* Resets a per-CPU exchange pool, releasing all of its sequences
* and exchanges. If sid is non-zero then reset only exchanges
* we sourced from the local port's FID. If did is non-zero then
* only reset exchanges destined for the local port's FID.
*/
static void fc_exch_pool_reset(struct fc_lport *lport,
struct fc_exch_pool *pool,
u32 sid, u32 did)
{
struct fc_exch *ep;
struct fc_exch *next;
spin_lock_bh(&pool->lock);
restart:
list_for_each_entry_safe(ep, next, &pool->ex_list, ex_list) {
if ((lport == ep->lp) &&
(sid == 0 || sid == ep->sid) &&
(did == 0 || did == ep->did)) {
fc_exch_hold(ep);
spin_unlock_bh(&pool->lock);
fc_exch_reset(ep);
fc_exch_release(ep);
spin_lock_bh(&pool->lock);
/*
* must restart the loop since multiple exchanges may have
* been released while the lock was dropped.
*/
goto restart;
}
}
pool->next_index = 0;
pool->left = FC_XID_UNKNOWN;
pool->right = FC_XID_UNKNOWN;
spin_unlock_bh(&pool->lock);
}
/**
* fc_exch_mgr_reset() - Reset all EMs of a local port
* @lport: The local port whose EMs are to be reset
* @sid: The source ID
* @did: The destination ID
*
* Reset all EMs associated with a given local port. Release all
* sequences and exchanges. If sid is non-zero then reset only the
* exchanges sent from the local port's FID. If did is non-zero then
* reset only exchanges destined for the local port's FID.
*/
void fc_exch_mgr_reset(struct fc_lport *lport, u32 sid, u32 did)
{
struct fc_exch_mgr_anchor *ema;
unsigned int cpu;
list_for_each_entry(ema, &lport->ema_list, ema_list) {
for_each_possible_cpu(cpu)
fc_exch_pool_reset(lport,
per_cpu_ptr(ema->mp->pool, cpu),
sid, did);
}
}
EXPORT_SYMBOL(fc_exch_mgr_reset);
/**
* fc_exch_lookup() - find an exchange
* @lport: The local port
* @xid: The exchange ID
*
* Returns exchange pointer with hold for caller, or NULL if not found.
*/
static struct fc_exch *fc_exch_lookup(struct fc_lport *lport, u32 xid)
{
struct fc_exch_mgr_anchor *ema;
list_for_each_entry(ema, &lport->ema_list, ema_list)
if (ema->mp->min_xid <= xid && xid <= ema->mp->max_xid)
return fc_exch_find(ema->mp, xid);
return NULL;
}
/**
* fc_exch_els_rec() - Handler for ELS REC (Read Exchange Concise) requests
* @rfp: The REC frame, not freed here.
*
* Note that the requesting port may be different from the S_ID in the request.
*/
static void fc_exch_els_rec(struct fc_frame *rfp)
{
struct fc_lport *lport;
struct fc_frame *fp;
struct fc_exch *ep;
struct fc_els_rec *rp;
struct fc_els_rec_acc *acc;
enum fc_els_rjt_reason reason = ELS_RJT_LOGIC;
enum fc_els_rjt_explan explan;
u32 sid;
u16 xid, rxid, oxid;
lport = fr_dev(rfp);
rp = fc_frame_payload_get(rfp, sizeof(*rp));
explan = ELS_EXPL_INV_LEN;
if (!rp)
goto reject;
sid = ntoh24(rp->rec_s_id);
rxid = ntohs(rp->rec_rx_id);
oxid = ntohs(rp->rec_ox_id);
explan = ELS_EXPL_OXID_RXID;
if (sid == fc_host_port_id(lport->host))
xid = oxid;
else
xid = rxid;
if (xid == FC_XID_UNKNOWN) {
FC_LPORT_DBG(lport,
"REC request from %x: invalid rxid %x oxid %x\n",
sid, rxid, oxid);
goto reject;
}
ep = fc_exch_lookup(lport, xid);
if (!ep) {
FC_LPORT_DBG(lport,
"REC request from %x: rxid %x oxid %x not found\n",
sid, rxid, oxid);
goto reject;
}
FC_EXCH_DBG(ep, "REC request from %x: rxid %x oxid %x\n",
sid, rxid, oxid);
if (ep->oid != sid || oxid != ep->oxid)
goto rel;
if (rxid != FC_XID_UNKNOWN && rxid != ep->rxid)
goto rel;
fp = fc_frame_alloc(lport, sizeof(*acc));
if (!fp) {
FC_EXCH_DBG(ep, "Drop REC request, out of memory\n");
goto out;
}
acc = fc_frame_payload_get(fp, sizeof(*acc));
memset(acc, 0, sizeof(*acc));
acc->reca_cmd = ELS_LS_ACC;
acc->reca_ox_id = rp->rec_ox_id;
memcpy(acc->reca_ofid, rp->rec_s_id, 3);
acc->reca_rx_id = htons(ep->rxid);
if (ep->sid == ep->oid)
hton24(acc->reca_rfid, ep->did);
else
hton24(acc->reca_rfid, ep->sid);
acc->reca_fc4value = htonl(ep->seq.rec_data);
acc->reca_e_stat = htonl(ep->esb_stat & (ESB_ST_RESP |
ESB_ST_SEQ_INIT |
ESB_ST_COMPLETE));
fc_fill_reply_hdr(fp, rfp, FC_RCTL_ELS_REP, 0);
lport->tt.frame_send(lport, fp);
out:
fc_exch_release(ep);
return;
rel:
fc_exch_release(ep);
reject:
fc_seq_ls_rjt(rfp, reason, explan);
}
/**
* fc_exch_rrq_resp() - Handler for RRQ responses
* @sp: The sequence that the RRQ is on
* @fp: The RRQ frame
* @arg: The exchange that the RRQ is on
*
* TODO: fix error handler.
*/
static void fc_exch_rrq_resp(struct fc_seq *sp, struct fc_frame *fp, void *arg)
{
struct fc_exch *aborted_ep = arg;
unsigned int op;
if (IS_ERR(fp)) {
int err = PTR_ERR(fp);
if (err == -FC_EX_CLOSED || err == -FC_EX_TIMEOUT)
goto cleanup;
FC_EXCH_DBG(aborted_ep, "Cannot process RRQ, "
"frame error %d\n", err);
return;
}
op = fc_frame_payload_op(fp);
fc_frame_free(fp);
switch (op) {
case ELS_LS_RJT:
FC_EXCH_DBG(aborted_ep, "LS_RJT for RRQ\n");
fallthrough;
case ELS_LS_ACC:
goto cleanup;
default:
FC_EXCH_DBG(aborted_ep, "unexpected response op %x for RRQ\n",
op);
return;
}
cleanup:
fc_exch_done(&aborted_ep->seq);
/* drop hold for rec qual */
fc_exch_release(aborted_ep);
}
/**
* fc_exch_seq_send() - Send a frame using a new exchange and sequence
* @lport: The local port to send the frame on
* @fp: The frame to be sent
* @resp: The response handler for this request
* @destructor: The destructor for the exchange
* @arg: The argument to be passed to the response handler
* @timer_msec: The timeout period for the exchange
*
* The exchange response handler is set to the resp() function pointer
* in this routine. It can be called in two scenarios: when a timeout
* occurs or when a response frame is received for the exchange. On a
* timeout the fc_frame pointer passed to the handler encodes the error
* and can be tested with the IS_ERR()/PTR_ERR() macros.
*
* The exchange destructor handler is also set in this routine. It is
* invoked by the EM layer when the exchange is about to be freed and
* can be used by the caller to release its own resources along with
* the exchange.
*
* The arg is passed back to both the response and destructor handlers.
*
* The timeout value (in msec) for the exchange is set if a non-zero
* timer_msec argument is specified. The timer is canceled when it
* fires or when the exchange is done. The exchange timeout handler
* is registered by the EM layer.
*
* The following frame header fields must be filled in before calling
* this routine:
*
* - routing control
* - FC port did
* - FC port sid
* - FC header type
* - frame control
* - parameter or relative offset
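*
* These are typically filled in with fc_fill_fc_hdr() before calling
* this routine; see fc_exch_rrq() in this file for one such caller.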
*/
struct fc_seq *fc_exch_seq_send(struct fc_lport *lport,
struct fc_frame *fp,
void (*resp)(struct fc_seq *,
struct fc_frame *fp,
void *arg),
void (*destructor)(struct fc_seq *, void *),
void *arg, u32 timer_msec)
{
struct fc_exch *ep;
struct fc_seq *sp = NULL;
struct fc_frame_header *fh;
struct fc_fcp_pkt *fsp = NULL;
int rc = 1;
ep = fc_exch_alloc(lport, fp);
if (!ep) {
fc_frame_free(fp);
return NULL;
}
ep->esb_stat |= ESB_ST_SEQ_INIT;
fh = fc_frame_header_get(fp);
fc_exch_set_addr(ep, ntoh24(fh->fh_s_id), ntoh24(fh->fh_d_id));
ep->resp = resp;
ep->destructor = destructor;
ep->arg = arg;
ep->r_a_tov = lport->r_a_tov;
ep->lp = lport;
sp = &ep->seq;
ep->fh_type = fh->fh_type; /* save for possible timeout handling */
ep->f_ctl = ntoh24(fh->fh_f_ctl);
fc_exch_setup_hdr(ep, fp, ep->f_ctl);
sp->cnt++;
if (ep->xid <= lport->lro_xid && fh->fh_r_ctl == FC_RCTL_DD_UNSOL_CMD) {
fsp = fr_fsp(fp);
fc_fcp_ddp_setup(fr_fsp(fp), ep->xid);
}
if (unlikely(lport->tt.frame_send(lport, fp)))
goto err;
if (timer_msec)
fc_exch_timer_set_locked(ep, timer_msec);
ep->f_ctl &= ~FC_FC_FIRST_SEQ; /* not first seq */
if (ep->f_ctl & FC_FC_SEQ_INIT)
ep->esb_stat &= ~ESB_ST_SEQ_INIT;
spin_unlock_bh(&ep->ex_lock);
return sp;
err:
if (fsp)
fc_fcp_ddp_done(fsp);
rc = fc_exch_done_locked(ep);
spin_unlock_bh(&ep->ex_lock);
if (!rc)
fc_exch_delete(ep);
return NULL;
}
EXPORT_SYMBOL(fc_exch_seq_send);
/**
* fc_exch_rrq() - Send an ELS RRQ (Reinstate Recovery Qualifier) command
* @ep: The exchange to send the RRQ on
*
* This tells the remote port to stop blocking the use of
* the exchange and the seq_cnt range.
*/
static void fc_exch_rrq(struct fc_exch *ep)
{
struct fc_lport *lport;
struct fc_els_rrq *rrq;
struct fc_frame *fp;
u32 did;
lport = ep->lp;
fp = fc_frame_alloc(lport, sizeof(*rrq));
if (!fp)
goto retry;
rrq = fc_frame_payload_get(fp, sizeof(*rrq));
memset(rrq, 0, sizeof(*rrq));
rrq->rrq_cmd = ELS_RRQ;
hton24(rrq->rrq_s_id, ep->sid);
rrq->rrq_ox_id = htons(ep->oxid);
rrq->rrq_rx_id = htons(ep->rxid);
did = ep->did;
if (ep->esb_stat & ESB_ST_RESP)
did = ep->sid;
fc_fill_fc_hdr(fp, FC_RCTL_ELS_REQ, did,
lport->port_id, FC_TYPE_ELS,
FC_FC_FIRST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT, 0);
if (fc_exch_seq_send(lport, fp, fc_exch_rrq_resp, NULL, ep,
lport->e_d_tov))
return;
retry:
FC_EXCH_DBG(ep, "exch: RRQ send failed\n");
spin_lock_bh(&ep->ex_lock);
if (ep->state & (FC_EX_RST_CLEANUP | FC_EX_DONE)) {
spin_unlock_bh(&ep->ex_lock);
/* drop hold for rec qual */
fc_exch_release(ep);
return;
}
ep->esb_stat |= ESB_ST_REC_QUAL;
fc_exch_timer_set_locked(ep, ep->r_a_tov);
spin_unlock_bh(&ep->ex_lock);
}
/**
* fc_exch_els_rrq() - Handler for ELS RRQ (Reinstate Recovery Qualifier) requests
* @fp: The RRQ frame, not freed here.
*/
static void fc_exch_els_rrq(struct fc_frame *fp)
{
struct fc_lport *lport;
struct fc_exch *ep = NULL; /* request or subject exchange */
struct fc_els_rrq *rp;
u32 sid;
u16 xid;
enum fc_els_rjt_explan explan;
lport = fr_dev(fp);
rp = fc_frame_payload_get(fp, sizeof(*rp));
explan = ELS_EXPL_INV_LEN;
if (!rp)
goto reject;
/*
* lookup subject exchange.
*/
sid = ntoh24(rp->rrq_s_id); /* subject source */
xid = fc_host_port_id(lport->host) == sid ?
ntohs(rp->rrq_ox_id) : ntohs(rp->rrq_rx_id);
ep = fc_exch_lookup(lport, xid);
explan = ELS_EXPL_OXID_RXID;
if (!ep)
goto reject;
spin_lock_bh(&ep->ex_lock);
FC_EXCH_DBG(ep, "RRQ request from %x: xid %x rxid %x oxid %x\n",
sid, xid, ntohs(rp->rrq_rx_id), ntohs(rp->rrq_ox_id));
if (ep->oxid != ntohs(rp->rrq_ox_id))
goto unlock_reject;
if (ep->rxid != ntohs(rp->rrq_rx_id) &&
ep->rxid != FC_XID_UNKNOWN)
goto unlock_reject;
explan = ELS_EXPL_SID;
if (ep->sid != sid)
goto unlock_reject;
/*
* Clear Recovery Qualifier state, and cancel timer if complete.
*/
if (ep->esb_stat & ESB_ST_REC_QUAL) {
ep->esb_stat &= ~ESB_ST_REC_QUAL;
atomic_dec(&ep->ex_refcnt); /* drop hold for rec qual */
}
if (ep->esb_stat & ESB_ST_COMPLETE)
fc_exch_timer_cancel(ep);
spin_unlock_bh(&ep->ex_lock);
/*
* Send LS_ACC.
*/
fc_seq_ls_acc(fp);
goto out;
unlock_reject:
spin_unlock_bh(&ep->ex_lock);
reject:
fc_seq_ls_rjt(fp, ELS_RJT_LOGIC, explan);
out:
if (ep)
fc_exch_release(ep); /* drop hold from fc_exch_find */
}
/**
* fc_exch_update_stats() - Aggregate exchange manager stats into the lport
* @lport: The local port whose exchange manager stats are to be updated
*/
void fc_exch_update_stats(struct fc_lport *lport)
{
struct fc_host_statistics *st;
struct fc_exch_mgr_anchor *ema;
struct fc_exch_mgr *mp;
st = &lport->host_stats;
list_for_each_entry(ema, &lport->ema_list, ema_list) {
mp = ema->mp;
st->fc_no_free_exch += atomic_read(&mp->stats.no_free_exch);
st->fc_no_free_exch_xid +=
atomic_read(&mp->stats.no_free_exch_xid);
st->fc_xid_not_found += atomic_read(&mp->stats.xid_not_found);
st->fc_xid_busy += atomic_read(&mp->stats.xid_busy);
st->fc_seq_not_found += atomic_read(&mp->stats.seq_not_found);
st->fc_non_bls_resp += atomic_read(&mp->stats.non_bls_resp);
}
}
EXPORT_SYMBOL(fc_exch_update_stats);
/**
* fc_exch_mgr_add() - Add an exchange manager to a local port's list of EMs
* @lport: The local port to add the exchange manager to
* @mp: The exchange manager to be added to the local port
* @match: The match routine that indicates when this EM should be used
*/
struct fc_exch_mgr_anchor *fc_exch_mgr_add(struct fc_lport *lport,
struct fc_exch_mgr *mp,
bool (*match)(struct fc_frame *))
{
struct fc_exch_mgr_anchor *ema;
ema = kmalloc(sizeof(*ema), GFP_ATOMIC);
if (!ema)
return ema;
ema->mp = mp;
ema->match = match;
/* add EM anchor to EM anchors list */
list_add_tail(&ema->ema_list, &lport->ema_list);
kref_get(&mp->kref);
return ema;
}
EXPORT_SYMBOL(fc_exch_mgr_add);
/**
* fc_exch_mgr_destroy() - Destroy an exchange manager
* @kref: The reference to the EM to be destroyed
*/
static void fc_exch_mgr_destroy(struct kref *kref)
{
struct fc_exch_mgr *mp = container_of(kref, struct fc_exch_mgr, kref);
mempool_destroy(mp->ep_pool);
free_percpu(mp->pool);
kfree(mp);
}
/**
* fc_exch_mgr_del() - Delete an EM from a local port's list
* @ema: The exchange manager anchor identifying the EM to be deleted
*/
void fc_exch_mgr_del(struct fc_exch_mgr_anchor *ema)
{
/* remove EM anchor from EM anchors list */
list_del(&ema->ema_list);
kref_put(&ema->mp->kref, fc_exch_mgr_destroy);
kfree(ema);
}
EXPORT_SYMBOL(fc_exch_mgr_del);
/**
* fc_exch_mgr_list_clone() - Share all exchange manager objects
* @src: Source lport to clone exchange managers from
* @dst: New lport that takes references to all the exchange managers
*/
int fc_exch_mgr_list_clone(struct fc_lport *src, struct fc_lport *dst)
{
struct fc_exch_mgr_anchor *ema, *tmp;
list_for_each_entry(ema, &src->ema_list, ema_list) {
if (!fc_exch_mgr_add(dst, ema->mp, ema->match))
goto err;
}
return 0;
err:
list_for_each_entry_safe(ema, tmp, &dst->ema_list, ema_list)
fc_exch_mgr_del(ema);
return -ENOMEM;
}
EXPORT_SYMBOL(fc_exch_mgr_list_clone);
/**
* fc_exch_mgr_alloc() - Allocate an exchange manager
* @lport: The local port that the new EM will be associated with
* @class: The default FC class for new exchanges
* @min_xid: The minimum XID for exchanges from the new EM
* @max_xid: The maximum XID for exchanges from the new EM
* @match: The match routine for the new EM
*/
struct fc_exch_mgr *fc_exch_mgr_alloc(struct fc_lport *lport,
enum fc_class class,
u16 min_xid, u16 max_xid,
bool (*match)(struct fc_frame *))
{
struct fc_exch_mgr *mp;
u16 pool_exch_range;
size_t pool_size;
unsigned int cpu;
struct fc_exch_pool *pool;
if (max_xid <= min_xid || max_xid == FC_XID_UNKNOWN ||
(min_xid & fc_cpu_mask) != 0) {
FC_LPORT_DBG(lport, "Invalid min_xid 0x%x and max_xid 0x%x\n",
min_xid, max_xid);
return NULL;
}
/*
* allocate memory for EM
*/
mp = kzalloc(sizeof(struct fc_exch_mgr), GFP_ATOMIC);
if (!mp)
return NULL;
mp->class = class;
mp->lport = lport;
/* adjust em exch xid range for offload */
mp->min_xid = min_xid;
/* reduce range so per cpu pool fits into PCPU_MIN_UNIT_SIZE pool */
pool_exch_range = (PCPU_MIN_UNIT_SIZE - sizeof(*pool)) /
sizeof(struct fc_exch *);
if ((max_xid - min_xid + 1) / (fc_cpu_mask + 1) > pool_exch_range) {
mp->max_xid = pool_exch_range * (fc_cpu_mask + 1) +
min_xid - 1;
} else {
mp->max_xid = max_xid;
pool_exch_range = (mp->max_xid - mp->min_xid + 1) /
(fc_cpu_mask + 1);
}
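/*
 * Illustrative example (added note with assumed numbers, not from the
 * original code): with fc_cpu_mask == 3 (four per-CPU pools), min_xid == 0
 * and max_xid == 0xfff, the requested 0x1000 exchange IDs are well within
 * 4 * pool_exch_range, so max_xid is kept and each pool covers
 * 0x1000 / 4 == 0x400 exchange IDs.
 */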
mp->ep_pool = mempool_create_slab_pool(2, fc_em_cachep);
if (!mp->ep_pool)
goto free_mp;
/*
* Set up the per-CPU exchange pools with the exchange ID range divided
* equally across all CPUs. The exchange-pointer array for each pool is
* allocated to cover that pool's share of the range.
*/
mp->pool_max_index = pool_exch_range - 1;
/*
* Allocate and initialize per cpu exch pool
*/
pool_size = sizeof(*pool) + pool_exch_range * sizeof(struct fc_exch *);
mp->pool = __alloc_percpu(pool_size, __alignof__(struct fc_exch_pool));
if (!mp->pool)
goto free_mempool;
for_each_possible_cpu(cpu) {
pool = per_cpu_ptr(mp->pool, cpu);
pool->next_index = 0;
pool->left = FC_XID_UNKNOWN;
pool->right = FC_XID_UNKNOWN;
spin_lock_init(&pool->lock);
INIT_LIST_HEAD(&pool->ex_list);
}
kref_init(&mp->kref);
if (!fc_exch_mgr_add(lport, mp, match)) {
free_percpu(mp->pool);
goto free_mempool;
}
/*
* The kref_init() above set mp->kref to 1 and the call to
* fc_exch_mgr_add() incremented it again, so drop that
* extra reference here.
*/
kref_put(&mp->kref, fc_exch_mgr_destroy);
return mp;
free_mempool:
mempool_destroy(mp->ep_pool);
free_mp:
kfree(mp);
return NULL;
}
EXPORT_SYMBOL(fc_exch_mgr_alloc);
/**
* fc_exch_mgr_free() - Free all exchange managers on a local port
* @lport: The local port whose EMs are to be freed
*/
void fc_exch_mgr_free(struct fc_lport *lport)
{
struct fc_exch_mgr_anchor *ema, *next;
flush_workqueue(fc_exch_workqueue);
list_for_each_entry_safe(ema, next, &lport->ema_list, ema_list)
fc_exch_mgr_del(ema);
}
EXPORT_SYMBOL(fc_exch_mgr_free);
/**
* fc_find_ema() - Look up the appropriate Exchange Manager Anchor for the
* frame's XID
* @f_ctl: The F_CTL field from the received frame header
* @lport: The local port the frame was received on
* @fh: The received frame header
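*
* For example, an unsolicited request whose RX_ID is still FC_XID_UNKNOWN
* is matched to the last anchor on the list rather than by XID range.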
*/
static struct fc_exch_mgr_anchor *fc_find_ema(u32 f_ctl,
struct fc_lport *lport,
struct fc_frame_header *fh)
{
struct fc_exch_mgr_anchor *ema;
u16 xid;
if (f_ctl & FC_FC_EX_CTX)
xid = ntohs(fh->fh_ox_id);
else {
xid = ntohs(fh->fh_rx_id);
if (xid == FC_XID_UNKNOWN)
return list_entry(lport->ema_list.prev,
typeof(*ema), ema_list);
}
list_for_each_entry(ema, &lport->ema_list, ema_list) {
if ((xid >= ema->mp->min_xid) &&
(xid <= ema->mp->max_xid))
return ema;
}
return NULL;
}
/**
* fc_exch_recv() - Handler for received frames
* @lport: The local port the frame was received on
* @fp: The received frame
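*
* Frames are dispatched on the EX_CTX/SEQ_CTX bits of F_CTL: BLS frames
* go to fc_exch_recv_bls(), frames sent by the exchange responder (EX_CTX
* set, SEQ_CTX clear) to fc_exch_recv_seq_resp(), frames sent by the
* sequence responder (SEQ_CTX set) to fc_exch_recv_resp(), and frames with
* neither bit set are new requests handled by fc_exch_recv_req().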
*/
void fc_exch_recv(struct fc_lport *lport, struct fc_frame *fp)
{
struct fc_frame_header *fh = fc_frame_header_get(fp);
struct fc_exch_mgr_anchor *ema;
u32 f_ctl;
/* lport lock ? */
if (!lport || lport->state == LPORT_ST_DISABLED) {
FC_LIBFC_DBG("Receiving frames for an lport that "
"has not been initialized correctly\n");
fc_frame_free(fp);
return;
}
f_ctl = ntoh24(fh->fh_f_ctl);
ema = fc_find_ema(f_ctl, lport, fh);
if (!ema) {
FC_LPORT_DBG(lport, "Unable to find Exchange Manager Anchor, "
"fc_ctl <0x%x>, xid <0x%x>\n",
f_ctl,
(f_ctl & FC_FC_EX_CTX) ?
ntohs(fh->fh_ox_id) :
ntohs(fh->fh_rx_id));
fc_frame_free(fp);
return;
}
/*
* If frame is marked invalid, just drop it.
*/
switch (fr_eof(fp)) {
case FC_EOF_T:
if (f_ctl & FC_FC_END_SEQ)
skb_trim(fp_skb(fp), fr_len(fp) - FC_FC_FILL(f_ctl));
fallthrough;
case FC_EOF_N:
if (fh->fh_type == FC_TYPE_BLS)
fc_exch_recv_bls(ema->mp, fp);
else if ((f_ctl & (FC_FC_EX_CTX | FC_FC_SEQ_CTX)) ==
FC_FC_EX_CTX)
fc_exch_recv_seq_resp(ema->mp, fp);
else if (f_ctl & FC_FC_SEQ_CTX)
fc_exch_recv_resp(ema->mp, fp);
else /* no EX_CTX and no SEQ_CTX */
fc_exch_recv_req(lport, ema->mp, fp);
break;
default:
FC_LPORT_DBG(lport, "dropping invalid frame (eof %x)",
fr_eof(fp));
fc_frame_free(fp);
}
}
EXPORT_SYMBOL(fc_exch_recv);
/**
* fc_exch_init() - Initialize the exchange layer for a local port
* @lport: The local port to initialize the exchange layer for
*/
int fc_exch_init(struct fc_lport *lport)
{
if (!lport->tt.exch_mgr_reset)
lport->tt.exch_mgr_reset = fc_exch_mgr_reset;
return 0;
}
EXPORT_SYMBOL(fc_exch_init);
/**
* fc_setup_exch_mgr() - Setup an exchange manager
*/
int fc_setup_exch_mgr(void)
{
fc_em_cachep = kmem_cache_create("libfc_em", sizeof(struct fc_exch),
0, SLAB_HWCACHE_ALIGN, NULL);
if (!fc_em_cachep)
return -ENOMEM;
/*
* Initialize fc_cpu_mask and fc_cpu_order. fc_cpu_mask is derived
* from nr_cpu_ids rounded up to the next power of two, and the
* corresponding order is stored in fc_cpu_order; it is later used
* to map an exchange ID to an index in the per-CPU exchange pool.
*
* The round-up is required so that fc_cpu_mask aligns with the
* exchange ID's lower bits: all incoming frames of an exchange are
* then delivered to the CPU on which the exchange originated, by a
* simple bitwise AND of fc_cpu_mask and the exchange ID.
*/
fc_cpu_order = ilog2(roundup_pow_of_two(nr_cpu_ids));
fc_cpu_mask = (1 << fc_cpu_order) - 1;
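/*
 * Illustrative example (added note, not in the original code): with
 * nr_cpu_ids == 6, roundup_pow_of_two() yields 8, so fc_cpu_order == 3
 * and fc_cpu_mask == 0x7; exchange ID 0x1234 then maps to per-CPU pool
 * index 0x1234 & 0x7 == 4.
 */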
fc_exch_workqueue = create_singlethread_workqueue("fc_exch_workqueue");
if (!fc_exch_workqueue)
goto err;
return 0;
err:
kmem_cache_destroy(fc_em_cachep);
return -ENOMEM;
}
/**
* fc_destroy_exch_mgr() - Destroy an exchange manager
*/
void fc_destroy_exch_mgr(void)
{
destroy_workqueue(fc_exch_workqueue);
kmem_cache_destroy(fc_em_cachep);
}
| linux-master | drivers/scsi/libfc/fc_exch.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright(c) 2007 Intel Corporation. All rights reserved.
* Copyright(c) 2008 Red Hat, Inc. All rights reserved.
* Copyright(c) 2008 Mike Christie
*
* Maintained at www.Open-FCoE.org
*/
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/spinlock.h>
#include <linux/scatterlist.h>
#include <linux/err.h>
#include <linux/crc32.h>
#include <linux/slab.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/fc/fc_fc2.h>
#include <scsi/libfc.h>
#include "fc_encode.h"
#include "fc_libfc.h"
static struct kmem_cache *scsi_pkt_cachep;
/* SRB state definitions */
#define FC_SRB_FREE 0 /* cmd is free */
#define FC_SRB_CMD_SENT (1 << 0) /* cmd has been sent */
#define FC_SRB_RCV_STATUS (1 << 1) /* response has arrived */
#define FC_SRB_ABORT_PENDING (1 << 2) /* cmd abort sent to device */
#define FC_SRB_ABORTED (1 << 3) /* abort acknowledged */
#define FC_SRB_DISCONTIG (1 << 4) /* non-sequential data recvd */
#define FC_SRB_COMPL (1 << 5) /* fc_io_compl has been run */
#define FC_SRB_FCP_PROCESSING_TMO (1 << 6) /* timer function processing */
#define FC_SRB_READ (1 << 1)
#define FC_SRB_WRITE (1 << 0)
static struct libfc_cmd_priv *libfc_priv(struct scsi_cmnd *cmd)
{
return scsi_cmd_priv(cmd);
}
/**
* struct fc_fcp_internal - FCP layer internal data
* @scsi_pkt_pool: Memory pool to draw FCP packets from
* @scsi_queue_lock: Protects the scsi_pkt_queue
* @scsi_pkt_queue: Current FCP packets
* @last_can_queue_ramp_down_time: ramp down time
* @last_can_queue_ramp_up_time: ramp up time
* @max_can_queue: max can_queue size
*/
struct fc_fcp_internal {
mempool_t *scsi_pkt_pool;
spinlock_t scsi_queue_lock;
struct list_head scsi_pkt_queue;
unsigned long last_can_queue_ramp_down_time;
unsigned long last_can_queue_ramp_up_time;
int max_can_queue;
};
#define fc_get_scsi_internal(x) ((struct fc_fcp_internal *)(x)->scsi_priv)
/*
* function prototypes
* FC scsi I/O related functions
*/
static void fc_fcp_recv_data(struct fc_fcp_pkt *, struct fc_frame *);
static void fc_fcp_recv(struct fc_seq *, struct fc_frame *, void *);
static void fc_fcp_resp(struct fc_fcp_pkt *, struct fc_frame *);
static void fc_fcp_complete_locked(struct fc_fcp_pkt *);
static void fc_tm_done(struct fc_seq *, struct fc_frame *, void *);
static void fc_fcp_error(struct fc_fcp_pkt *, struct fc_frame *);
static void fc_fcp_recovery(struct fc_fcp_pkt *, u8 code);
static void fc_fcp_timeout(struct timer_list *);
static void fc_fcp_rec(struct fc_fcp_pkt *);
static void fc_fcp_rec_error(struct fc_fcp_pkt *, struct fc_frame *);
static void fc_fcp_rec_resp(struct fc_seq *, struct fc_frame *, void *);
static void fc_io_compl(struct fc_fcp_pkt *);
static void fc_fcp_srr(struct fc_fcp_pkt *, enum fc_rctl, u32);
static void fc_fcp_srr_resp(struct fc_seq *, struct fc_frame *, void *);
static void fc_fcp_srr_error(struct fc_fcp_pkt *, struct fc_frame *);
/*
* command status codes
*/
#define FC_COMPLETE 0
#define FC_CMD_ABORTED 1
#define FC_CMD_RESET 2
#define FC_CMD_PLOGO 3
#define FC_SNS_RCV 4
#define FC_TRANS_ERR 5
#define FC_DATA_OVRRUN 6
#define FC_DATA_UNDRUN 7
#define FC_ERROR 8
#define FC_HRD_ERROR 9
#define FC_CRC_ERROR 10
#define FC_TIMED_OUT 11
#define FC_TRANS_RESET 12
/*
* Error recovery timeout values.
*/
#define FC_SCSI_TM_TOV (10 * HZ)
#define FC_HOST_RESET_TIMEOUT (30 * HZ)
#define FC_CAN_QUEUE_PERIOD (60 * HZ)
#define FC_MAX_ERROR_CNT 5
#define FC_MAX_RECOV_RETRY 3
#define FC_FCP_DFLT_QUEUE_DEPTH 32
/**
* fc_fcp_pkt_alloc() - Allocate a fcp_pkt
* @lport: The local port that the FCP packet is for
* @gfp: GFP flags for allocation
*
* Return value: fcp_pkt structure or null on allocation failure.
* Context: Can be called from process context, no lock is required.
*/
static struct fc_fcp_pkt *fc_fcp_pkt_alloc(struct fc_lport *lport, gfp_t gfp)
{
struct fc_fcp_internal *si = fc_get_scsi_internal(lport);
struct fc_fcp_pkt *fsp;
fsp = mempool_alloc(si->scsi_pkt_pool, gfp);
if (fsp) {
memset(fsp, 0, sizeof(*fsp));
fsp->lp = lport;
fsp->xfer_ddp = FC_XID_UNKNOWN;
refcount_set(&fsp->ref_cnt, 1);
timer_setup(&fsp->timer, NULL, 0);
INIT_LIST_HEAD(&fsp->list);
spin_lock_init(&fsp->scsi_pkt_lock);
} else {
this_cpu_inc(lport->stats->FcpPktAllocFails);
}
return fsp;
}
/**
* fc_fcp_pkt_release() - Release hold on a fcp_pkt
* @fsp: The FCP packet to be released
*
* Context: Can be called from process or interrupt context,
* no lock is required.
*/
static void fc_fcp_pkt_release(struct fc_fcp_pkt *fsp)
{
if (refcount_dec_and_test(&fsp->ref_cnt)) {
struct fc_fcp_internal *si = fc_get_scsi_internal(fsp->lp);
mempool_free(fsp, si->scsi_pkt_pool);
}
}
/**
* fc_fcp_pkt_hold() - Hold a fcp_pkt
* @fsp: The FCP packet to be held
*/
static void fc_fcp_pkt_hold(struct fc_fcp_pkt *fsp)
{
refcount_inc(&fsp->ref_cnt);
}
/**
* fc_fcp_pkt_destroy() - Release hold on a fcp_pkt
* @seq: The sequence that the FCP packet is on (required by destructor API)
* @fsp: The FCP packet to be released
*
* This routine is called by a destructor callback in the fc_exch_seq_send()
* routine of the libfc Transport Template. The 'struct fc_seq' is a required
* argument even though it is not used by this routine.
*
* Context: No locking required.
*/
static void fc_fcp_pkt_destroy(struct fc_seq *seq, void *fsp)
{
fc_fcp_pkt_release(fsp);
}
/**
* fc_fcp_lock_pkt() - Lock a fcp_pkt and increase its reference count
* @fsp: The FCP packet to be locked and incremented
*
* We should only return an error if we return a command to SCSI-ml before
* getting a response. This can happen when we send an abort but do not
* wait for the response, and the abort and the command may pass each
* other on the wire/network layer.
*
* Note: this function locks the packet and gets a reference to allow
* callers to call the completion function while the lock is held and
* not have to worry about the packets refcount.
*
* TODO: Maybe we should just have callers grab/release the lock and
* have a function that they call to verify the fsp and grab a ref if
* needed.
*/
static inline int fc_fcp_lock_pkt(struct fc_fcp_pkt *fsp)
{
spin_lock_bh(&fsp->scsi_pkt_lock);
if (fsp->state & FC_SRB_COMPL) {
spin_unlock_bh(&fsp->scsi_pkt_lock);
return -EPERM;
}
fc_fcp_pkt_hold(fsp);
return 0;
}
/**
* fc_fcp_unlock_pkt() - Release a fcp_pkt's lock and decrement its
* reference count
* @fsp: The FCP packet to be unlocked and decremented
*/
static inline void fc_fcp_unlock_pkt(struct fc_fcp_pkt *fsp)
{
spin_unlock_bh(&fsp->scsi_pkt_lock);
fc_fcp_pkt_release(fsp);
}
/**
* fc_fcp_timer_set() - Start a timer for a fcp_pkt
* @fsp: The FCP packet to start a timer for
* @delay: The timeout period in jiffies
*/
static void fc_fcp_timer_set(struct fc_fcp_pkt *fsp, unsigned long delay)
{
if (!(fsp->state & FC_SRB_COMPL)) {
mod_timer(&fsp->timer, jiffies + delay);
fsp->timer_delay = delay;
}
}
static void fc_fcp_abort_done(struct fc_fcp_pkt *fsp)
{
fsp->state |= FC_SRB_ABORTED;
fsp->state &= ~FC_SRB_ABORT_PENDING;
if (fsp->wait_for_comp)
complete(&fsp->tm_done);
else
fc_fcp_complete_locked(fsp);
}
/**
* fc_fcp_send_abort() - Send an abort for exchanges associated with a
* fcp_pkt
* @fsp: The FCP packet to abort exchanges on
*/
static int fc_fcp_send_abort(struct fc_fcp_pkt *fsp)
{
int rc;
if (!fsp->seq_ptr)
return -EINVAL;
this_cpu_inc(fsp->lp->stats->FcpPktAborts);
fsp->state |= FC_SRB_ABORT_PENDING;
rc = fc_seq_exch_abort(fsp->seq_ptr, 0);
/*
* fc_seq_exch_abort() might return -ENXIO if
* the sequence is already completed
*/
if (rc == -ENXIO) {
fc_fcp_abort_done(fsp);
rc = 0;
}
return rc;
}
/**
* fc_fcp_retry_cmd() - Retry a fcp_pkt
* @fsp: The FCP packet to be retried
* @status_code: The FCP status code to set
*
* Sets the status code to the given value and then calls
* fc_fcp_complete_locked() which in turn calls fc_io_compl().
* fc_io_compl() will notify the SCSI-ml that the I/O is done.
* The SCSI-ml will retry the command.
*/
static void fc_fcp_retry_cmd(struct fc_fcp_pkt *fsp, int status_code)
{
if (fsp->seq_ptr) {
fc_exch_done(fsp->seq_ptr);
fsp->seq_ptr = NULL;
}
fsp->state &= ~FC_SRB_ABORT_PENDING;
fsp->io_status = 0;
fsp->status_code = status_code;
fc_fcp_complete_locked(fsp);
}
/**
* fc_fcp_ddp_setup() - Calls a LLD's ddp_setup routine to set up DDP context
* @fsp: The FCP packet that will manage the DDP frames
* @xid: The XID that will be used for the DDP exchange
*/
void fc_fcp_ddp_setup(struct fc_fcp_pkt *fsp, u16 xid)
{
struct fc_lport *lport;
lport = fsp->lp;
if ((fsp->req_flags & FC_SRB_READ) &&
(lport->lro_enabled) && (lport->tt.ddp_setup)) {
if (lport->tt.ddp_setup(lport, xid, scsi_sglist(fsp->cmd),
scsi_sg_count(fsp->cmd)))
fsp->xfer_ddp = xid;
}
}
/**
* fc_fcp_ddp_done() - Calls a LLD's ddp_done routine to release any
* DDP related resources for a fcp_pkt
* @fsp: The FCP packet that DDP had been used on
*/
void fc_fcp_ddp_done(struct fc_fcp_pkt *fsp)
{
struct fc_lport *lport;
if (!fsp)
return;
if (fsp->xfer_ddp == FC_XID_UNKNOWN)
return;
lport = fsp->lp;
if (lport->tt.ddp_done) {
fsp->xfer_len = lport->tt.ddp_done(lport, fsp->xfer_ddp);
fsp->xfer_ddp = FC_XID_UNKNOWN;
}
}
/**
* fc_fcp_can_queue_ramp_up() - increases can_queue
* @lport: lport to ramp up can_queue
*/
static void fc_fcp_can_queue_ramp_up(struct fc_lport *lport)
{
struct fc_fcp_internal *si = fc_get_scsi_internal(lport);
unsigned long flags;
int can_queue;
spin_lock_irqsave(lport->host->host_lock, flags);
if (si->last_can_queue_ramp_up_time &&
(time_before(jiffies, si->last_can_queue_ramp_up_time +
FC_CAN_QUEUE_PERIOD)))
goto unlock;
if (time_before(jiffies, si->last_can_queue_ramp_down_time +
FC_CAN_QUEUE_PERIOD))
goto unlock;
si->last_can_queue_ramp_up_time = jiffies;
can_queue = lport->host->can_queue << 1;
if (can_queue >= si->max_can_queue) {
can_queue = si->max_can_queue;
si->last_can_queue_ramp_down_time = 0;
}
lport->host->can_queue = can_queue;
shost_printk(KERN_ERR, lport->host, "libfc: increased "
"can_queue to %d.\n", can_queue);
unlock:
spin_unlock_irqrestore(lport->host->host_lock, flags);
}
/**
* fc_fcp_can_queue_ramp_down() - reduces can_queue
* @lport: lport to reduce can_queue
*
* If we are getting memory allocation failures, then we may
* be trying to execute too many commands. We let the running
* commands complete or timeout, then try again with a reduced
* can_queue. Eventually we will hit the point where we run
* on all reserved structs.
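*
* For example, a can_queue of 256 would be halved to 128 on the first
* ramp-down, subject to the FC_CAN_QUEUE_PERIOD throttle below.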
*/
static bool fc_fcp_can_queue_ramp_down(struct fc_lport *lport)
{
struct fc_fcp_internal *si = fc_get_scsi_internal(lport);
unsigned long flags;
int can_queue;
bool changed = false;
spin_lock_irqsave(lport->host->host_lock, flags);
if (si->last_can_queue_ramp_down_time &&
(time_before(jiffies, si->last_can_queue_ramp_down_time +
FC_CAN_QUEUE_PERIOD)))
goto unlock;
si->last_can_queue_ramp_down_time = jiffies;
can_queue = lport->host->can_queue;
can_queue >>= 1;
if (!can_queue)
can_queue = 1;
lport->host->can_queue = can_queue;
changed = true;
unlock:
spin_unlock_irqrestore(lport->host->host_lock, flags);
return changed;
}
/*
* fc_fcp_frame_alloc() - Allocates fc_frame structure and buffer.
* @lport: fc lport struct
* @len: payload length
*
* Allocates an fc_frame structure and buffer; if the allocation fails,
* reduces can_queue.
*/
static inline struct fc_frame *fc_fcp_frame_alloc(struct fc_lport *lport,
size_t len)
{
struct fc_frame *fp;
fp = fc_frame_alloc(lport, len);
if (likely(fp))
return fp;
this_cpu_inc(lport->stats->FcpFrameAllocFails);
/* error case */
fc_fcp_can_queue_ramp_down(lport);
shost_printk(KERN_ERR, lport->host,
"libfc: Could not allocate frame, "
"reducing can_queue to %d.\n", lport->host->can_queue);
return NULL;
}
/**
* get_fsp_rec_tov() - Helper function to get REC_TOV
* @fsp: the FCP packet
*
* Returns rec tov in jiffies as rpriv->e_d_tov + 1 second
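* (e.g. with an e_d_tov of 2000 ms, the assumed FC_DEF_E_D_TOV default,
* this works out to 2 * HZ + HZ jiffies)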
*/
static inline unsigned int get_fsp_rec_tov(struct fc_fcp_pkt *fsp)
{
struct fc_rport_libfc_priv *rpriv = fsp->rport->dd_data;
unsigned int e_d_tov = FC_DEF_E_D_TOV;
if (rpriv && rpriv->e_d_tov > e_d_tov)
e_d_tov = rpriv->e_d_tov;
return msecs_to_jiffies(e_d_tov) + HZ;
}
/**
* fc_fcp_recv_data() - Handler for receiving SCSI-FCP data from a target
* @fsp: The FCP packet the data is on
* @fp: The data frame
*/
static void fc_fcp_recv_data(struct fc_fcp_pkt *fsp, struct fc_frame *fp)
{
struct scsi_cmnd *sc = fsp->cmd;
struct fc_lport *lport = fsp->lp;
struct fc_frame_header *fh;
size_t start_offset;
size_t offset;
u32 crc;
u32 copy_len = 0;
size_t len;
void *buf;
struct scatterlist *sg;
u32 nents;
u8 host_bcode = FC_COMPLETE;
fh = fc_frame_header_get(fp);
offset = ntohl(fh->fh_parm_offset);
start_offset = offset;
len = fr_len(fp) - sizeof(*fh);
buf = fc_frame_payload_get(fp, 0);
/*
* If this I/O used DDP, tear down the DDP context and initiate
* recovery, since data frames are expected to be placed directly
* by the hardware in that case.
*
* Indicate an error to SCSI-ml because something went wrong with
* the DDP handling for us to get here.
*/
if (fsp->xfer_ddp != FC_XID_UNKNOWN) {
fc_fcp_ddp_done(fsp);
FC_FCP_DBG(fsp, "DDP I/O in fc_fcp_recv_data set ERROR\n");
host_bcode = FC_ERROR;
goto err;
}
if (offset + len > fsp->data_len) {
/* this should never happen */
if ((fr_flags(fp) & FCPHF_CRC_UNCHECKED) &&
fc_frame_crc_check(fp))
goto crc_err;
FC_FCP_DBG(fsp, "data received past end. len %zx offset %zx "
"data_len %x\n", len, offset, fsp->data_len);
/* Data is corrupted indicate scsi-ml should retry */
host_bcode = FC_DATA_OVRRUN;
goto err;
}
if (offset != fsp->xfer_len)
fsp->state |= FC_SRB_DISCONTIG;
sg = scsi_sglist(sc);
nents = scsi_sg_count(sc);
if (!(fr_flags(fp) & FCPHF_CRC_UNCHECKED)) {
copy_len = fc_copy_buffer_to_sglist(buf, len, sg, &nents,
&offset, NULL);
} else {
crc = crc32(~0, (u8 *) fh, sizeof(*fh));
copy_len = fc_copy_buffer_to_sglist(buf, len, sg, &nents,
&offset, &crc);
buf = fc_frame_payload_get(fp, 0);
if (len % 4)
crc = crc32(crc, buf + len, 4 - (len % 4));
if (~crc != le32_to_cpu(fr_crc(fp))) {
crc_err:
this_cpu_inc(lport->stats->ErrorFrames);
/* per cpu count, not total count, but OK for limit */
if (this_cpu_inc_return(lport->stats->InvalidCRCCount) < FC_MAX_ERROR_CNT)
printk(KERN_WARNING "libfc: CRC error on data "
"frame for port (%6.6x)\n",
lport->port_id);
/*
* Assume the frame is total garbage.
* We may have copied it over the good part
* of the buffer.
* If so, we need to retry the entire operation.
* Otherwise, ignore it.
*/
if (fsp->state & FC_SRB_DISCONTIG) {
host_bcode = FC_CRC_ERROR;
goto err;
}
return;
}
}
if (fsp->xfer_contig_end == start_offset)
fsp->xfer_contig_end += copy_len;
fsp->xfer_len += copy_len;
/*
* In the very rare event that this data arrived after the response
* and completes the transfer, call the completion handler.
*/
if (unlikely(fsp->state & FC_SRB_RCV_STATUS) &&
fsp->xfer_len == fsp->data_len - fsp->scsi_resid) {
FC_FCP_DBG(fsp, "complete out-of-order sequence\n");
fc_fcp_complete_locked(fsp);
}
return;
err:
fc_fcp_recovery(fsp, host_bcode);
}
/**
* fc_fcp_send_data() - Send SCSI data to a target
* @fsp: The FCP packet the data is on
* @seq: The sequence the data is to be sent on
* @offset: The starting offset for this data request
* @seq_blen: The burst length for this data request
*
* Called after receiving a Transfer Ready data descriptor.
* If the LLD is capable of sequence offload then send down the
* seq_blen amount of data in single frame, otherwise send
* multiple frames of the maximum frame payload supported by
* the target port.
*/
static int fc_fcp_send_data(struct fc_fcp_pkt *fsp, struct fc_seq *seq,
size_t offset, size_t seq_blen)
{
struct fc_exch *ep;
struct scsi_cmnd *sc;
struct scatterlist *sg;
struct fc_frame *fp = NULL;
struct fc_lport *lport = fsp->lp;
struct page *page;
size_t remaining;
size_t t_blen;
size_t tlen;
size_t sg_bytes;
size_t frame_offset, fh_parm_offset;
size_t off;
int error;
void *data = NULL;
void *page_addr;
int using_sg = lport->sg_supp;
u32 f_ctl;
WARN_ON(seq_blen <= 0);
if (unlikely(offset + seq_blen > fsp->data_len)) {
/* this should never happen */
FC_FCP_DBG(fsp, "xfer-ready past end. seq_blen %zx "
"offset %zx\n", seq_blen, offset);
fc_fcp_send_abort(fsp);
return 0;
} else if (offset != fsp->xfer_len) {
/* Out of Order Data Request - no problem, but unexpected. */
FC_FCP_DBG(fsp, "xfer-ready non-contiguous. "
"seq_blen %zx offset %zx\n", seq_blen, offset);
}
/*
* if LLD is capable of seq_offload then set transport
* burst length (t_blen) to seq_blen, otherwise set t_blen
* to max FC frame payload previously set in fsp->max_payload.
*/
t_blen = fsp->max_payload;
if (lport->seq_offload) {
t_blen = min(seq_blen, (size_t)lport->lso_max);
FC_FCP_DBG(fsp, "fsp=%p:lso:blen=%zx lso_max=0x%x t_blen=%zx\n",
fsp, seq_blen, lport->lso_max, t_blen);
}
if (t_blen > 512)
t_blen &= ~(512 - 1); /* round down to block size */
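/*
 * Illustrative example (added note, not in the original code): a
 * t_blen of 1436 would be masked down to 1024 here, the largest
 * multiple of 512 that fits.
 */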
sc = fsp->cmd;
remaining = seq_blen;
fh_parm_offset = frame_offset = offset;
tlen = 0;
seq = fc_seq_start_next(seq);
f_ctl = FC_FC_REL_OFF;
WARN_ON(!seq);
sg = scsi_sglist(sc);
while (remaining > 0 && sg) {
if (offset >= sg->length) {
offset -= sg->length;
sg = sg_next(sg);
continue;
}
if (!fp) {
tlen = min(t_blen, remaining);
/*
* TODO. Temporary workaround. fc_seq_send() can't
* handle odd lengths in non-linear skbs.
* This will be the final fragment only.
*/
if (tlen % 4)
using_sg = 0;
fp = fc_frame_alloc(lport, using_sg ? 0 : tlen);
if (!fp)
return -ENOMEM;
data = fc_frame_header_get(fp) + 1;
fh_parm_offset = frame_offset;
fr_max_payload(fp) = fsp->max_payload;
}
off = offset + sg->offset;
sg_bytes = min(tlen, sg->length - offset);
sg_bytes = min(sg_bytes,
(size_t) (PAGE_SIZE - (off & ~PAGE_MASK)));
page = sg_page(sg) + (off >> PAGE_SHIFT);
if (using_sg) {
get_page(page);
skb_fill_page_desc(fp_skb(fp),
skb_shinfo(fp_skb(fp))->nr_frags,
page, off & ~PAGE_MASK, sg_bytes);
fp_skb(fp)->data_len += sg_bytes;
fr_len(fp) += sg_bytes;
fp_skb(fp)->truesize += PAGE_SIZE;
} else {
/*
* The scatterlist item may be bigger than PAGE_SIZE,
* but we must not cross pages inside the kmap.
*/
page_addr = kmap_atomic(page);
memcpy(data, (char *)page_addr + (off & ~PAGE_MASK),
sg_bytes);
kunmap_atomic(page_addr);
data += sg_bytes;
}
offset += sg_bytes;
frame_offset += sg_bytes;
tlen -= sg_bytes;
remaining -= sg_bytes;
if ((skb_shinfo(fp_skb(fp))->nr_frags < FC_FRAME_SG_LEN) &&
(tlen))
continue;
/*
* Send sequence with transfer sequence initiative in case
* this is last FCP frame of the sequence.
*/
if (remaining == 0)
f_ctl |= FC_FC_SEQ_INIT | FC_FC_END_SEQ;
ep = fc_seq_exch(seq);
fc_fill_fc_hdr(fp, FC_RCTL_DD_SOL_DATA, ep->did, ep->sid,
FC_TYPE_FCP, f_ctl, fh_parm_offset);
/*
* Send the data fragment on the sequence.
*/
error = fc_seq_send(lport, seq, fp);
if (error) {
WARN_ON(1); /* send error should be rare */
return error;
}
fp = NULL;
}
fsp->xfer_len += seq_blen; /* premature count? */
return 0;
}
/**
* fc_fcp_abts_resp() - Receive an ABTS response
* @fsp: The FCP packet that is being aborted
* @fp: The response frame
*/
static void fc_fcp_abts_resp(struct fc_fcp_pkt *fsp, struct fc_frame *fp)
{
int ba_done = 1;
struct fc_ba_rjt *brp;
struct fc_frame_header *fh;
fh = fc_frame_header_get(fp);
switch (fh->fh_r_ctl) {
case FC_RCTL_BA_ACC:
break;
case FC_RCTL_BA_RJT:
brp = fc_frame_payload_get(fp, sizeof(*brp));
if (brp && brp->br_reason == FC_BA_RJT_LOG_ERR)
break;
fallthrough;
default:
/*
* We will let the command time out and SCSI-ml recover
* in this case, therefore clear the ba_done flag.
*/
ba_done = 0;
}
if (ba_done)
fc_fcp_abort_done(fsp);
}
/**
* fc_fcp_recv() - Receive an FCP frame
* @seq: The sequence the frame is on
* @fp: The received frame
* @arg: The related FCP packet
*
* Context: Called from soft-IRQ context. Cannot be called while
* holding the FCP packet list lock.
*/
static void fc_fcp_recv(struct fc_seq *seq, struct fc_frame *fp, void *arg)
{
struct fc_fcp_pkt *fsp = (struct fc_fcp_pkt *)arg;
struct fc_lport *lport = fsp->lp;
struct fc_frame_header *fh;
struct fcp_txrdy *dd;
u8 r_ctl;
int rc = 0;
if (IS_ERR(fp)) {
fc_fcp_error(fsp, fp);
return;
}
fh = fc_frame_header_get(fp);
r_ctl = fh->fh_r_ctl;
if (lport->state != LPORT_ST_READY) {
FC_FCP_DBG(fsp, "lport state %d, ignoring r_ctl %x\n",
lport->state, r_ctl);
goto out;
}
if (fc_fcp_lock_pkt(fsp))
goto out;
if (fh->fh_type == FC_TYPE_BLS) {
fc_fcp_abts_resp(fsp, fp);
goto unlock;
}
if (fsp->state & (FC_SRB_ABORTED | FC_SRB_ABORT_PENDING)) {
FC_FCP_DBG(fsp, "command aborted, ignoring r_ctl %x\n", r_ctl);
goto unlock;
}
if (r_ctl == FC_RCTL_DD_DATA_DESC) {
/*
* received XFER RDY from the target
* need to send data to the target
*/
WARN_ON(fr_flags(fp) & FCPHF_CRC_UNCHECKED);
dd = fc_frame_payload_get(fp, sizeof(*dd));
WARN_ON(!dd);
rc = fc_fcp_send_data(fsp, seq,
(size_t) ntohl(dd->ft_data_ro),
(size_t) ntohl(dd->ft_burst_len));
if (!rc)
seq->rec_data = fsp->xfer_len;
} else if (r_ctl == FC_RCTL_DD_SOL_DATA) {
/*
* received a DATA frame
* next we will copy the data to the system buffer
*/
WARN_ON(fr_len(fp) < sizeof(*fh)); /* len may be 0 */
fc_fcp_recv_data(fsp, fp);
seq->rec_data = fsp->xfer_contig_end;
} else if (r_ctl == FC_RCTL_DD_CMD_STATUS) {
WARN_ON(fr_flags(fp) & FCPHF_CRC_UNCHECKED);
fc_fcp_resp(fsp, fp);
} else {
FC_FCP_DBG(fsp, "unexpected frame. r_ctl %x\n", r_ctl);
}
unlock:
fc_fcp_unlock_pkt(fsp);
out:
fc_frame_free(fp);
}
/**
* fc_fcp_resp() - Handler for FCP responses
* @fsp: The FCP packet the response is for
* @fp: The response frame
*/
static void fc_fcp_resp(struct fc_fcp_pkt *fsp, struct fc_frame *fp)
{
struct fc_frame_header *fh;
struct fcp_resp *fc_rp;
struct fcp_resp_ext *rp_ex;
struct fcp_resp_rsp_info *fc_rp_info;
u32 plen;
u32 expected_len;
u32 respl = 0;
u32 snsl = 0;
u8 flags = 0;
plen = fr_len(fp);
fh = (struct fc_frame_header *)fr_hdr(fp);
if (unlikely(plen < sizeof(*fh) + sizeof(*fc_rp)))
goto len_err;
plen -= sizeof(*fh);
fc_rp = (struct fcp_resp *)(fh + 1);
fsp->cdb_status = fc_rp->fr_status;
flags = fc_rp->fr_flags;
fsp->scsi_comp_flags = flags;
expected_len = fsp->data_len;
/* if ddp, update xfer len */
fc_fcp_ddp_done(fsp);
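/*
* A non-good SCSI status, or any flag other than FCP_CONF_REQ,
* means the extended FCP_RSP fields follow and must be parsed.
*/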
if (unlikely((flags & ~FCP_CONF_REQ) || fc_rp->fr_status)) {
rp_ex = (void *)(fc_rp + 1);
if (flags & (FCP_RSP_LEN_VAL | FCP_SNS_LEN_VAL)) {
if (plen < sizeof(*fc_rp) + sizeof(*rp_ex))
goto len_err;
fc_rp_info = (struct fcp_resp_rsp_info *)(rp_ex + 1);
if (flags & FCP_RSP_LEN_VAL) {
respl = ntohl(rp_ex->fr_rsp_len);
if ((respl != FCP_RESP_RSP_INFO_LEN4) &&
(respl != FCP_RESP_RSP_INFO_LEN8))
goto len_err;
if (fsp->wait_for_comp) {
/* Abuse cdb_status for rsp code */
fsp->cdb_status = fc_rp_info->rsp_code;
complete(&fsp->tm_done);
/*
* tmfs will not have any scsi cmd so
* exit here
*/
return;
}
}
if (flags & FCP_SNS_LEN_VAL) {
snsl = ntohl(rp_ex->fr_sns_len);
if (snsl > SCSI_SENSE_BUFFERSIZE)
snsl = SCSI_SENSE_BUFFERSIZE;
memcpy(fsp->cmd->sense_buffer,
(char *)fc_rp_info + respl, snsl);
}
}
if (flags & (FCP_RESID_UNDER | FCP_RESID_OVER)) {
if (plen < sizeof(*fc_rp) + sizeof(rp_ex->fr_resid))
goto len_err;
if (flags & FCP_RESID_UNDER) {
fsp->scsi_resid = ntohl(rp_ex->fr_resid);
/*
* The cmnd->underflow is the minimum number of
* bytes that must be transferred for this
* command. Provided a sense condition is not
* present, make sure the actual amount
* transferred is at least the underflow value
* or fail.
*/
if (!(flags & FCP_SNS_LEN_VAL) &&
(fc_rp->fr_status == 0) &&
(scsi_bufflen(fsp->cmd) -
fsp->scsi_resid) < fsp->cmd->underflow)
goto err;
expected_len -= fsp->scsi_resid;
} else {
fsp->status_code = FC_ERROR;
}
}
}
fsp->state |= FC_SRB_RCV_STATUS;
/*
* Check for missing or extra data frames.
*/
if (unlikely(fsp->cdb_status == SAM_STAT_GOOD &&
fsp->xfer_len != expected_len)) {
if (fsp->xfer_len < expected_len) {
/*
* Some data may be queued locally. Wait at least
* one jiffy to see if it is delivered. If this
* expires without data, we may do SRR.
*/
if (fsp->lp->qfull) {
FC_FCP_DBG(fsp, "tgt %6.6x queue busy retry\n",
fsp->rport->port_id);
return;
}
FC_FCP_DBG(fsp, "tgt %6.6x xfer len %zx data underrun "
"len %x, data len %x\n",
fsp->rport->port_id,
fsp->xfer_len, expected_len, fsp->data_len);
fc_fcp_timer_set(fsp, get_fsp_rec_tov(fsp));
return;
}
fsp->status_code = FC_DATA_OVRRUN;
FC_FCP_DBG(fsp, "tgt %6.6x xfer len %zx greater than expected, "
"len %x, data len %x\n",
fsp->rport->port_id,
fsp->xfer_len, expected_len, fsp->data_len);
}
fc_fcp_complete_locked(fsp);
return;
len_err:
FC_FCP_DBG(fsp, "short FCP response. flags 0x%x len %u respl %u "
"snsl %u\n", flags, fr_len(fp), respl, snsl);
err:
fsp->status_code = FC_ERROR;
fc_fcp_complete_locked(fsp);
}
/**
* fc_fcp_complete_locked() - Complete processing of a fcp_pkt with the
* fcp_pkt lock held
* @fsp: The FCP packet to be completed
*
* This function may sleep if a timer is pending. The packet lock must be
* held, and the host lock must not be held.
*/
static void fc_fcp_complete_locked(struct fc_fcp_pkt *fsp)
{
struct fc_lport *lport = fsp->lp;
struct fc_seq *seq;
struct fc_exch *ep;
u32 f_ctl;
if (fsp->state & FC_SRB_ABORT_PENDING)
return;
if (fsp->state & FC_SRB_ABORTED) {
if (!fsp->status_code)
fsp->status_code = FC_CMD_ABORTED;
} else {
/*
* Test for transport underrun, independent of response
* underrun status.
*/
if (fsp->cdb_status == SAM_STAT_GOOD &&
fsp->xfer_len < fsp->data_len && !fsp->io_status &&
(!(fsp->scsi_comp_flags & FCP_RESID_UNDER) ||
fsp->xfer_len < fsp->data_len - fsp->scsi_resid)) {
FC_FCP_DBG(fsp, "data underrun, xfer %zx data %x\n",
fsp->xfer_len, fsp->data_len);
fsp->status_code = FC_DATA_UNDRUN;
}
}
seq = fsp->seq_ptr;
if (seq) {
fsp->seq_ptr = NULL;
if (unlikely(fsp->scsi_comp_flags & FCP_CONF_REQ)) {
struct fc_frame *conf_frame;
struct fc_seq *csp;
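/* the target requested confirmation; send FCP_CONF on the next sequence of this exchange */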
csp = fc_seq_start_next(seq);
conf_frame = fc_fcp_frame_alloc(fsp->lp, 0);
if (conf_frame) {
f_ctl = FC_FC_SEQ_INIT;
f_ctl |= FC_FC_LAST_SEQ | FC_FC_END_SEQ;
ep = fc_seq_exch(seq);
fc_fill_fc_hdr(conf_frame, FC_RCTL_DD_SOL_CTL,
ep->did, ep->sid,
FC_TYPE_FCP, f_ctl, 0);
fc_seq_send(lport, csp, conf_frame);
}
}
fc_exch_done(seq);
}
/*
* Some resets driven by SCSI are not I/Os and do not have
* SCSI commands associated with the requests. We should not
* call I/O completion if we do not have a SCSI command.
*/
if (fsp->cmd)
fc_io_compl(fsp);
}
/**
* fc_fcp_cleanup_cmd() - Cancel the active exchange on a fcp_pkt
* @fsp: The FCP packet whose exchanges should be canceled
* @error: The reason for the cancellation
*/
static void fc_fcp_cleanup_cmd(struct fc_fcp_pkt *fsp, int error)
{
if (fsp->seq_ptr) {
fc_exch_done(fsp->seq_ptr);
fsp->seq_ptr = NULL;
}
fsp->status_code = error;
}
/**
* fc_fcp_cleanup_each_cmd() - Cancel all exchanges on a local port
* @lport: The local port whose exchanges should be canceled
* @id: The target's ID
* @lun: The LUN
* @error: The reason for cancellation
*
* If lun or id is -1, they are ignored.
*/
static void fc_fcp_cleanup_each_cmd(struct fc_lport *lport, unsigned int id,
unsigned int lun, int error)
{
struct fc_fcp_internal *si = fc_get_scsi_internal(lport);
struct fc_fcp_pkt *fsp;
struct scsi_cmnd *sc_cmd;
unsigned long flags;
spin_lock_irqsave(&si->scsi_queue_lock, flags);
restart:
list_for_each_entry(fsp, &si->scsi_pkt_queue, list) {
sc_cmd = fsp->cmd;
if (id != -1 && scmd_id(sc_cmd) != id)
continue;
if (lun != -1 && sc_cmd->device->lun != lun)
continue;
fc_fcp_pkt_hold(fsp);
spin_unlock_irqrestore(&si->scsi_queue_lock, flags);
spin_lock_bh(&fsp->scsi_pkt_lock);
if (!(fsp->state & FC_SRB_COMPL)) {
fsp->state |= FC_SRB_COMPL;
/*
* TODO: dropping scsi_pkt_lock and then reacquiring
* it around fc_fcp_cleanup_cmd() is required,
* since fc_fcp_cleanup_cmd() calls into
* fc_seq_set_resp() and that function can sleep.
* Maybe the sleeping code should be removed
* instead of unlocking here, to avoid a
* scheduling-while-atomic bug.
*/
spin_unlock_bh(&fsp->scsi_pkt_lock);
fc_fcp_cleanup_cmd(fsp, error);
spin_lock_bh(&fsp->scsi_pkt_lock);
fc_io_compl(fsp);
}
spin_unlock_bh(&fsp->scsi_pkt_lock);
fc_fcp_pkt_release(fsp);
spin_lock_irqsave(&si->scsi_queue_lock, flags);
/*
* while we dropped the lock multiple pkts could
* have been released, so we have to start over.
*/
goto restart;
}
spin_unlock_irqrestore(&si->scsi_queue_lock, flags);
}
/**
* fc_fcp_abort_io() - Abort all FCP-SCSI exchanges on a local port
* @lport: The local port whose exchanges are to be aborted
*/
static void fc_fcp_abort_io(struct fc_lport *lport)
{
fc_fcp_cleanup_each_cmd(lport, -1, -1, FC_HRD_ERROR);
}
/**
* fc_fcp_pkt_send() - Send a fcp_pkt
* @lport: The local port to send the FCP packet on
* @fsp: The FCP packet to send
*
* Return: Zero for success and -1 for failure
* Locks: Called without locks held
*/
static int fc_fcp_pkt_send(struct fc_lport *lport, struct fc_fcp_pkt *fsp)
{
struct fc_fcp_internal *si = fc_get_scsi_internal(lport);
unsigned long flags;
int rc;
libfc_priv(fsp->cmd)->fsp = fsp;
fsp->cdb_cmd.fc_dl = htonl(fsp->data_len);
fsp->cdb_cmd.fc_flags = fsp->req_flags & ~FCP_CFL_LEN_MASK;
int_to_scsilun(fsp->cmd->device->lun, &fsp->cdb_cmd.fc_lun);
memcpy(fsp->cdb_cmd.fc_cdb, fsp->cmd->cmnd, fsp->cmd->cmd_len);
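/* queue the packet before sending so cleanup and EH paths can find it; it is removed again if the send fails */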
spin_lock_irqsave(&si->scsi_queue_lock, flags);
list_add_tail(&fsp->list, &si->scsi_pkt_queue);
spin_unlock_irqrestore(&si->scsi_queue_lock, flags);
rc = lport->tt.fcp_cmd_send(lport, fsp, fc_fcp_recv);
if (unlikely(rc)) {
spin_lock_irqsave(&si->scsi_queue_lock, flags);
libfc_priv(fsp->cmd)->fsp = NULL;
list_del(&fsp->list);
spin_unlock_irqrestore(&si->scsi_queue_lock, flags);
}
return rc;
}
/**
* fc_fcp_cmd_send() - Send a FCP command
* @lport: The local port to send the command on
* @fsp: The FCP packet the command is on
* @resp: The handler for the response
*/
static int fc_fcp_cmd_send(struct fc_lport *lport, struct fc_fcp_pkt *fsp,
void (*resp)(struct fc_seq *,
struct fc_frame *fp,
void *arg))
{
struct fc_frame *fp;
struct fc_seq *seq;
struct fc_rport *rport;
struct fc_rport_libfc_priv *rpriv;
const size_t len = sizeof(fsp->cdb_cmd);
int rc = 0;
if (fc_fcp_lock_pkt(fsp))
return 0;
fp = fc_fcp_frame_alloc(lport, sizeof(fsp->cdb_cmd));
if (!fp) {
rc = -1;
goto unlock;
}
memcpy(fc_frame_payload_get(fp, len), &fsp->cdb_cmd, len);
fr_fsp(fp) = fsp;
rport = fsp->rport;
fsp->max_payload = rport->maxframe_size;
rpriv = rport->dd_data;
fc_fill_fc_hdr(fp, FC_RCTL_DD_UNSOL_CMD, rport->port_id,
rpriv->local_port->port_id, FC_TYPE_FCP,
FC_FCTL_REQ, 0);
seq = fc_exch_seq_send(lport, fp, resp, fc_fcp_pkt_destroy, fsp, 0);
if (!seq) {
rc = -1;
goto unlock;
}
fsp->seq_ptr = seq;
fc_fcp_pkt_hold(fsp); /* hold for fc_fcp_pkt_destroy */
fsp->timer.function = fc_fcp_timeout;
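/* arm the protocol recovery timer only when the remote port supports REC */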
if (rpriv->flags & FC_RP_FLAGS_REC_SUPPORTED)
fc_fcp_timer_set(fsp, get_fsp_rec_tov(fsp));
unlock:
fc_fcp_unlock_pkt(fsp);
return rc;
}
/**
* fc_fcp_error() - Handler for FCP layer errors
* @fsp: The FCP packet the error is on
* @fp: The frame that has errored
*/
static void fc_fcp_error(struct fc_fcp_pkt *fsp, struct fc_frame *fp)
{
int error = PTR_ERR(fp);
if (fc_fcp_lock_pkt(fsp))
return;
if (error == -FC_EX_CLOSED) {
fc_fcp_retry_cmd(fsp, FC_ERROR);
goto unlock;
}
/*
* clear abort pending, because the lower layer
* decided to force completion.
*/
fsp->state &= ~FC_SRB_ABORT_PENDING;
fsp->status_code = FC_CMD_PLOGO;
fc_fcp_complete_locked(fsp);
unlock:
fc_fcp_unlock_pkt(fsp);
}
/**
* fc_fcp_pkt_abort() - Abort a fcp_pkt
* @fsp: The FCP packet to abort on
*
* Called to send an abort and then wait for abort completion
*/
static int fc_fcp_pkt_abort(struct fc_fcp_pkt *fsp)
{
int rc = FAILED;
unsigned long ticks_left;
FC_FCP_DBG(fsp, "pkt abort state %x\n", fsp->state);
if (fc_fcp_send_abort(fsp)) {
FC_FCP_DBG(fsp, "failed to send abort\n");
return FAILED;
}
if (fsp->state & FC_SRB_ABORTED) {
FC_FCP_DBG(fsp, "target abort cmd completed\n");
return SUCCESS;
}
init_completion(&fsp->tm_done);
fsp->wait_for_comp = 1;
spin_unlock_bh(&fsp->scsi_pkt_lock);
ticks_left = wait_for_completion_timeout(&fsp->tm_done,
FC_SCSI_TM_TOV);
spin_lock_bh(&fsp->scsi_pkt_lock);
fsp->wait_for_comp = 0;
if (!ticks_left) {
FC_FCP_DBG(fsp, "target abort cmd failed\n");
} else if (fsp->state & FC_SRB_ABORTED) {
FC_FCP_DBG(fsp, "target abort cmd passed\n");
rc = SUCCESS;
fc_fcp_complete_locked(fsp);
}
return rc;
}
/**
* fc_lun_reset_send() - Send LUN reset command
* @t: Timer context used to fetch the FSP packet
*/
static void fc_lun_reset_send(struct timer_list *t)
{
struct fc_fcp_pkt *fsp = from_timer(fsp, t, timer);
struct fc_lport *lport = fsp->lp;
if (lport->tt.fcp_cmd_send(lport, fsp, fc_tm_done)) {
if (fsp->recov_retry++ >= FC_MAX_RECOV_RETRY)
return;
if (fc_fcp_lock_pkt(fsp))
return;
fsp->timer.function = fc_lun_reset_send;
fc_fcp_timer_set(fsp, get_fsp_rec_tov(fsp));
fc_fcp_unlock_pkt(fsp);
}
}
/**
* fc_lun_reset() - Send a LUN RESET command to a device
* and wait for the reply
* @lport: The local port to sent the command on
* @fsp: The FCP packet that identifies the LUN to be reset
* @id: The SCSI command ID
* @lun: The LUN ID to be reset
*/
static int fc_lun_reset(struct fc_lport *lport, struct fc_fcp_pkt *fsp,
unsigned int id, unsigned int lun)
{
int rc;
fsp->cdb_cmd.fc_dl = htonl(fsp->data_len);
fsp->cdb_cmd.fc_tm_flags = FCP_TMF_LUN_RESET;
int_to_scsilun(lun, &fsp->cdb_cmd.fc_lun);
fsp->wait_for_comp = 1;
init_completion(&fsp->tm_done);
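/* send the first TMF frame now; fc_lun_reset_send() re-arms the packet timer to retry on failure */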
fc_lun_reset_send(&fsp->timer);
/*
* wait for completion of reset
* after that make sure all commands are terminated
*/
rc = wait_for_completion_timeout(&fsp->tm_done, FC_SCSI_TM_TOV);
spin_lock_bh(&fsp->scsi_pkt_lock);
fsp->state |= FC_SRB_COMPL;
spin_unlock_bh(&fsp->scsi_pkt_lock);
del_timer_sync(&fsp->timer);
spin_lock_bh(&fsp->scsi_pkt_lock);
if (fsp->seq_ptr) {
fc_exch_done(fsp->seq_ptr);
fsp->seq_ptr = NULL;
}
fsp->wait_for_comp = 0;
spin_unlock_bh(&fsp->scsi_pkt_lock);
if (!rc) {
FC_SCSI_DBG(lport, "lun reset failed\n");
return FAILED;
}
/* cdb_status holds the tmf's rsp code */
if (fsp->cdb_status != FCP_TMF_CMPL)
return FAILED;
FC_SCSI_DBG(lport, "lun reset to lun %u completed\n", lun);
fc_fcp_cleanup_each_cmd(lport, id, lun, FC_CMD_ABORTED);
return SUCCESS;
}
/**
* fc_tm_done() - Task Management response handler
* @seq: The sequence that the response is on
* @fp: The response frame
* @arg: The FCP packet the response is for
*/
static void fc_tm_done(struct fc_seq *seq, struct fc_frame *fp, void *arg)
{
struct fc_fcp_pkt *fsp = arg;
struct fc_frame_header *fh;
if (IS_ERR(fp)) {
/*
* If there is an error just let it time out or wait
* for the TMF to be aborted if it timed out.
*
* scsi-eh will escalate when either happens.
*/
return;
}
if (fc_fcp_lock_pkt(fsp))
goto out;
/*
* raced with eh timeout handler.
*/
if (!fsp->seq_ptr || !fsp->wait_for_comp)
goto out_unlock;
fh = fc_frame_header_get(fp);
if (fh->fh_type != FC_TYPE_BLS)
fc_fcp_resp(fsp, fp);
fsp->seq_ptr = NULL;
fc_exch_done(seq);
out_unlock:
fc_fcp_unlock_pkt(fsp);
out:
fc_frame_free(fp);
}
/**
* fc_fcp_cleanup() - Cleanup all FCP exchanges on a local port
* @lport: The local port to be cleaned up
*/
static void fc_fcp_cleanup(struct fc_lport *lport)
{
fc_fcp_cleanup_each_cmd(lport, -1, -1, FC_ERROR);
}
/**
* fc_fcp_timeout() - Handler for fcp_pkt timeouts
* @t: Timer context used to fetch the FSP packet
*
* If REC is supported then just issue it and return. The REC exchange will
* complete or time out and recovery can continue at that point. Otherwise,
* if the response has been received without all the data, at least
* ER_TIMEOUT has elapsed since the response arrived. If the response has not
* been received we see if data was received recently. If it has been then we
* continue waiting, otherwise, we abort the command.
*/
static void fc_fcp_timeout(struct timer_list *t)
{
struct fc_fcp_pkt *fsp = from_timer(fsp, t, timer);
struct fc_rport *rport = fsp->rport;
struct fc_rport_libfc_priv *rpriv = rport->dd_data;
if (fc_fcp_lock_pkt(fsp))
return;
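/* task management commands complete through fc_tm_done(); no I/O recovery here */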
if (fsp->cdb_cmd.fc_tm_flags)
goto unlock;
if (fsp->lp->qfull) {
FC_FCP_DBG(fsp, "fcp timeout, resetting timer delay %d\n",
fsp->timer_delay);
fsp->timer.function = fc_fcp_timeout;
fc_fcp_timer_set(fsp, fsp->timer_delay);
goto unlock;
}
FC_FCP_DBG(fsp, "fcp timeout, delay %d flags %x state %x\n",
fsp->timer_delay, rpriv->flags, fsp->state);
fsp->state |= FC_SRB_FCP_PROCESSING_TMO;
if (rpriv->flags & FC_RP_FLAGS_REC_SUPPORTED)
fc_fcp_rec(fsp);
else if (fsp->state & FC_SRB_RCV_STATUS)
fc_fcp_complete_locked(fsp);
else
fc_fcp_recovery(fsp, FC_TIMED_OUT);
fsp->state &= ~FC_SRB_FCP_PROCESSING_TMO;
unlock:
fc_fcp_unlock_pkt(fsp);
}
/**
* fc_fcp_rec() - Send a REC ELS request
* @fsp: The FCP packet to send the REC request on
*/
static void fc_fcp_rec(struct fc_fcp_pkt *fsp)
{
struct fc_lport *lport;
struct fc_frame *fp;
struct fc_rport *rport;
struct fc_rport_libfc_priv *rpriv;
lport = fsp->lp;
rport = fsp->rport;
rpriv = rport->dd_data;
if (!fsp->seq_ptr || rpriv->rp_state != RPORT_ST_READY) {
fsp->status_code = FC_HRD_ERROR;
fsp->io_status = 0;
fc_fcp_complete_locked(fsp);
return;
}
fp = fc_fcp_frame_alloc(lport, sizeof(struct fc_els_rec));
if (!fp)
goto retry;
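/* reference the command's exchange so the REC identifies the sequence being recovered */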
fr_seq(fp) = fsp->seq_ptr;
fc_fill_fc_hdr(fp, FC_RCTL_ELS_REQ, rport->port_id,
rpriv->local_port->port_id, FC_TYPE_ELS,
FC_FCTL_REQ, 0);
if (lport->tt.elsct_send(lport, rport->port_id, fp, ELS_REC,
fc_fcp_rec_resp, fsp,
2 * lport->r_a_tov)) {
fc_fcp_pkt_hold(fsp); /* hold while REC outstanding */
return;
}
retry:
if (fsp->recov_retry++ < FC_MAX_RECOV_RETRY)
fc_fcp_timer_set(fsp, get_fsp_rec_tov(fsp));
else
fc_fcp_recovery(fsp, FC_TIMED_OUT);
}
/**
* fc_fcp_rec_resp() - Handler for REC ELS responses
* @seq: The sequence the response is on
* @fp: The response frame
* @arg: The FCP packet the response is on
*
* If the response is a reject then the scsi layer will handle
* the timeout. If the response is an LS_ACC and the I/O was not completed,
* set the timeout and return. If the I/O was completed then complete the
* exchange and tell the SCSI layer.
*/
static void fc_fcp_rec_resp(struct fc_seq *seq, struct fc_frame *fp, void *arg)
{
struct fc_fcp_pkt *fsp = (struct fc_fcp_pkt *)arg;
struct fc_els_rec_acc *recp;
struct fc_els_ls_rjt *rjt;
u32 e_stat;
u8 opcode;
u32 offset;
enum dma_data_direction data_dir;
enum fc_rctl r_ctl;
struct fc_rport_libfc_priv *rpriv;
if (IS_ERR(fp)) {
fc_fcp_rec_error(fsp, fp);
return;
}
if (fc_fcp_lock_pkt(fsp))
goto out;
fsp->recov_retry = 0;
opcode = fc_frame_payload_op(fp);
if (opcode == ELS_LS_RJT) {
rjt = fc_frame_payload_get(fp, sizeof(*rjt));
switch (rjt->er_reason) {
default:
FC_FCP_DBG(fsp,
"device %x invalid REC reject %d/%d\n",
fsp->rport->port_id, rjt->er_reason,
rjt->er_explan);
fallthrough;
case ELS_RJT_UNSUP:
FC_FCP_DBG(fsp, "device does not support REC\n");
rpriv = fsp->rport->dd_data;
/*
* if we do not support RECs or got some bogus
* reason then re-arm the timer so we check for
* making progress.
*/
rpriv->flags &= ~FC_RP_FLAGS_REC_SUPPORTED;
break;
case ELS_RJT_LOGIC:
case ELS_RJT_UNAB:
FC_FCP_DBG(fsp, "device %x REC reject %d/%d\n",
fsp->rport->port_id, rjt->er_reason,
rjt->er_explan);
/*
* If response got lost or is stuck in the
* queue somewhere we have no idea if and when
* the response will be received. So quarantine
* the xid and retry the command.
*/
if (rjt->er_explan == ELS_EXPL_OXID_RXID) {
struct fc_exch *ep = fc_seq_exch(fsp->seq_ptr);
ep->state |= FC_EX_QUARANTINE;
fsp->state |= FC_SRB_ABORTED;
fc_fcp_retry_cmd(fsp, FC_TRANS_RESET);
break;
}
fc_fcp_recovery(fsp, FC_TRANS_RESET);
break;
}
} else if (opcode == ELS_LS_ACC) {
if (fsp->state & FC_SRB_ABORTED)
goto unlock_out;
data_dir = fsp->cmd->sc_data_direction;
recp = fc_frame_payload_get(fp, sizeof(*recp));
offset = ntohl(recp->reca_fc4value);
e_stat = ntohl(recp->reca_e_stat);
if (e_stat & ESB_ST_COMPLETE) {
/*
* The exchange is complete.
*
* For output, we must've lost the response.
* For input, all data must've been sent.
* We may have lost the response
* (and a confirmation was requested) and maybe
* some data.
*
* If all data received, send SRR
* asking for response. If partial data received,
* or gaps, SRR requests data at start of gap.
* Recovery via SRR relies on in-order-delivery.
*/
if (data_dir == DMA_TO_DEVICE) {
r_ctl = FC_RCTL_DD_CMD_STATUS;
} else if (fsp->xfer_contig_end == offset) {
r_ctl = FC_RCTL_DD_CMD_STATUS;
} else {
offset = fsp->xfer_contig_end;
r_ctl = FC_RCTL_DD_SOL_DATA;
}
fc_fcp_srr(fsp, r_ctl, offset);
} else if (e_stat & ESB_ST_SEQ_INIT) {
/*
* The remote port has the initiative, so just
* keep waiting for it to complete.
*/
fc_fcp_timer_set(fsp, get_fsp_rec_tov(fsp));
} else {
/*
* The exchange is incomplete, we have seq. initiative.
* Lost response with requested confirmation,
* lost confirmation, lost transfer ready or
* lost write data.
*
* For output, if not all data was received, ask
* for transfer ready to be repeated.
*
* If we received or sent all the data, send SRR to
* request response.
*
* If we lost a response, we may have lost some read
* data as well.
*/
r_ctl = FC_RCTL_DD_SOL_DATA;
if (data_dir == DMA_TO_DEVICE) {
r_ctl = FC_RCTL_DD_CMD_STATUS;
if (offset < fsp->data_len)
r_ctl = FC_RCTL_DD_DATA_DESC;
} else if (offset == fsp->xfer_contig_end) {
r_ctl = FC_RCTL_DD_CMD_STATUS;
} else if (fsp->xfer_contig_end < offset) {
offset = fsp->xfer_contig_end;
}
fc_fcp_srr(fsp, r_ctl, offset);
}
}
unlock_out:
fc_fcp_unlock_pkt(fsp);
out:
fc_fcp_pkt_release(fsp); /* drop hold for outstanding REC */
fc_frame_free(fp);
}
/**
* fc_fcp_rec_error() - Handler for REC errors
* @fsp: The FCP packet the error is on
* @fp: The REC frame
*/
static void fc_fcp_rec_error(struct fc_fcp_pkt *fsp, struct fc_frame *fp)
{
int error = PTR_ERR(fp);
if (fc_fcp_lock_pkt(fsp))
goto out;
switch (error) {
case -FC_EX_CLOSED:
FC_FCP_DBG(fsp, "REC %p fid %6.6x exchange closed\n",
fsp, fsp->rport->port_id);
fc_fcp_retry_cmd(fsp, FC_ERROR);
break;
default:
FC_FCP_DBG(fsp, "REC %p fid %6.6x error unexpected error %d\n",
fsp, fsp->rport->port_id, error);
fsp->status_code = FC_CMD_PLOGO;
fallthrough;
case -FC_EX_TIMEOUT:
/*
* Assume REC or LS_ACC was lost.
* The exchange manager will have aborted REC, so retry.
*/
FC_FCP_DBG(fsp, "REC %p fid %6.6x exchange timeout retry %d/%d\n",
fsp, fsp->rport->port_id, fsp->recov_retry,
FC_MAX_RECOV_RETRY);
if (fsp->recov_retry++ < FC_MAX_RECOV_RETRY)
fc_fcp_rec(fsp);
else
fc_fcp_recovery(fsp, FC_ERROR);
break;
}
fc_fcp_unlock_pkt(fsp);
out:
fc_fcp_pkt_release(fsp); /* drop hold for outstanding REC */
}
/**
* fc_fcp_recovery() - Handler for fcp_pkt recovery
* @fsp: The FCP pkt that needs to be aborted
* @code: The FCP status code to set
*/
static void fc_fcp_recovery(struct fc_fcp_pkt *fsp, u8 code)
{
FC_FCP_DBG(fsp, "start recovery code %x\n", code);
fsp->status_code = code;
fsp->cdb_status = 0;
fsp->io_status = 0;
/*
* if this fails then we let the scsi command timer fire and
* scsi-ml escalate.
*/
fc_fcp_send_abort(fsp);
}
/**
* fc_fcp_srr() - Send a SRR request (Sequence Retransmission Request)
* @fsp: The FCP packet the SRR is to be sent on
* @r_ctl: The R_CTL field for the SRR request
* @offset: The SRR relative offset
*
* This is called after receiving status but insufficient data, or
* when expecting status but the request has timed out.
*/
static void fc_fcp_srr(struct fc_fcp_pkt *fsp, enum fc_rctl r_ctl, u32 offset)
{
struct fc_lport *lport = fsp->lp;
struct fc_rport *rport;
struct fc_rport_libfc_priv *rpriv;
struct fc_exch *ep = fc_seq_exch(fsp->seq_ptr);
struct fc_seq *seq;
struct fcp_srr *srr;
struct fc_frame *fp;
rport = fsp->rport;
rpriv = rport->dd_data;
if (!(rpriv->flags & FC_RP_FLAGS_RETRY) ||
rpriv->rp_state != RPORT_ST_READY)
goto retry; /* shouldn't happen */
fp = fc_fcp_frame_alloc(lport, sizeof(*srr));
if (!fp)
goto retry;
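/* the SRR payload identifies the original exchange and the relative offset to resume from */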
srr = fc_frame_payload_get(fp, sizeof(*srr));
memset(srr, 0, sizeof(*srr));
srr->srr_op = ELS_SRR;
srr->srr_ox_id = htons(ep->oxid);
srr->srr_rx_id = htons(ep->rxid);
srr->srr_r_ctl = r_ctl;
srr->srr_rel_off = htonl(offset);
fc_fill_fc_hdr(fp, FC_RCTL_ELS4_REQ, rport->port_id,
rpriv->local_port->port_id, FC_TYPE_FCP,
FC_FCTL_REQ, 0);
seq = fc_exch_seq_send(lport, fp, fc_fcp_srr_resp,
fc_fcp_pkt_destroy,
fsp, get_fsp_rec_tov(fsp));
if (!seq)
goto retry;
fsp->recov_seq = seq;
fsp->xfer_len = offset;
fsp->xfer_contig_end = offset;
fsp->state &= ~FC_SRB_RCV_STATUS;
fc_fcp_pkt_hold(fsp); /* hold for outstanding SRR */
return;
retry:
fc_fcp_retry_cmd(fsp, FC_TRANS_RESET);
}
/**
* fc_fcp_srr_resp() - Handler for SRR response
* @seq: The sequence the SRR is on
* @fp: The SRR frame
* @arg: The FCP packet the SRR is on
*/
static void fc_fcp_srr_resp(struct fc_seq *seq, struct fc_frame *fp, void *arg)
{
struct fc_fcp_pkt *fsp = arg;
struct fc_frame_header *fh;
if (IS_ERR(fp)) {
fc_fcp_srr_error(fsp, fp);
return;
}
if (fc_fcp_lock_pkt(fsp))
goto out;
fh = fc_frame_header_get(fp);
/*
* BUG? fc_fcp_srr_error calls fc_exch_done which would release
* the ep. But if fc_fcp_srr_error had got -FC_EX_TIMEOUT,
* then fc_exch_timeout would be sending an abort. The fc_exch_done
* call by fc_fcp_srr_error would prevent fc_exch.c from seeing
* an abort response though.
*/
if (fh->fh_type == FC_TYPE_BLS) {
fc_fcp_unlock_pkt(fsp);
return;
}
switch (fc_frame_payload_op(fp)) {
case ELS_LS_ACC:
fsp->recov_retry = 0;
fc_fcp_timer_set(fsp, get_fsp_rec_tov(fsp));
break;
case ELS_LS_RJT:
default:
fc_fcp_recovery(fsp, FC_ERROR);
break;
}
fc_fcp_unlock_pkt(fsp);
out:
fc_exch_done(seq);
fc_frame_free(fp);
}
/**
* fc_fcp_srr_error() - Handler for SRR errors
* @fsp: The FCP packet that the SRR error is on
* @fp: The SRR frame
*/
static void fc_fcp_srr_error(struct fc_fcp_pkt *fsp, struct fc_frame *fp)
{
if (fc_fcp_lock_pkt(fsp))
goto out;
switch (PTR_ERR(fp)) {
case -FC_EX_TIMEOUT:
FC_FCP_DBG(fsp, "SRR timeout, retries %d\n", fsp->recov_retry);
if (fsp->recov_retry++ < FC_MAX_RECOV_RETRY)
fc_fcp_rec(fsp);
else
fc_fcp_recovery(fsp, FC_TIMED_OUT);
break;
case -FC_EX_CLOSED: /* e.g., link failure */
FC_FCP_DBG(fsp, "SRR error, exchange closed\n");
fallthrough;
default:
fc_fcp_retry_cmd(fsp, FC_ERROR);
break;
}
fc_fcp_unlock_pkt(fsp);
out:
fc_exch_done(fsp->recov_seq);
}
/**
* fc_fcp_lport_queue_ready() - Determine if the lport and its queue are ready
* @lport: The local port to be checked
*/
static inline int fc_fcp_lport_queue_ready(struct fc_lport *lport)
{
/* lock ? */
return (lport->state == LPORT_ST_READY) &&
lport->link_up && !lport->qfull;
}
/**
* fc_queuecommand() - The queuecommand function of the SCSI template
* @shost: The Scsi_Host that the command was issued to
* @sc_cmd: The scsi_cmnd to be executed
*
* This is the i/o strategy routine, called by the SCSI layer.
*/
int fc_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *sc_cmd)
{
struct fc_lport *lport = shost_priv(shost);
struct fc_rport *rport = starget_to_rport(scsi_target(sc_cmd->device));
struct fc_fcp_pkt *fsp;
int rval;
int rc = 0;
rval = fc_remote_port_chkready(rport);
if (rval) {
sc_cmd->result = rval;
scsi_done(sc_cmd);
return 0;
}
if (!*(struct fc_remote_port **)rport->dd_data) {
/*
* rport is transitioning from blocked/deleted to
* online
*/
sc_cmd->result = DID_IMM_RETRY << 16;
scsi_done(sc_cmd);
goto out;
}
if (!fc_fcp_lport_queue_ready(lport)) {
if (lport->qfull) {
if (fc_fcp_can_queue_ramp_down(lport))
shost_printk(KERN_ERR, lport->host,
"libfc: queue full, "
"reducing can_queue to %d.\n",
lport->host->can_queue);
}
rc = SCSI_MLQUEUE_HOST_BUSY;
goto out;
}
fsp = fc_fcp_pkt_alloc(lport, GFP_ATOMIC);
if (fsp == NULL) {
rc = SCSI_MLQUEUE_HOST_BUSY;
goto out;
}
/*
* build the libfc request pkt
*/
fsp->cmd = sc_cmd; /* save the cmd */
fsp->rport = rport; /* set the remote port ptr */
/*
* set up the transfer length
*/
fsp->data_len = scsi_bufflen(sc_cmd);
fsp->xfer_len = 0;
/*
* setup the data direction
*/
if (sc_cmd->sc_data_direction == DMA_FROM_DEVICE) {
fsp->req_flags = FC_SRB_READ;
this_cpu_inc(lport->stats->InputRequests);
this_cpu_add(lport->stats->InputBytes, fsp->data_len);
} else if (sc_cmd->sc_data_direction == DMA_TO_DEVICE) {
fsp->req_flags = FC_SRB_WRITE;
this_cpu_inc(lport->stats->OutputRequests);
this_cpu_add(lport->stats->OutputBytes, fsp->data_len);
} else {
fsp->req_flags = 0;
this_cpu_inc(lport->stats->ControlRequests);
}
/*
* send it to the lower layer
* if we get -1 return then put the request in the pending
* queue.
*/
rval = fc_fcp_pkt_send(lport, fsp);
if (rval != 0) {
fsp->state = FC_SRB_FREE;
fc_fcp_pkt_release(fsp);
rc = SCSI_MLQUEUE_HOST_BUSY;
}
out:
return rc;
}
EXPORT_SYMBOL(fc_queuecommand);
/**
* fc_io_compl() - Handle responses for completed commands
* @fsp: The FCP packet that is complete
*
* Translates fcp_pkt errors to Linux SCSI errors.
* The fcp packet lock must be held when calling.
*/
static void fc_io_compl(struct fc_fcp_pkt *fsp)
{
struct fc_fcp_internal *si;
struct scsi_cmnd *sc_cmd;
struct fc_lport *lport;
unsigned long flags;
/* release outstanding ddp context */
fc_fcp_ddp_done(fsp);
fsp->state |= FC_SRB_COMPL;
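/*
* Stop the packet timer. The lock is dropped around del_timer_sync()
* so it cannot deadlock with the timeout handler; this is skipped
* when completing from within the timeout path itself
* (FC_SRB_FCP_PROCESSING_TMO set).
*/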
if (!(fsp->state & FC_SRB_FCP_PROCESSING_TMO)) {
spin_unlock_bh(&fsp->scsi_pkt_lock);
del_timer_sync(&fsp->timer);
spin_lock_bh(&fsp->scsi_pkt_lock);
}
lport = fsp->lp;
si = fc_get_scsi_internal(lport);
/*
* if can_queue ramp down is done then try can_queue ramp up
* since commands are completing now.
*/
if (si->last_can_queue_ramp_down_time)
fc_fcp_can_queue_ramp_up(lport);
sc_cmd = fsp->cmd;
libfc_priv(sc_cmd)->status = fsp->cdb_status;
switch (fsp->status_code) {
case FC_COMPLETE:
if (fsp->cdb_status == 0) {
/*
* good I/O status
*/
sc_cmd->result = DID_OK << 16;
if (fsp->scsi_resid)
libfc_priv(sc_cmd)->resid_len = fsp->scsi_resid;
} else {
/*
* transport level I/O was ok but scsi
* has non zero status
*/
sc_cmd->result = (DID_OK << 16) | fsp->cdb_status;
}
break;
case FC_ERROR:
FC_FCP_DBG(fsp, "Returning DID_ERROR to scsi-ml "
"due to FC_ERROR\n");
sc_cmd->result = DID_ERROR << 16;
break;
case FC_DATA_UNDRUN:
if ((fsp->cdb_status == 0) && !(fsp->req_flags & FC_SRB_READ)) {
/*
* scsi status is good but transport level
* underrun.
*/
if (fsp->state & FC_SRB_RCV_STATUS) {
sc_cmd->result = DID_OK << 16;
} else {
FC_FCP_DBG(fsp, "Returning DID_ERROR to scsi-ml"
" due to FC_DATA_UNDRUN (trans)\n");
sc_cmd->result = DID_ERROR << 16;
}
} else {
/*
* scsi got underrun, this is an error
*/
FC_FCP_DBG(fsp, "Returning DID_ERROR to scsi-ml "
"due to FC_DATA_UNDRUN (scsi)\n");
libfc_priv(sc_cmd)->resid_len = fsp->scsi_resid;
sc_cmd->result = (DID_ERROR << 16) | fsp->cdb_status;
}
break;
case FC_DATA_OVRRUN:
/*
* overrun is an error
*/
FC_FCP_DBG(fsp, "Returning DID_ERROR to scsi-ml "
"due to FC_DATA_OVRRUN\n");
sc_cmd->result = (DID_ERROR << 16) | fsp->cdb_status;
break;
case FC_CMD_ABORTED:
if (host_byte(sc_cmd->result) == DID_TIME_OUT)
FC_FCP_DBG(fsp, "Returning DID_TIME_OUT to scsi-ml "
"due to FC_CMD_ABORTED\n");
else {
FC_FCP_DBG(fsp, "Returning DID_ERROR to scsi-ml "
"due to FC_CMD_ABORTED\n");
set_host_byte(sc_cmd, DID_ERROR);
}
sc_cmd->result |= fsp->io_status;
break;
case FC_CMD_RESET:
FC_FCP_DBG(fsp, "Returning DID_RESET to scsi-ml "
"due to FC_CMD_RESET\n");
sc_cmd->result = (DID_RESET << 16);
break;
case FC_TRANS_RESET:
FC_FCP_DBG(fsp, "Returning DID_SOFT_ERROR to scsi-ml "
"due to FC_TRANS_RESET\n");
sc_cmd->result = (DID_SOFT_ERROR << 16);
break;
case FC_HRD_ERROR:
FC_FCP_DBG(fsp, "Returning DID_NO_CONNECT to scsi-ml "
"due to FC_HRD_ERROR\n");
sc_cmd->result = (DID_NO_CONNECT << 16);
break;
case FC_CRC_ERROR:
FC_FCP_DBG(fsp, "Returning DID_PARITY to scsi-ml "
"due to FC_CRC_ERROR\n");
sc_cmd->result = (DID_PARITY << 16);
break;
case FC_TIMED_OUT:
FC_FCP_DBG(fsp, "Returning DID_BUS_BUSY to scsi-ml "
"due to FC_TIMED_OUT\n");
sc_cmd->result = (DID_BUS_BUSY << 16) | fsp->io_status;
break;
default:
FC_FCP_DBG(fsp, "Returning DID_ERROR to scsi-ml "
"due to unknown error\n");
sc_cmd->result = (DID_ERROR << 16);
break;
}
if (lport->state != LPORT_ST_READY && fsp->status_code != FC_COMPLETE)
sc_cmd->result = (DID_TRANSPORT_DISRUPTED << 16);
spin_lock_irqsave(&si->scsi_queue_lock, flags);
list_del(&fsp->list);
libfc_priv(sc_cmd)->fsp = NULL;
spin_unlock_irqrestore(&si->scsi_queue_lock, flags);
scsi_done(sc_cmd);
/* release ref from initial allocation in queue command */
fc_fcp_pkt_release(fsp);
}
/**
* fc_eh_abort() - Abort a command
* @sc_cmd: The SCSI command to abort
*
* From SCSI host template.
* Send an ABTS to the target device and wait for the response.
*/
int fc_eh_abort(struct scsi_cmnd *sc_cmd)
{
struct fc_fcp_pkt *fsp;
struct fc_lport *lport;
struct fc_fcp_internal *si;
int rc = FAILED;
unsigned long flags;
int rval;
rval = fc_block_scsi_eh(sc_cmd);
if (rval)
return rval;
lport = shost_priv(sc_cmd->device->host);
if (lport->state != LPORT_ST_READY)
return rc;
else if (!lport->link_up)
return rc;
si = fc_get_scsi_internal(lport);
spin_lock_irqsave(&si->scsi_queue_lock, flags);
fsp = libfc_priv(sc_cmd)->fsp;
if (!fsp) {
/* command completed while scsi eh was setting up */
spin_unlock_irqrestore(&si->scsi_queue_lock, flags);
return SUCCESS;
}
/* grab a ref so the fsp and sc_cmd cannot be released from under us */
fc_fcp_pkt_hold(fsp);
spin_unlock_irqrestore(&si->scsi_queue_lock, flags);
if (fc_fcp_lock_pkt(fsp)) {
/* completed while we were waiting for timer to be deleted */
rc = SUCCESS;
goto release_pkt;
}
rc = fc_fcp_pkt_abort(fsp);
fc_fcp_unlock_pkt(fsp);
release_pkt:
fc_fcp_pkt_release(fsp);
return rc;
}
EXPORT_SYMBOL(fc_eh_abort);
/**
* fc_eh_device_reset() - Reset a single LUN
* @sc_cmd: The SCSI command which identifies the device whose
* LUN is to be reset
*
* From SCSI host template.
*/
int fc_eh_device_reset(struct scsi_cmnd *sc_cmd)
{
struct fc_lport *lport;
struct fc_fcp_pkt *fsp;
struct fc_rport *rport = starget_to_rport(scsi_target(sc_cmd->device));
int rc = FAILED;
int rval;
rval = fc_block_scsi_eh(sc_cmd);
if (rval)
return rval;
lport = shost_priv(sc_cmd->device->host);
if (lport->state != LPORT_ST_READY)
return rc;
FC_SCSI_DBG(lport, "Resetting rport (%6.6x)\n", rport->port_id);
fsp = fc_fcp_pkt_alloc(lport, GFP_NOIO);
if (fsp == NULL) {
printk(KERN_WARNING "libfc: could not allocate scsi_pkt\n");
goto out;
}
/*
* Build the libfc request pkt. Do not set the scsi cmnd, because
* the sc passed in is not setup for execution like when sent
* through the queuecommand callout.
*/
fsp->rport = rport; /* set the remote port ptr */
/*
* flush outstanding commands
*/
rc = fc_lun_reset(lport, fsp, scmd_id(sc_cmd), sc_cmd->device->lun);
fsp->state = FC_SRB_FREE;
fc_fcp_pkt_release(fsp);
out:
return rc;
}
EXPORT_SYMBOL(fc_eh_device_reset);
/**
* fc_eh_host_reset() - Reset a Scsi_Host.
* @sc_cmd: The SCSI command that identifies the SCSI host to be reset
*/
int fc_eh_host_reset(struct scsi_cmnd *sc_cmd)
{
struct Scsi_Host *shost = sc_cmd->device->host;
struct fc_lport *lport = shost_priv(shost);
unsigned long wait_tmo;
FC_SCSI_DBG(lport, "Resetting host\n");
fc_lport_reset(lport);
wait_tmo = jiffies + FC_HOST_RESET_TIMEOUT;
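/* poll for up to FC_HOST_RESET_TIMEOUT for the reset port to become ready again */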
while (!fc_fcp_lport_queue_ready(lport) && time_before(jiffies,
wait_tmo))
msleep(1000);
if (fc_fcp_lport_queue_ready(lport)) {
shost_printk(KERN_INFO, shost, "libfc: Host reset succeeded "
"on port (%6.6x)\n", lport->port_id);
return SUCCESS;
} else {
shost_printk(KERN_INFO, shost, "libfc: Host reset failed, "
"port (%6.6x) is not ready.\n",
lport->port_id);
return FAILED;
}
}
EXPORT_SYMBOL(fc_eh_host_reset);
/**
* fc_slave_alloc() - Configure the queue depth of a Scsi_Host
* @sdev: The SCSI device that identifies the SCSI host
*
* Configures the queue depth based on the host's cmd_per_lun. If not set
* then we use the libfc default.
*/
int fc_slave_alloc(struct scsi_device *sdev)
{
struct fc_rport *rport = starget_to_rport(scsi_target(sdev));
if (!rport || fc_remote_port_chkready(rport))
return -ENXIO;
scsi_change_queue_depth(sdev, FC_FCP_DFLT_QUEUE_DEPTH);
return 0;
}
EXPORT_SYMBOL(fc_slave_alloc);
/**
* fc_fcp_destroy() - Tear down the FCP layer for a given local port
* @lport: The local port that no longer needs the FCP layer
*/
void fc_fcp_destroy(struct fc_lport *lport)
{
struct fc_fcp_internal *si = fc_get_scsi_internal(lport);
if (!list_empty(&si->scsi_pkt_queue))
printk(KERN_ERR "libfc: Leaked SCSI packets when destroying "
"port (%6.6x)\n", lport->port_id);
mempool_destroy(si->scsi_pkt_pool);
kfree(si);
lport->scsi_priv = NULL;
}
EXPORT_SYMBOL(fc_fcp_destroy);
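/**
* fc_setup_fcp() - Create the cache used to allocate FCP packets
*
* Called from libfc_init() when the module is loaded.
*/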
int fc_setup_fcp(void)
{
int rc = 0;
scsi_pkt_cachep = kmem_cache_create("libfc_fcp_pkt",
sizeof(struct fc_fcp_pkt),
0, SLAB_HWCACHE_ALIGN, NULL);
if (!scsi_pkt_cachep) {
printk(KERN_ERR "libfc: Unable to allocate SRB cache, "
"module load failed!");
rc = -ENOMEM;
}
return rc;
}
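/**
* fc_destroy_fcp() - Release the FCP packet cache created by fc_setup_fcp()
*/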
void fc_destroy_fcp(void)
{
kmem_cache_destroy(scsi_pkt_cachep);
}
/**
* fc_fcp_init() - Initialize the FCP layer for a local port
* @lport: The local port to initialize the exchange layer for
*/
int fc_fcp_init(struct fc_lport *lport)
{
int rc;
struct fc_fcp_internal *si;
if (!lport->tt.fcp_cmd_send)
lport->tt.fcp_cmd_send = fc_fcp_cmd_send;
if (!lport->tt.fcp_cleanup)
lport->tt.fcp_cleanup = fc_fcp_cleanup;
if (!lport->tt.fcp_abort_io)
lport->tt.fcp_abort_io = fc_fcp_abort_io;
si = kzalloc(sizeof(struct fc_fcp_internal), GFP_KERNEL);
if (!si)
return -ENOMEM;
lport->scsi_priv = si;
si->max_can_queue = lport->host->can_queue;
INIT_LIST_HEAD(&si->scsi_pkt_queue);
spin_lock_init(&si->scsi_queue_lock);
si->scsi_pkt_pool = mempool_create_slab_pool(2, scsi_pkt_cachep);
if (!si->scsi_pkt_pool) {
rc = -ENOMEM;
goto free_internal;
}
return 0;
free_internal:
kfree(si);
return rc;
}
EXPORT_SYMBOL(fc_fcp_init);
| linux-master | drivers/scsi/libfc/fc_fcp.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright(c) 2008 Intel Corporation. All rights reserved.
*
* Maintained at www.Open-FCoE.org
*/
/*
* Provide interface to send ELS/CT FC frames
*/
#include <linux/export.h>
#include <asm/unaligned.h>
#include <scsi/fc/fc_gs.h>
#include <scsi/fc/fc_ns.h>
#include <scsi/fc/fc_els.h>
#include <scsi/libfc.h>
#include "fc_encode.h"
#include "fc_libfc.h"
/**
* fc_elsct_send() - Send an ELS or CT frame
* @lport: The local port to send the frame on
* @did: The destination ID for the frame
* @fp: The frame to be sent
* @op: The operational code
* @resp: The callback routine when the response is received
* @arg: The argument to pass to the response callback routine
* @timer_msec: The timeout period for the frame (in msecs)
*/
struct fc_seq *fc_elsct_send(struct fc_lport *lport, u32 did,
struct fc_frame *fp, unsigned int op,
void (*resp)(struct fc_seq *,
struct fc_frame *,
void *),
void *arg, u32 timer_msec)
{
enum fc_rctl r_ctl;
enum fc_fh_type fh_type;
int rc;
/* ELS requests */
if ((op >= ELS_LS_RJT) && (op <= ELS_AUTH_ELS))
rc = fc_els_fill(lport, did, fp, op, &r_ctl, &fh_type);
else {
/* CT requests */
rc = fc_ct_fill(lport, did, fp, op, &r_ctl, &fh_type, &did);
}
if (rc) {
fc_frame_free(fp);
return NULL;
}
fc_fill_fc_hdr(fp, r_ctl, did, lport->port_id, fh_type,
FC_FCTL_REQ, 0);
return fc_exch_seq_send(lport, fp, resp, NULL, arg, timer_msec);
}
EXPORT_SYMBOL(fc_elsct_send);
/**
* fc_elsct_init() - Initialize the ELS/CT layer
* @lport: The local port to initialize the ELS/CT layer for
*/
int fc_elsct_init(struct fc_lport *lport)
{
if (!lport->tt.elsct_send)
lport->tt.elsct_send = fc_elsct_send;
return 0;
}
EXPORT_SYMBOL(fc_elsct_init);
/**
* fc_els_resp_type() - Return a string describing the ELS response
* @fp: The frame pointer or possible error code
*/
const char *fc_els_resp_type(struct fc_frame *fp)
{
const char *msg;
struct fc_frame_header *fh;
struct fc_ct_hdr *ct;
if (IS_ERR(fp)) {
switch (-PTR_ERR(fp)) {
case FC_NO_ERR:
msg = "response no error";
break;
case FC_EX_TIMEOUT:
msg = "response timeout";
break;
case FC_EX_CLOSED:
msg = "response closed";
break;
default:
msg = "response unknown error";
break;
}
} else {
fh = fc_frame_header_get(fp);
switch (fh->fh_type) {
case FC_TYPE_ELS:
switch (fc_frame_payload_op(fp)) {
case ELS_LS_ACC:
msg = "accept";
break;
case ELS_LS_RJT:
msg = "reject";
break;
default:
msg = "response unknown ELS";
break;
}
break;
case FC_TYPE_CT:
ct = fc_frame_payload_get(fp, sizeof(*ct));
if (ct) {
switch (ntohs(ct->ct_cmd)) {
case FC_FS_ACC:
msg = "CT accept";
break;
case FC_FS_RJT:
msg = "CT reject";
break;
default:
msg = "response unknown CT";
break;
}
} else {
msg = "short CT response";
}
break;
default:
msg = "response not ELS or CT";
break;
}
}
return msg;
}
| linux-master | drivers/scsi/libfc/fc_elsct.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright(c) 2009 Intel Corporation. All rights reserved.
*
* Maintained at www.Open-FCoE.org
*/
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/scatterlist.h>
#include <linux/crc32.h>
#include <linux/module.h>
#include <scsi/libfc.h>
#include "fc_encode.h"
#include "fc_libfc.h"
MODULE_AUTHOR("Open-FCoE.org");
MODULE_DESCRIPTION("libfc");
MODULE_LICENSE("GPL v2");
unsigned int fc_debug_logging;
module_param_named(debug_logging, fc_debug_logging, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(debug_logging, "a bit mask of logging levels");
DEFINE_MUTEX(fc_prov_mutex);
static LIST_HEAD(fc_local_ports);
struct blocking_notifier_head fc_lport_notifier_head =
BLOCKING_NOTIFIER_INIT(fc_lport_notifier_head);
EXPORT_SYMBOL(fc_lport_notifier_head);
/*
* Providers which primarily send requests and PRLIs.
*/
struct fc4_prov *fc_active_prov[FC_FC4_PROV_SIZE] = {
[0] = &fc_rport_t0_prov,
[FC_TYPE_FCP] = &fc_rport_fcp_init,
};
/*
* Providers which receive requests.
*/
struct fc4_prov *fc_passive_prov[FC_FC4_PROV_SIZE] = {
[FC_TYPE_ELS] = &fc_lport_els_prov,
};
/**
* libfc_init() - Initialize libfc.ko
*/
static int __init libfc_init(void)
{
int rc = 0;
rc = fc_setup_fcp();
if (rc)
return rc;
rc = fc_setup_exch_mgr();
if (rc)
goto destroy_pkt_cache;
rc = fc_setup_rport();
if (rc)
goto destroy_em;
return rc;
destroy_em:
fc_destroy_exch_mgr();
destroy_pkt_cache:
fc_destroy_fcp();
return rc;
}
module_init(libfc_init);
/**
* libfc_exit() - Tear down libfc.ko
*/
static void __exit libfc_exit(void)
{
fc_destroy_fcp();
fc_destroy_exch_mgr();
fc_destroy_rport();
}
module_exit(libfc_exit);
/**
* fc_copy_buffer_to_sglist() - This routine copies the data of a buffer
* into a scatter-gather list (SG list).
*
* @buf: pointer to the data buffer.
* @len: the byte-length of the data buffer.
* @sg: pointer to the pointer of the SG list.
* @nents: pointer to the remaining number of entries in the SG list.
* @offset: pointer to the current offset in the SG list.
* @crc: pointer to the 32-bit crc value.
* If crc is NULL, CRC is not calculated.
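*
* Returns the number of bytes copied into the scatterlist.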
*/
u32 fc_copy_buffer_to_sglist(void *buf, size_t len,
struct scatterlist *sg,
u32 *nents, size_t *offset,
u32 *crc)
{
size_t remaining = len;
u32 copy_len = 0;
while (remaining > 0 && sg) {
size_t off, sg_bytes;
void *page_addr;
if (*offset >= sg->length) {
/*
* Check for end and drop resources
* from the last iteration.
*/
if (!(*nents))
break;
--(*nents);
*offset -= sg->length;
sg = sg_next(sg);
continue;
}
sg_bytes = min(remaining, sg->length - *offset);
/*
* The scatterlist item may be bigger than PAGE_SIZE,
* but we are limited to mapping PAGE_SIZE at a time.
*/
off = *offset + sg->offset;
sg_bytes = min(sg_bytes,
(size_t)(PAGE_SIZE - (off & ~PAGE_MASK)));
page_addr = kmap_atomic(sg_page(sg) + (off >> PAGE_SHIFT));
if (crc)
*crc = crc32(*crc, buf, sg_bytes);
memcpy((char *)page_addr + (off & ~PAGE_MASK), buf, sg_bytes);
kunmap_atomic(page_addr);
buf += sg_bytes;
*offset += sg_bytes;
remaining -= sg_bytes;
copy_len += sg_bytes;
}
return copy_len;
}
/**
* fc_fill_hdr() - fill FC header fields based on request
* @fp: reply frame containing header to be filled in
* @in_fp: request frame containing header to use in filling in reply
* @r_ctl: R_CTL value for header
* @f_ctl: F_CTL value for header, with 0 pad
* @seq_cnt: sequence count for the header, ignored if frame has a sequence
* @parm_offset: parameter / offset value
*/
void fc_fill_hdr(struct fc_frame *fp, const struct fc_frame *in_fp,
enum fc_rctl r_ctl, u32 f_ctl, u16 seq_cnt, u32 parm_offset)
{
struct fc_frame_header *fh;
struct fc_frame_header *in_fh;
struct fc_seq *sp;
u32 fill;
fh = __fc_frame_header_get(fp);
in_fh = __fc_frame_header_get(in_fp);
if (f_ctl & FC_FC_END_SEQ) {
fill = -fr_len(fp) & 3;
if (fill) {
/* TODO, this may be a problem with fragmented skb */
skb_put_zero(fp_skb(fp), fill);
f_ctl |= fill;
}
fr_eof(fp) = FC_EOF_T;
} else {
WARN_ON(fr_len(fp) % 4 != 0); /* no pad to non last frame */
fr_eof(fp) = FC_EOF_N;
}
fh->fh_r_ctl = r_ctl;
memcpy(fh->fh_d_id, in_fh->fh_s_id, sizeof(fh->fh_d_id));
memcpy(fh->fh_s_id, in_fh->fh_d_id, sizeof(fh->fh_s_id));
fh->fh_type = in_fh->fh_type;
hton24(fh->fh_f_ctl, f_ctl);
fh->fh_ox_id = in_fh->fh_ox_id;
fh->fh_rx_id = in_fh->fh_rx_id;
fh->fh_cs_ctl = 0;
fh->fh_df_ctl = 0;
fh->fh_parm_offset = htonl(parm_offset);
sp = fr_seq(in_fp);
if (sp) {
fr_seq(fp) = sp;
fh->fh_seq_id = sp->id;
seq_cnt = sp->cnt;
} else {
fh->fh_seq_id = 0;
}
fh->fh_seq_cnt = ntohs(seq_cnt);
fr_sof(fp) = seq_cnt ? FC_SOF_N3 : FC_SOF_I3;
fr_encaps(fp) = fr_encaps(in_fp);
}
EXPORT_SYMBOL(fc_fill_hdr);
/**
* fc_fill_reply_hdr() - fill FC reply header fields based on request
* @fp: reply frame containing header to be filled in
* @in_fp: request frame containing header to use in filling in reply
* @r_ctl: R_CTL value for reply
* @parm_offset: parameter / offset value
*/
void fc_fill_reply_hdr(struct fc_frame *fp, const struct fc_frame *in_fp,
enum fc_rctl r_ctl, u32 parm_offset)
{
struct fc_seq *sp;
sp = fr_seq(in_fp);
if (sp)
fr_seq(fp) = fc_seq_start_next(sp);
fc_fill_hdr(fp, in_fp, r_ctl, FC_FCTL_RESP, 0, parm_offset);
}
EXPORT_SYMBOL(fc_fill_reply_hdr);
/**
* fc_fc4_conf_lport_params() - Modify "service_params" of the specified lport
* if there is a service provider (target provider) registered with libfc
* for the specified "fc_fh_type"
* @lport: Local port which service_params needs to be modified
* @type: FC-4 type, such as FC_TYPE_FCP
*/
void fc_fc4_conf_lport_params(struct fc_lport *lport, enum fc_fh_type type)
{
struct fc4_prov *prov_entry;
BUG_ON(type >= FC_FC4_PROV_SIZE);
BUG_ON(!lport);
prov_entry = fc_passive_prov[type];
if (type == FC_TYPE_FCP) {
if (prov_entry && prov_entry->recv)
lport->service_params |= FCP_SPPF_TARG_FCN;
}
}
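/**
* fc_lport_iterate() - Invoke a callback for every registered local port
* @notify: The callback to run for each local port
* @arg: Argument passed through to the callback
*/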
void fc_lport_iterate(void (*notify)(struct fc_lport *, void *), void *arg)
{
struct fc_lport *lport;
mutex_lock(&fc_prov_mutex);
list_for_each_entry(lport, &fc_local_ports, lport_list)
notify(lport, arg);
mutex_unlock(&fc_prov_mutex);
}
EXPORT_SYMBOL(fc_lport_iterate);
/**
* fc_fc4_register_provider() - register FC-4 upper-level provider.
* @type: FC-4 type, such as FC_TYPE_FCP
* @prov: structure describing provider including ops vector.
*
* Returns 0 on success, negative error otherwise.
*/
int fc_fc4_register_provider(enum fc_fh_type type, struct fc4_prov *prov)
{
struct fc4_prov **prov_entry;
int ret = 0;
if (type >= FC_FC4_PROV_SIZE)
return -EINVAL;
mutex_lock(&fc_prov_mutex);
prov_entry = (prov->recv ? fc_passive_prov : fc_active_prov) + type;
if (*prov_entry)
ret = -EBUSY;
else
*prov_entry = prov;
mutex_unlock(&fc_prov_mutex);
return ret;
}
EXPORT_SYMBOL(fc_fc4_register_provider);
/**
* fc_fc4_deregister_provider() - deregister FC-4 upper-level provider.
* @type: FC-4 type, such as FC_TYPE_FCP
* @prov: structure describing provider including ops vector.
*/
void fc_fc4_deregister_provider(enum fc_fh_type type, struct fc4_prov *prov)
{
BUG_ON(type >= FC_FC4_PROV_SIZE);
mutex_lock(&fc_prov_mutex);
if (prov->recv)
RCU_INIT_POINTER(fc_passive_prov[type], NULL);
else
RCU_INIT_POINTER(fc_active_prov[type], NULL);
mutex_unlock(&fc_prov_mutex);
synchronize_rcu();
}
EXPORT_SYMBOL(fc_fc4_deregister_provider);
/**
* fc_fc4_add_lport() - add new local port to list and run notifiers.
* @lport: The new local port.
*/
void fc_fc4_add_lport(struct fc_lport *lport)
{
mutex_lock(&fc_prov_mutex);
list_add_tail(&lport->lport_list, &fc_local_ports);
blocking_notifier_call_chain(&fc_lport_notifier_head,
FC_LPORT_EV_ADD, lport);
mutex_unlock(&fc_prov_mutex);
}
/**
* fc_fc4_del_lport() - remove local port from list and run notifiers.
* @lport: The local port being removed.
*/
void fc_fc4_del_lport(struct fc_lport *lport)
{
mutex_lock(&fc_prov_mutex);
list_del(&lport->lport_list);
blocking_notifier_call_chain(&fc_lport_notifier_head,
FC_LPORT_EV_DEL, lport);
mutex_unlock(&fc_prov_mutex);
}
| linux-master | drivers/scsi/libfc/fc_libfc.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Serial Attached SCSI (SAS) Expander discovery and configuration
*
* Copyright (C) 2007 James E.J. Bottomley
* <[email protected]>
*/
#include <linux/scatterlist.h>
#include <linux/blkdev.h>
#include <linux/slab.h>
#include <linux/export.h>
#include "sas_internal.h"
#include <scsi/scsi_transport.h>
#include <scsi/scsi_transport_sas.h>
#include "scsi_sas_internal.h"
static void sas_host_smp_discover(struct sas_ha_struct *sas_ha, u8 *resp_data,
u8 phy_id)
{
struct sas_phy *phy;
struct sas_rphy *rphy;
if (phy_id >= sas_ha->num_phys) {
resp_data[2] = SMP_RESP_NO_PHY;
return;
}
resp_data[2] = SMP_RESP_FUNC_ACC;
phy = sas_ha->sas_phy[phy_id]->phy;
resp_data[9] = phy_id;
resp_data[13] = phy->negotiated_linkrate;
memcpy(resp_data + 16, sas_ha->sas_addr, SAS_ADDR_SIZE);
memcpy(resp_data + 24, sas_ha->sas_phy[phy_id]->attached_sas_addr,
SAS_ADDR_SIZE);
resp_data[40] = (phy->minimum_linkrate << 4) |
phy->minimum_linkrate_hw;
resp_data[41] = (phy->maximum_linkrate << 4) |
phy->maximum_linkrate_hw;
if (!sas_ha->sas_phy[phy_id]->port ||
!sas_ha->sas_phy[phy_id]->port->port_dev)
return;
rphy = sas_ha->sas_phy[phy_id]->port->port_dev->rphy;
resp_data[12] = rphy->identify.device_type << 4;
resp_data[14] = rphy->identify.initiator_port_protocols;
resp_data[15] = rphy->identify.target_port_protocols;
}
/**
* to_sas_gpio_gp_bit - given the gpio frame data find the byte/bit position of 'od'
* @od: od bit to find
* @data: incoming bitstream (from frame)
* @index: requested data register index (from frame)
* @count: total number of registers in the bitstream (from frame)
* @bit: bit position of 'od' in the returned byte
*
* returns NULL if 'od' is not in 'data'
*
* From SFF-8485 v0.7:
* "In GPIO_TX[1], bit 0 of byte 3 contains the first bit (i.e., OD0.0)
* and bit 7 of byte 0 contains the 32nd bit (i.e., OD10.1).
*
* In GPIO_TX[2], bit 0 of byte 3 contains the 33rd bit (i.e., OD10.2)
* and bit 7 of byte 0 contains the 64th bit (i.e., OD21.0)."
*
* The general-purpose (raw-bitstream) RX registers have the same layout
* although 'od' is renamed 'id' for 'input data'.
*
* SFF-8489 defines the behavior of the LEDs in response to the 'od' values.
*/
static u8 *to_sas_gpio_gp_bit(unsigned int od, u8 *data, u8 index, u8 count, u8 *bit)
{
unsigned int reg;
u8 byte;
/* gp registers start at index 1 */
if (index == 0)
return NULL;
index--; /* make index 0-based */
if (od < index * 32)
return NULL;
od -= index * 32;
reg = od >> 5;
if (reg >= count)
return NULL;
od &= (1 << 5) - 1;
byte = 3 - (od >> 3);
*bit = od & ((1 << 3) - 1);
return &data[reg * 4 + byte];
}
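/*
* Test the 'od' bit within a GPIO register bitstream. Returns the bit
* value, or -1 if 'od' lies outside the registers supplied in the frame.
*/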
int try_test_sas_gpio_gp_bit(unsigned int od, u8 *data, u8 index, u8 count)
{
u8 *byte;
u8 bit;
byte = to_sas_gpio_gp_bit(od, data, index, count, &bit);
if (!byte)
return -1;
return (*byte >> bit) & 1;
}
EXPORT_SYMBOL(try_test_sas_gpio_gp_bit);
static int sas_host_smp_write_gpio(struct sas_ha_struct *sas_ha, u8 *resp_data,
u8 reg_type, u8 reg_index, u8 reg_count,
u8 *req_data)
{
struct sas_internal *i = to_sas_internal(sas_ha->shost->transportt);
int written;
if (i->dft->lldd_write_gpio == NULL) {
resp_data[2] = SMP_RESP_FUNC_UNK;
return 0;
}
written = i->dft->lldd_write_gpio(sas_ha, reg_type, reg_index,
reg_count, req_data);
if (written < 0) {
resp_data[2] = SMP_RESP_FUNC_FAILED;
written = 0;
} else
resp_data[2] = SMP_RESP_FUNC_ACC;
return written;
}
static void sas_report_phy_sata(struct sas_ha_struct *sas_ha, u8 *resp_data,
u8 phy_id)
{
struct sas_rphy *rphy;
struct dev_to_host_fis *fis;
int i;
if (phy_id >= sas_ha->num_phys) {
resp_data[2] = SMP_RESP_NO_PHY;
return;
}
resp_data[2] = SMP_RESP_PHY_NO_SATA;
if (!sas_ha->sas_phy[phy_id]->port)
return;
rphy = sas_ha->sas_phy[phy_id]->port->port_dev->rphy;
fis = (struct dev_to_host_fis *)
sas_ha->sas_phy[phy_id]->port->port_dev->frame_rcvd;
if (rphy->identify.target_port_protocols != SAS_PROTOCOL_SATA)
return;
resp_data[2] = SMP_RESP_FUNC_ACC;
resp_data[9] = phy_id;
memcpy(resp_data + 16, sas_ha->sas_phy[phy_id]->attached_sas_addr,
SAS_ADDR_SIZE);
/* check to see if we have a valid d2h fis */
if (fis->fis_type != 0x34)
return;
/* the d2h fis is required by the standard to be in LE format */
for (i = 0; i < 20; i += 4) {
u8 *dst = resp_data + 24 + i, *src =
&sas_ha->sas_phy[phy_id]->port->port_dev->frame_rcvd[i];
dst[0] = src[3];
dst[1] = src[2];
dst[2] = src[1];
dst[3] = src[0];
}
}
static void sas_phy_control(struct sas_ha_struct *sas_ha, u8 phy_id,
u8 phy_op, enum sas_linkrate min,
enum sas_linkrate max, u8 *resp_data)
{
struct sas_internal *i =
to_sas_internal(sas_ha->shost->transportt);
struct sas_phy_linkrates rates;
struct asd_sas_phy *asd_phy;
if (phy_id >= sas_ha->num_phys) {
resp_data[2] = SMP_RESP_NO_PHY;
return;
}
asd_phy = sas_ha->sas_phy[phy_id];
switch (phy_op) {
case PHY_FUNC_NOP:
case PHY_FUNC_LINK_RESET:
case PHY_FUNC_HARD_RESET:
case PHY_FUNC_DISABLE:
case PHY_FUNC_CLEAR_ERROR_LOG:
case PHY_FUNC_CLEAR_AFFIL:
case PHY_FUNC_TX_SATA_PS_SIGNAL:
break;
default:
resp_data[2] = SMP_RESP_PHY_UNK_OP;
return;
}
rates.minimum_linkrate = min;
rates.maximum_linkrate = max;
/* filter reset requests through libata eh */
if (phy_op == PHY_FUNC_LINK_RESET && sas_try_ata_reset(asd_phy) == 0) {
resp_data[2] = SMP_RESP_FUNC_ACC;
return;
}
if (i->dft->lldd_control_phy(asd_phy, phy_op, &rates))
resp_data[2] = SMP_RESP_FUNC_FAILED;
else
resp_data[2] = SMP_RESP_FUNC_ACC;
}
void sas_smp_host_handler(struct bsg_job *job, struct Scsi_Host *shost)
{
struct sas_ha_struct *sas_ha = SHOST_TO_SAS_HA(shost);
u8 *req_data, *resp_data;
unsigned int reslen = 0;
int error = -EINVAL;
/* eight is the minimum size for request and response frames */
if (job->request_payload.payload_len < 8 ||
job->reply_payload.payload_len < 8)
goto out;
error = -ENOMEM;
req_data = kzalloc(job->request_payload.payload_len, GFP_KERNEL);
if (!req_data)
goto out;
sg_copy_to_buffer(job->request_payload.sg_list,
job->request_payload.sg_cnt, req_data,
job->request_payload.payload_len);
/* make sure frame can always be built ... we copy
* back only the requested length */
resp_data = kzalloc(max(job->reply_payload.payload_len, 128U),
GFP_KERNEL);
if (!resp_data)
goto out_free_req;
error = -EINVAL;
if (req_data[0] != SMP_REQUEST)
goto out_free_resp;
/* set up default don't know response */
resp_data[0] = SMP_RESPONSE;
resp_data[1] = req_data[1];
resp_data[2] = SMP_RESP_FUNC_UNK;
switch (req_data[1]) {
case SMP_REPORT_GENERAL:
resp_data[2] = SMP_RESP_FUNC_ACC;
resp_data[9] = sas_ha->num_phys;
reslen = 32;
break;
case SMP_REPORT_MANUF_INFO:
resp_data[2] = SMP_RESP_FUNC_ACC;
memcpy(resp_data + 12, shost->hostt->name,
SAS_EXPANDER_VENDOR_ID_LEN);
memcpy(resp_data + 20, "libsas virt phy",
SAS_EXPANDER_PRODUCT_ID_LEN);
reslen = 64;
break;
case SMP_READ_GPIO_REG:
/* FIXME: need GPIO support in the transport class */
break;
case SMP_DISCOVER:
if (job->request_payload.payload_len < 16)
goto out_free_resp;
sas_host_smp_discover(sas_ha, resp_data, req_data[9]);
reslen = 56;
break;
case SMP_REPORT_PHY_ERR_LOG:
/* FIXME: could implement this with additional
* libsas callbacks providing the HW supports it */
break;
case SMP_REPORT_PHY_SATA:
if (job->request_payload.payload_len < 16)
goto out_free_resp;
sas_report_phy_sata(sas_ha, resp_data, req_data[9]);
reslen = 60;
break;
case SMP_REPORT_ROUTE_INFO:
/* Can't implement; hosts have no routes */
break;
case SMP_WRITE_GPIO_REG: {
/* SFF-8485 v0.7 */
const int base_frame_size = 11;
int to_write = req_data[4];
if (job->request_payload.payload_len <
base_frame_size + to_write * 4) {
resp_data[2] = SMP_RESP_INV_FRM_LEN;
break;
}
to_write = sas_host_smp_write_gpio(sas_ha, resp_data, req_data[2],
req_data[3], to_write, &req_data[8]);
reslen = 8;
break;
}
case SMP_CONF_ROUTE_INFO:
/* Can't implement; hosts have no routes */
break;
case SMP_PHY_CONTROL:
if (job->request_payload.payload_len < 44)
goto out_free_resp;
sas_phy_control(sas_ha, req_data[9], req_data[10],
req_data[32] >> 4, req_data[33] >> 4,
resp_data);
reslen = 8;
break;
case SMP_PHY_TEST_FUNCTION:
/* FIXME: should this be implemented? */
break;
default:
/* probably a 2.0 function */
break;
}
sg_copy_from_buffer(job->reply_payload.sg_list,
job->reply_payload.sg_cnt, resp_data,
job->reply_payload.payload_len);
error = 0;
out_free_resp:
kfree(resp_data);
out_free_req:
kfree(req_data);
out:
bsg_job_done(job, error, reslen);
}
| linux-master | drivers/scsi/libsas/sas_host_smp.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Serial Attached SCSI (SAS) Expander discovery and configuration
*
* Copyright (C) 2005 Adaptec, Inc. All rights reserved.
* Copyright (C) 2005 Luben Tuikov <[email protected]>
*
* This file is licensed under GPLv2.
*/
#include <linux/scatterlist.h>
#include <linux/blkdev.h>
#include <linux/slab.h>
#include <asm/unaligned.h>
#include "sas_internal.h"
#include <scsi/sas_ata.h>
#include <scsi/scsi_transport.h>
#include <scsi/scsi_transport_sas.h>
#include "scsi_sas_internal.h"
static int sas_discover_expander(struct domain_device *dev);
static int sas_configure_routing(struct domain_device *dev, u8 *sas_addr);
static int sas_configure_phy(struct domain_device *dev, int phy_id,
u8 *sas_addr, int include);
static int sas_disable_routing(struct domain_device *dev, u8 *sas_addr);
/* ---------- SMP task management ---------- */
/* Give it a long enough timeout. In seconds. */
#define SMP_TIMEOUT 10
static int smp_execute_task_sg(struct domain_device *dev,
struct scatterlist *req, struct scatterlist *resp)
{
int res, retry;
struct sas_task *task = NULL;
struct sas_internal *i =
to_sas_internal(dev->port->ha->shost->transportt);
struct sas_ha_struct *ha = dev->port->ha;
pm_runtime_get_sync(ha->dev);
mutex_lock(&dev->ex_dev.cmd_mutex);
for (retry = 0; retry < 3; retry++) {
if (test_bit(SAS_DEV_GONE, &dev->state)) {
res = -ECOMM;
break;
}
task = sas_alloc_slow_task(GFP_KERNEL);
if (!task) {
res = -ENOMEM;
break;
}
task->dev = dev;
task->task_proto = dev->tproto;
task->smp_task.smp_req = *req;
task->smp_task.smp_resp = *resp;
task->task_done = sas_task_internal_done;
task->slow_task->timer.function = sas_task_internal_timedout;
task->slow_task->timer.expires = jiffies + SMP_TIMEOUT*HZ;
add_timer(&task->slow_task->timer);
res = i->dft->lldd_execute_task(task, GFP_KERNEL);
if (res) {
del_timer_sync(&task->slow_task->timer);
pr_notice("executing SMP task failed:%d\n", res);
break;
}
wait_for_completion(&task->slow_task->completion);
res = -ECOMM;
if ((task->task_state_flags & SAS_TASK_STATE_ABORTED)) {
pr_notice("smp task timed out or aborted\n");
i->dft->lldd_abort_task(task);
if (!(task->task_state_flags & SAS_TASK_STATE_DONE)) {
pr_notice("SMP task aborted and not done\n");
break;
}
}
if (task->task_status.resp == SAS_TASK_COMPLETE &&
task->task_status.stat == SAS_SAM_STAT_GOOD) {
res = 0;
break;
}
if (task->task_status.resp == SAS_TASK_COMPLETE &&
task->task_status.stat == SAS_DATA_UNDERRUN) {
/* no error, but return the number of bytes of
* underrun */
res = task->task_status.residual;
break;
}
if (task->task_status.resp == SAS_TASK_COMPLETE &&
task->task_status.stat == SAS_DATA_OVERRUN) {
res = -EMSGSIZE;
break;
}
if (task->task_status.resp == SAS_TASK_UNDELIVERED &&
task->task_status.stat == SAS_DEVICE_UNKNOWN)
break;
else {
pr_notice("%s: task to dev %016llx response: 0x%x status 0x%x\n",
__func__,
SAS_ADDR(dev->sas_addr),
task->task_status.resp,
task->task_status.stat);
sas_free_task(task);
task = NULL;
}
}
mutex_unlock(&dev->ex_dev.cmd_mutex);
pm_runtime_put_sync(ha->dev);
BUG_ON(retry == 3 && task != NULL);
sas_free_task(task);
return res;
}
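/* Convenience wrapper: wrap flat request/response buffers in single-entry
* scatterlists and hand them to smp_execute_task_sg().
*/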
static int smp_execute_task(struct domain_device *dev, void *req, int req_size,
void *resp, int resp_size)
{
struct scatterlist req_sg;
struct scatterlist resp_sg;
sg_init_one(&req_sg, req, req_size);
sg_init_one(&resp_sg, resp, resp_size);
return smp_execute_task_sg(dev, &req_sg, &resp_sg);
}
/* ---------- Allocations ---------- */
static inline void *alloc_smp_req(int size)
{
u8 *p = kzalloc(size, GFP_KERNEL);
if (p)
p[0] = SMP_REQUEST;
return p;
}
static inline void *alloc_smp_resp(int size)
{
return kzalloc(size, GFP_KERNEL);
}
static char sas_route_char(struct domain_device *dev, struct ex_phy *phy)
{
switch (phy->routing_attr) {
case TABLE_ROUTING:
if (dev->ex_dev.t2t_supp)
return 'U';
else
return 'T';
case DIRECT_ROUTING:
return 'D';
case SUBTRACTIVE_ROUTING:
return 'S';
default:
return '?';
}
}
static enum sas_device_type to_dev_type(struct discover_resp *dr)
{
/* This is detecting a failure to transmit initial dev to host
* FIS as described in section J.5 of sas-2 r16
*/
if (dr->attached_dev_type == SAS_PHY_UNUSED && dr->attached_sata_dev &&
dr->linkrate >= SAS_LINK_RATE_1_5_GBPS)
return SAS_SATA_PENDING;
else
return dr->attached_dev_type;
}
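/* Record the result of a DISCOVER response for one expander phy.  The
* sas_phy is allocated and registered on first sight; while ATA error
* handling is active most field updates are skipped and, if something
* changed, a later domain revalidation is flagged instead.  Debug output
* is emitted only when the attached device type, link rate or SAS address
* actually changed.
*/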
static void sas_set_ex_phy(struct domain_device *dev, int phy_id,
struct smp_disc_resp *disc_resp)
{
enum sas_device_type dev_type;
enum sas_linkrate linkrate;
u8 sas_addr[SAS_ADDR_SIZE];
struct discover_resp *dr = &disc_resp->disc;
struct sas_ha_struct *ha = dev->port->ha;
struct expander_device *ex = &dev->ex_dev;
struct ex_phy *phy = &ex->ex_phy[phy_id];
struct sas_rphy *rphy = dev->rphy;
bool new_phy = !phy->phy;
char *type;
if (new_phy) {
if (WARN_ON_ONCE(test_bit(SAS_HA_ATA_EH_ACTIVE, &ha->state)))
return;
phy->phy = sas_phy_alloc(&rphy->dev, phy_id);
/* FIXME: error_handling */
BUG_ON(!phy->phy);
}
switch (disc_resp->result) {
case SMP_RESP_PHY_VACANT:
phy->phy_state = PHY_VACANT;
break;
default:
phy->phy_state = PHY_NOT_PRESENT;
break;
case SMP_RESP_FUNC_ACC:
phy->phy_state = PHY_EMPTY; /* do not know yet */
break;
}
/* check if anything important changed to squelch debug */
dev_type = phy->attached_dev_type;
linkrate = phy->linkrate;
memcpy(sas_addr, phy->attached_sas_addr, SAS_ADDR_SIZE);
/* Handle vacant phy - rest of dr data is not valid so skip it */
if (phy->phy_state == PHY_VACANT) {
memset(phy->attached_sas_addr, 0, SAS_ADDR_SIZE);
phy->attached_dev_type = SAS_PHY_UNUSED;
if (!test_bit(SAS_HA_ATA_EH_ACTIVE, &ha->state)) {
phy->phy_id = phy_id;
goto skip;
} else
goto out;
}
phy->attached_dev_type = to_dev_type(dr);
if (test_bit(SAS_HA_ATA_EH_ACTIVE, &ha->state))
goto out;
phy->phy_id = phy_id;
phy->linkrate = dr->linkrate;
phy->attached_sata_host = dr->attached_sata_host;
phy->attached_sata_dev = dr->attached_sata_dev;
phy->attached_sata_ps = dr->attached_sata_ps;
phy->attached_iproto = dr->iproto << 1;
phy->attached_tproto = dr->tproto << 1;
/* help some expanders that fail to zero sas_address in the 'no
* device' case
*/
if (phy->attached_dev_type == SAS_PHY_UNUSED ||
phy->linkrate < SAS_LINK_RATE_1_5_GBPS)
memset(phy->attached_sas_addr, 0, SAS_ADDR_SIZE);
else
memcpy(phy->attached_sas_addr, dr->attached_sas_addr, SAS_ADDR_SIZE);
phy->attached_phy_id = dr->attached_phy_id;
phy->phy_change_count = dr->change_count;
phy->routing_attr = dr->routing_attr;
phy->virtual = dr->virtual;
phy->last_da_index = -1;
phy->phy->identify.sas_address = SAS_ADDR(phy->attached_sas_addr);
phy->phy->identify.device_type = dr->attached_dev_type;
phy->phy->identify.initiator_port_protocols = phy->attached_iproto;
phy->phy->identify.target_port_protocols = phy->attached_tproto;
if (!phy->attached_tproto && dr->attached_sata_dev)
phy->phy->identify.target_port_protocols = SAS_PROTOCOL_SATA;
phy->phy->identify.phy_identifier = phy_id;
phy->phy->minimum_linkrate_hw = dr->hmin_linkrate;
phy->phy->maximum_linkrate_hw = dr->hmax_linkrate;
phy->phy->minimum_linkrate = dr->pmin_linkrate;
phy->phy->maximum_linkrate = dr->pmax_linkrate;
phy->phy->negotiated_linkrate = phy->linkrate;
phy->phy->enabled = (phy->linkrate != SAS_PHY_DISABLED);
skip:
if (new_phy)
if (sas_phy_add(phy->phy)) {
sas_phy_free(phy->phy);
return;
}
out:
switch (phy->attached_dev_type) {
case SAS_SATA_PENDING:
type = "stp pending";
break;
case SAS_PHY_UNUSED:
type = "no device";
break;
case SAS_END_DEVICE:
if (phy->attached_iproto) {
if (phy->attached_tproto)
type = "host+target";
else
type = "host";
} else {
if (dr->attached_sata_dev)
type = "stp";
else
type = "ssp";
}
break;
case SAS_EDGE_EXPANDER_DEVICE:
case SAS_FANOUT_EXPANDER_DEVICE:
type = "smp";
break;
default:
type = "unknown";
}
/* this routine is polled by libata error recovery so filter
* unimportant messages
*/
if (new_phy || phy->attached_dev_type != dev_type ||
phy->linkrate != linkrate ||
SAS_ADDR(phy->attached_sas_addr) != SAS_ADDR(sas_addr))
/* pass */;
else
return;
/* if the attached device type changed and ata_eh is active,
* make sure we run revalidation when eh completes (see:
* sas_enable_revalidation)
*/
if (test_bit(SAS_HA_ATA_EH_ACTIVE, &ha->state))
set_bit(DISCE_REVALIDATE_DOMAIN, &dev->port->disc.pending);
pr_debug("%sex %016llx phy%02d:%c:%X attached: %016llx (%s)\n",
test_bit(SAS_HA_ATA_EH_ACTIVE, &ha->state) ? "ata: " : "",
SAS_ADDR(dev->sas_addr), phy->phy_id,
sas_route_char(dev, phy), phy->linkrate,
SAS_ADDR(phy->attached_sas_addr), type);
}
/* check if we have an existing attached ata device on this expander phy */
struct domain_device *sas_ex_to_ata(struct domain_device *ex_dev, int phy_id)
{
struct ex_phy *ex_phy = &ex_dev->ex_dev.ex_phy[phy_id];
struct domain_device *dev;
struct sas_rphy *rphy;
if (!ex_phy->port)
return NULL;
rphy = ex_phy->port->rphy;
if (!rphy)
return NULL;
dev = sas_find_dev_by_rphy(rphy);
if (dev && dev_is_sata(dev))
return dev;
return NULL;
}
#define DISCOVER_REQ_SIZE 16
#define DISCOVER_RESP_SIZE sizeof(struct smp_disc_resp)
static int sas_ex_phy_discover_helper(struct domain_device *dev, u8 *disc_req,
struct smp_disc_resp *disc_resp,
int single)
{
struct discover_resp *dr = &disc_resp->disc;
int res;
disc_req[9] = single;
res = smp_execute_task(dev, disc_req, DISCOVER_REQ_SIZE,
disc_resp, DISCOVER_RESP_SIZE);
if (res)
return res;
if (memcmp(dev->sas_addr, dr->attached_sas_addr, SAS_ADDR_SIZE) == 0) {
pr_notice("Found loopback topology, just ignore it!\n");
return 0;
}
sas_set_ex_phy(dev, single, disc_resp);
return 0;
}
int sas_ex_phy_discover(struct domain_device *dev, int single)
{
struct expander_device *ex = &dev->ex_dev;
int res = 0;
u8 *disc_req;
struct smp_disc_resp *disc_resp;
disc_req = alloc_smp_req(DISCOVER_REQ_SIZE);
if (!disc_req)
return -ENOMEM;
disc_resp = alloc_smp_resp(DISCOVER_RESP_SIZE);
if (!disc_resp) {
kfree(disc_req);
return -ENOMEM;
}
disc_req[1] = SMP_DISCOVER;
if (0 <= single && single < ex->num_phys) {
res = sas_ex_phy_discover_helper(dev, disc_req, disc_resp, single);
} else {
int i;
for (i = 0; i < ex->num_phys; i++) {
res = sas_ex_phy_discover_helper(dev, disc_req,
disc_resp, i);
if (res)
goto out_err;
}
}
out_err:
kfree(disc_resp);
kfree(disc_req);
return res;
}
static int sas_expander_discover(struct domain_device *dev)
{
struct expander_device *ex = &dev->ex_dev;
int res;
ex->ex_phy = kcalloc(ex->num_phys, sizeof(*ex->ex_phy), GFP_KERNEL);
if (!ex->ex_phy)
return -ENOMEM;
res = sas_ex_phy_discover(dev, -1);
if (res)
goto out_err;
return 0;
out_err:
kfree(ex->ex_phy);
ex->ex_phy = NULL;
return res;
}
#define MAX_EXPANDER_PHYS 128
#define RG_REQ_SIZE 8
#define RG_RESP_SIZE sizeof(struct smp_rg_resp)
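/* Issue SMP REPORT GENERAL and cache the result in dev->ex_dev.  While the
* expander reports that it is still self-configuring, sleep and retry, up
* to five attempts in total.
*/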
static int sas_ex_general(struct domain_device *dev)
{
u8 *rg_req;
struct smp_rg_resp *rg_resp;
struct report_general_resp *rg;
int res;
int i;
rg_req = alloc_smp_req(RG_REQ_SIZE);
if (!rg_req)
return -ENOMEM;
rg_resp = alloc_smp_resp(RG_RESP_SIZE);
if (!rg_resp) {
kfree(rg_req);
return -ENOMEM;
}
rg_req[1] = SMP_REPORT_GENERAL;
for (i = 0; i < 5; i++) {
res = smp_execute_task(dev, rg_req, RG_REQ_SIZE, rg_resp,
RG_RESP_SIZE);
if (res) {
pr_notice("RG to ex %016llx failed:0x%x\n",
SAS_ADDR(dev->sas_addr), res);
goto out;
} else if (rg_resp->result != SMP_RESP_FUNC_ACC) {
pr_debug("RG:ex %016llx returned SMP result:0x%x\n",
SAS_ADDR(dev->sas_addr), rg_resp->result);
res = rg_resp->result;
goto out;
}
rg = &rg_resp->rg;
dev->ex_dev.ex_change_count = be16_to_cpu(rg->change_count);
dev->ex_dev.max_route_indexes = be16_to_cpu(rg->route_indexes);
dev->ex_dev.num_phys = min(rg->num_phys, (u8)MAX_EXPANDER_PHYS);
dev->ex_dev.t2t_supp = rg->t2t_supp;
dev->ex_dev.conf_route_table = rg->conf_route_table;
dev->ex_dev.configuring = rg->configuring;
memcpy(dev->ex_dev.enclosure_logical_id,
rg->enclosure_logical_id, 8);
if (dev->ex_dev.configuring) {
pr_debug("RG: ex %016llx self-configuring...\n",
SAS_ADDR(dev->sas_addr));
schedule_timeout_interruptible(5*HZ);
} else
break;
}
out:
kfree(rg_req);
kfree(rg_resp);
return res;
}
static void ex_assign_manuf_info(struct domain_device *dev, void
*_mi_resp)
{
u8 *mi_resp = _mi_resp;
struct sas_rphy *rphy = dev->rphy;
struct sas_expander_device *edev = rphy_to_expander_device(rphy);
memcpy(edev->vendor_id, mi_resp + 12, SAS_EXPANDER_VENDOR_ID_LEN);
memcpy(edev->product_id, mi_resp + 20, SAS_EXPANDER_PRODUCT_ID_LEN);
memcpy(edev->product_rev, mi_resp + 36,
SAS_EXPANDER_PRODUCT_REV_LEN);
if (mi_resp[8] & 1) {
memcpy(edev->component_vendor_id, mi_resp + 40,
SAS_EXPANDER_COMPONENT_VENDOR_ID_LEN);
edev->component_id = mi_resp[48] << 8 | mi_resp[49];
edev->component_revision_id = mi_resp[50];
}
}
#define MI_REQ_SIZE 8
#define MI_RESP_SIZE 64
static int sas_ex_manuf_info(struct domain_device *dev)
{
u8 *mi_req;
u8 *mi_resp;
int res;
mi_req = alloc_smp_req(MI_REQ_SIZE);
if (!mi_req)
return -ENOMEM;
mi_resp = alloc_smp_resp(MI_RESP_SIZE);
if (!mi_resp) {
kfree(mi_req);
return -ENOMEM;
}
mi_req[1] = SMP_REPORT_MANUF_INFO;
res = smp_execute_task(dev, mi_req, MI_REQ_SIZE, mi_resp, MI_RESP_SIZE);
if (res) {
pr_notice("MI: ex %016llx failed:0x%x\n",
SAS_ADDR(dev->sas_addr), res);
goto out;
} else if (mi_resp[2] != SMP_RESP_FUNC_ACC) {
pr_debug("MI ex %016llx returned SMP result:0x%x\n",
SAS_ADDR(dev->sas_addr), mi_resp[2]);
goto out;
}
ex_assign_manuf_info(dev, mi_resp);
out:
kfree(mi_req);
kfree(mi_resp);
return res;
}
#define PC_REQ_SIZE 44
#define PC_RESP_SIZE 8
int sas_smp_phy_control(struct domain_device *dev, int phy_id,
enum phy_func phy_func,
struct sas_phy_linkrates *rates)
{
u8 *pc_req;
u8 *pc_resp;
int res;
pc_req = alloc_smp_req(PC_REQ_SIZE);
if (!pc_req)
return -ENOMEM;
pc_resp = alloc_smp_resp(PC_RESP_SIZE);
if (!pc_resp) {
kfree(pc_req);
return -ENOMEM;
}
pc_req[1] = SMP_PHY_CONTROL;
pc_req[9] = phy_id;
pc_req[10] = phy_func;
if (rates) {
pc_req[32] = rates->minimum_linkrate << 4;
pc_req[33] = rates->maximum_linkrate << 4;
}
res = smp_execute_task(dev, pc_req, PC_REQ_SIZE, pc_resp, PC_RESP_SIZE);
if (res) {
pr_err("ex %016llx phy%02d PHY control failed: %d\n",
SAS_ADDR(dev->sas_addr), phy_id, res);
} else if (pc_resp[2] != SMP_RESP_FUNC_ACC) {
pr_err("ex %016llx phy%02d PHY control failed: function result 0x%x\n",
SAS_ADDR(dev->sas_addr), phy_id, pc_resp[2]);
res = pc_resp[2];
}
kfree(pc_resp);
kfree(pc_req);
return res;
}
static void sas_ex_disable_phy(struct domain_device *dev, int phy_id)
{
struct expander_device *ex = &dev->ex_dev;
struct ex_phy *phy = &ex->ex_phy[phy_id];
sas_smp_phy_control(dev, phy_id, PHY_FUNC_DISABLE, NULL);
phy->linkrate = SAS_PHY_DISABLED;
}
static void sas_ex_disable_port(struct domain_device *dev, u8 *sas_addr)
{
struct expander_device *ex = &dev->ex_dev;
int i;
for (i = 0; i < ex->num_phys; i++) {
struct ex_phy *phy = &ex->ex_phy[i];
if (phy->phy_state == PHY_VACANT ||
phy->phy_state == PHY_NOT_PRESENT)
continue;
if (SAS_ADDR(phy->attached_sas_addr) == SAS_ADDR(sas_addr))
sas_ex_disable_phy(dev, i);
}
}
static int sas_dev_present_in_domain(struct asd_sas_port *port,
u8 *sas_addr)
{
struct domain_device *dev;
if (SAS_ADDR(port->sas_addr) == SAS_ADDR(sas_addr))
return 1;
list_for_each_entry(dev, &port->dev_list, dev_list_node) {
if (SAS_ADDR(dev->sas_addr) == SAS_ADDR(sas_addr))
return 1;
}
return 0;
}
#define RPEL_REQ_SIZE 16
#define RPEL_RESP_SIZE 32
int sas_smp_get_phy_events(struct sas_phy *phy)
{
int res;
u8 *req;
u8 *resp;
struct sas_rphy *rphy = dev_to_rphy(phy->dev.parent);
struct domain_device *dev = sas_find_dev_by_rphy(rphy);
req = alloc_smp_req(RPEL_REQ_SIZE);
if (!req)
return -ENOMEM;
resp = alloc_smp_resp(RPEL_RESP_SIZE);
if (!resp) {
kfree(req);
return -ENOMEM;
}
req[1] = SMP_REPORT_PHY_ERR_LOG;
req[9] = phy->number;
res = smp_execute_task(dev, req, RPEL_REQ_SIZE,
resp, RPEL_RESP_SIZE);
if (res)
goto out;
phy->invalid_dword_count = get_unaligned_be32(&resp[12]);
phy->running_disparity_error_count = get_unaligned_be32(&resp[16]);
phy->loss_of_dword_sync_count = get_unaligned_be32(&resp[20]);
phy->phy_reset_problem_count = get_unaligned_be32(&resp[24]);
out:
kfree(req);
kfree(resp);
return res;
}
#ifdef CONFIG_SCSI_SAS_ATA
#define RPS_REQ_SIZE 16
#define RPS_RESP_SIZE sizeof(struct smp_rps_resp)
int sas_get_report_phy_sata(struct domain_device *dev, int phy_id,
struct smp_rps_resp *rps_resp)
{
int res;
u8 *rps_req = alloc_smp_req(RPS_REQ_SIZE);
u8 *resp = (u8 *)rps_resp;
if (!rps_req)
return -ENOMEM;
rps_req[1] = SMP_REPORT_PHY_SATA;
rps_req[9] = phy_id;
res = smp_execute_task(dev, rps_req, RPS_REQ_SIZE,
rps_resp, RPS_RESP_SIZE);
/* 0x34 is the FIS type for the D2H fis. There's a potential
* standards cockup here. sas-2 explicitly specifies the FIS
* should be encoded so that FIS type is in resp[24].
* However, some expanders endian reverse this. Undo the
* reversal here */
if (!res && resp[27] == 0x34 && resp[24] != 0x34) {
int i;
for (i = 0; i < 5; i++) {
int j = 24 + (i*4);
u8 a, b;
a = resp[j + 0];
b = resp[j + 1];
resp[j + 0] = resp[j + 3];
resp[j + 1] = resp[j + 2];
resp[j + 2] = b;
resp[j + 3] = a;
}
}
kfree(rps_req);
return res;
}
#endif
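/* Derive the child's pathway count and link rates from every parent phy
* attached to the child's SAS address, and add those phys to the sas_port
* through which the child is reached.
*/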
static void sas_ex_get_linkrate(struct domain_device *parent,
struct domain_device *child,
struct ex_phy *parent_phy)
{
struct expander_device *parent_ex = &parent->ex_dev;
struct sas_port *port;
int i;
child->pathways = 0;
port = parent_phy->port;
for (i = 0; i < parent_ex->num_phys; i++) {
struct ex_phy *phy = &parent_ex->ex_phy[i];
if (phy->phy_state == PHY_VACANT ||
phy->phy_state == PHY_NOT_PRESENT)
continue;
if (sas_phy_match_dev_addr(child, phy)) {
child->min_linkrate = min(parent->min_linkrate,
phy->linkrate);
child->max_linkrate = max(parent->max_linkrate,
phy->linkrate);
child->pathways++;
sas_port_add_phy(port, phy->phy);
}
}
child->linkrate = min(parent_phy->linkrate, child->max_linkrate);
child->pathways = min(child->pathways, parent->pathways);
}
static int sas_ex_add_dev(struct domain_device *parent, struct ex_phy *phy,
struct domain_device *child, int phy_id)
{
struct sas_rphy *rphy;
int res;
child->dev_type = SAS_END_DEVICE;
rphy = sas_end_device_alloc(phy->port);
if (!rphy)
return -ENOMEM;
child->tproto = phy->attached_tproto;
sas_init_dev(child);
child->rphy = rphy;
get_device(&rphy->dev);
rphy->identify.phy_identifier = phy_id;
sas_fill_in_rphy(child, rphy);
list_add_tail(&child->disco_list_node, &parent->port->disco_list);
res = sas_notify_lldd_dev_found(child);
if (res) {
pr_notice("notify lldd for device %016llx at %016llx:%02d returned 0x%x\n",
SAS_ADDR(child->sas_addr),
SAS_ADDR(parent->sas_addr), phy_id, res);
sas_rphy_free(child->rphy);
list_del(&child->disco_list_node);
return res;
}
return 0;
}
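/* Allocate and register a child end device found behind an expander phy.
* SATA hosts and SATA port selectors are not handled.  The sas_port for
* the phy is created if it does not exist yet, and the device is then
* handed to the ATA/STP or SSP add path depending on the attached target
* protocol.
*/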
static struct domain_device *sas_ex_discover_end_dev(
struct domain_device *parent, int phy_id)
{
struct expander_device *parent_ex = &parent->ex_dev;
struct ex_phy *phy = &parent_ex->ex_phy[phy_id];
struct domain_device *child = NULL;
int res;
if (phy->attached_sata_host || phy->attached_sata_ps)
return NULL;
child = sas_alloc_device();
if (!child)
return NULL;
kref_get(&parent->kref);
child->parent = parent;
child->port = parent->port;
child->iproto = phy->attached_iproto;
memcpy(child->sas_addr, phy->attached_sas_addr, SAS_ADDR_SIZE);
sas_hash_addr(child->hashed_sas_addr, child->sas_addr);
if (!phy->port) {
phy->port = sas_port_alloc(&parent->rphy->dev, phy_id);
if (unlikely(!phy->port))
goto out_err;
if (unlikely(sas_port_add(phy->port) != 0)) {
sas_port_free(phy->port);
goto out_err;
}
}
sas_ex_get_linkrate(parent, child, phy);
sas_device_set_phy(child, phy->port);
if ((phy->attached_tproto & SAS_PROTOCOL_STP) || phy->attached_sata_dev) {
res = sas_ata_add_dev(parent, phy, child, phy_id);
} else if (phy->attached_tproto & SAS_PROTOCOL_SSP) {
res = sas_ex_add_dev(parent, phy, child, phy_id);
} else {
pr_notice("target proto 0x%x at %016llx:0x%x not handled\n",
phy->attached_tproto, SAS_ADDR(parent->sas_addr),
phy_id);
res = -ENODEV;
}
if (res)
goto out_free;
list_add_tail(&child->siblings, &parent_ex->children);
return child;
out_free:
sas_port_delete(phy->port);
out_err:
phy->port = NULL;
sas_put_device(child);
return NULL;
}
/* See if this phy is part of a wide port */
static bool sas_ex_join_wide_port(struct domain_device *parent, int phy_id)
{
struct ex_phy *phy = &parent->ex_dev.ex_phy[phy_id];
int i;
for (i = 0; i < parent->ex_dev.num_phys; i++) {
struct ex_phy *ephy = &parent->ex_dev.ex_phy[i];
if (ephy == phy)
continue;
if (!memcmp(phy->attached_sas_addr, ephy->attached_sas_addr,
SAS_ADDR_SIZE) && ephy->port) {
sas_port_add_phy(ephy->port, phy->phy);
phy->port = ephy->port;
phy->phy_state = PHY_DEVICE_DISCOVERED;
return true;
}
}
return false;
}
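/* Discover a child expander attached at phy_id.  Expander-to-expander
* links over a direct-routing phy are rejected; otherwise the child is
* allocated, registered with the SAS transport class and interrogated via
* sas_discover_expander().
*/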
static struct domain_device *sas_ex_discover_expander(
struct domain_device *parent, int phy_id)
{
struct sas_expander_device *parent_ex = rphy_to_expander_device(parent->rphy);
struct ex_phy *phy = &parent->ex_dev.ex_phy[phy_id];
struct domain_device *child = NULL;
struct sas_rphy *rphy;
struct sas_expander_device *edev;
struct asd_sas_port *port;
int res;
if (phy->routing_attr == DIRECT_ROUTING) {
pr_warn("ex %016llx:%02d:D <--> ex %016llx:0x%x is not allowed\n",
SAS_ADDR(parent->sas_addr), phy_id,
SAS_ADDR(phy->attached_sas_addr),
phy->attached_phy_id);
return NULL;
}
child = sas_alloc_device();
if (!child)
return NULL;
phy->port = sas_port_alloc(&parent->rphy->dev, phy_id);
/* FIXME: better error handling */
BUG_ON(sas_port_add(phy->port) != 0);
switch (phy->attached_dev_type) {
case SAS_EDGE_EXPANDER_DEVICE:
rphy = sas_expander_alloc(phy->port,
SAS_EDGE_EXPANDER_DEVICE);
break;
case SAS_FANOUT_EXPANDER_DEVICE:
rphy = sas_expander_alloc(phy->port,
SAS_FANOUT_EXPANDER_DEVICE);
break;
default:
rphy = NULL; /* shut gcc up */
BUG();
}
port = parent->port;
child->rphy = rphy;
get_device(&rphy->dev);
edev = rphy_to_expander_device(rphy);
child->dev_type = phy->attached_dev_type;
kref_get(&parent->kref);
child->parent = parent;
child->port = port;
child->iproto = phy->attached_iproto;
child->tproto = phy->attached_tproto;
memcpy(child->sas_addr, phy->attached_sas_addr, SAS_ADDR_SIZE);
sas_hash_addr(child->hashed_sas_addr, child->sas_addr);
sas_ex_get_linkrate(parent, child, phy);
edev->level = parent_ex->level + 1;
parent->port->disc.max_level = max(parent->port->disc.max_level,
edev->level);
sas_init_dev(child);
sas_fill_in_rphy(child, rphy);
sas_rphy_add(rphy);
spin_lock_irq(&parent->port->dev_list_lock);
list_add_tail(&child->dev_list_node, &parent->port->dev_list);
spin_unlock_irq(&parent->port->dev_list_lock);
res = sas_discover_expander(child);
if (res) {
sas_rphy_delete(rphy);
spin_lock_irq(&parent->port->dev_list_lock);
list_del(&child->dev_list_node);
spin_unlock_irq(&parent->port->dev_list_lock);
sas_put_device(child);
sas_port_delete(phy->port);
phy->port = NULL;
return NULL;
}
list_add_tail(&child->siblings, &parent->ex_dev.children);
return child;
}
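/* Per-phy discovery dispatcher: handle SATA spinup hold, parent port and
* domain coherency, routing table configuration and wide-port membership,
* then discover an end device or a child expander depending on the
* attached device type.
*/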
static int sas_ex_discover_dev(struct domain_device *dev, int phy_id)
{
struct expander_device *ex = &dev->ex_dev;
struct ex_phy *ex_phy = &ex->ex_phy[phy_id];
struct domain_device *child = NULL;
int res = 0;
/* Phy state */
if (ex_phy->linkrate == SAS_SATA_SPINUP_HOLD) {
if (!sas_smp_phy_control(dev, phy_id, PHY_FUNC_LINK_RESET, NULL))
res = sas_ex_phy_discover(dev, phy_id);
if (res)
return res;
}
/* Parent and domain coherency */
if (!dev->parent && sas_phy_match_port_addr(dev->port, ex_phy)) {
sas_add_parent_port(dev, phy_id);
return 0;
}
if (dev->parent && sas_phy_match_dev_addr(dev->parent, ex_phy)) {
sas_add_parent_port(dev, phy_id);
if (ex_phy->routing_attr == TABLE_ROUTING)
sas_configure_phy(dev, phy_id, dev->port->sas_addr, 1);
return 0;
}
if (sas_dev_present_in_domain(dev->port, ex_phy->attached_sas_addr))
sas_ex_disable_port(dev, ex_phy->attached_sas_addr);
if (ex_phy->attached_dev_type == SAS_PHY_UNUSED) {
if (ex_phy->routing_attr == DIRECT_ROUTING) {
memset(ex_phy->attached_sas_addr, 0, SAS_ADDR_SIZE);
sas_configure_routing(dev, ex_phy->attached_sas_addr);
}
return 0;
} else if (ex_phy->linkrate == SAS_LINK_RATE_UNKNOWN)
return 0;
if (ex_phy->attached_dev_type != SAS_END_DEVICE &&
ex_phy->attached_dev_type != SAS_FANOUT_EXPANDER_DEVICE &&
ex_phy->attached_dev_type != SAS_EDGE_EXPANDER_DEVICE &&
ex_phy->attached_dev_type != SAS_SATA_PENDING) {
pr_warn("unknown device type(0x%x) attached to ex %016llx phy%02d\n",
ex_phy->attached_dev_type,
SAS_ADDR(dev->sas_addr),
phy_id);
return 0;
}
res = sas_configure_routing(dev, ex_phy->attached_sas_addr);
if (res) {
pr_notice("configure routing for dev %016llx reported 0x%x. Forgotten\n",
SAS_ADDR(ex_phy->attached_sas_addr), res);
sas_disable_routing(dev, ex_phy->attached_sas_addr);
return res;
}
if (sas_ex_join_wide_port(dev, phy_id)) {
pr_debug("Attaching ex phy%02d to wide port %016llx\n",
phy_id, SAS_ADDR(ex_phy->attached_sas_addr));
return res;
}
switch (ex_phy->attached_dev_type) {
case SAS_END_DEVICE:
case SAS_SATA_PENDING:
child = sas_ex_discover_end_dev(dev, phy_id);
break;
case SAS_FANOUT_EXPANDER_DEVICE:
if (SAS_ADDR(dev->port->disc.fanout_sas_addr)) {
pr_debug("second fanout expander %016llx phy%02d attached to ex %016llx phy%02d\n",
SAS_ADDR(ex_phy->attached_sas_addr),
ex_phy->attached_phy_id,
SAS_ADDR(dev->sas_addr),
phy_id);
sas_ex_disable_phy(dev, phy_id);
return res;
} else
memcpy(dev->port->disc.fanout_sas_addr,
ex_phy->attached_sas_addr, SAS_ADDR_SIZE);
fallthrough;
case SAS_EDGE_EXPANDER_DEVICE:
child = sas_ex_discover_expander(dev, phy_id);
break;
default:
break;
}
if (!child)
pr_notice("ex %016llx phy%02d failed to discover\n",
SAS_ADDR(dev->sas_addr), phy_id);
return res;
}
static int sas_find_sub_addr(struct domain_device *dev, u8 *sub_addr)
{
struct expander_device *ex = &dev->ex_dev;
int i;
for (i = 0; i < ex->num_phys; i++) {
struct ex_phy *phy = &ex->ex_phy[i];
if (phy->phy_state == PHY_VACANT ||
phy->phy_state == PHY_NOT_PRESENT)
continue;
if (dev_is_expander(phy->attached_dev_type) &&
phy->routing_attr == SUBTRACTIVE_ROUTING) {
memcpy(sub_addr, phy->attached_sas_addr, SAS_ADDR_SIZE);
return 1;
}
}
return 0;
}
static int sas_check_level_subtractive_boundary(struct domain_device *dev)
{
struct expander_device *ex = &dev->ex_dev;
struct domain_device *child;
u8 sub_addr[SAS_ADDR_SIZE] = {0, };
list_for_each_entry(child, &ex->children, siblings) {
if (!dev_is_expander(child->dev_type))
continue;
if (sub_addr[0] == 0) {
sas_find_sub_addr(child, sub_addr);
continue;
} else {
u8 s2[SAS_ADDR_SIZE];
if (sas_find_sub_addr(child, s2) &&
(SAS_ADDR(sub_addr) != SAS_ADDR(s2))) {
pr_notice("ex %016llx->%016llx-?->%016llx diverges from subtractive boundary %016llx\n",
SAS_ADDR(dev->sas_addr),
SAS_ADDR(child->sas_addr),
SAS_ADDR(s2),
SAS_ADDR(sub_addr));
sas_ex_disable_port(child, s2);
}
}
}
return 0;
}
/**
* sas_ex_discover_devices - discover devices attached to this expander
* @dev: pointer to the expander domain device
* @single: phy id to discover a single phy, or -1 to discover all phys
*
* Configure this expander for use with its devices and register the
* devices of this expander.
*/
static int sas_ex_discover_devices(struct domain_device *dev, int single)
{
struct expander_device *ex = &dev->ex_dev;
int i = 0, end = ex->num_phys;
int res = 0;
if (0 <= single && single < end) {
i = single;
end = i+1;
}
for ( ; i < end; i++) {
struct ex_phy *ex_phy = &ex->ex_phy[i];
if (ex_phy->phy_state == PHY_VACANT ||
ex_phy->phy_state == PHY_NOT_PRESENT ||
ex_phy->phy_state == PHY_DEVICE_DISCOVERED)
continue;
switch (ex_phy->linkrate) {
case SAS_PHY_DISABLED:
case SAS_PHY_RESET_PROBLEM:
case SAS_SATA_PORT_SELECTOR:
continue;
default:
res = sas_ex_discover_dev(dev, i);
if (res)
break;
continue;
}
}
if (!res)
sas_check_level_subtractive_boundary(dev);
return res;
}
static int sas_check_ex_subtractive_boundary(struct domain_device *dev)
{
struct expander_device *ex = &dev->ex_dev;
int i;
u8 *sub_sas_addr = NULL;
if (dev->dev_type != SAS_EDGE_EXPANDER_DEVICE)
return 0;
for (i = 0; i < ex->num_phys; i++) {
struct ex_phy *phy = &ex->ex_phy[i];
if (phy->phy_state == PHY_VACANT ||
phy->phy_state == PHY_NOT_PRESENT)
continue;
if (dev_is_expander(phy->attached_dev_type) &&
phy->routing_attr == SUBTRACTIVE_ROUTING) {
if (!sub_sas_addr)
sub_sas_addr = &phy->attached_sas_addr[0];
else if (SAS_ADDR(sub_sas_addr) !=
SAS_ADDR(phy->attached_sas_addr)) {
pr_notice("ex %016llx phy%02d diverges(%016llx) on subtractive boundary(%016llx). Disabled\n",
SAS_ADDR(dev->sas_addr), i,
SAS_ADDR(phy->attached_sas_addr),
SAS_ADDR(sub_sas_addr));
sas_ex_disable_phy(dev, i);
}
}
}
return 0;
}
static void sas_print_parent_topology_bug(struct domain_device *child,
struct ex_phy *parent_phy,
struct ex_phy *child_phy)
{
static const char *ex_type[] = {
[SAS_EDGE_EXPANDER_DEVICE] = "edge",
[SAS_FANOUT_EXPANDER_DEVICE] = "fanout",
};
struct domain_device *parent = child->parent;
pr_notice("%s ex %016llx phy%02d <--> %s ex %016llx phy%02d has %c:%c routing link!\n",
ex_type[parent->dev_type],
SAS_ADDR(parent->sas_addr),
parent_phy->phy_id,
ex_type[child->dev_type],
SAS_ADDR(child->sas_addr),
child_phy->phy_id,
sas_route_char(parent, parent_phy),
sas_route_char(child, child_phy));
}
static bool sas_eeds_valid(struct domain_device *parent,
struct domain_device *child)
{
struct sas_discovery *disc = &parent->port->disc;
return (SAS_ADDR(disc->eeds_a) == SAS_ADDR(parent->sas_addr) ||
SAS_ADDR(disc->eeds_a) == SAS_ADDR(child->sas_addr)) &&
(SAS_ADDR(disc->eeds_b) == SAS_ADDR(parent->sas_addr) ||
SAS_ADDR(disc->eeds_b) == SAS_ADDR(child->sas_addr));
}
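/* Validate a subtractive-to-subtractive link between two edge expanders.
* Only one such pairing per domain is allowed (recorded in disc->eeds_a
* and disc->eeds_b), and it may not coexist with a fanout expander.
*/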
static int sas_check_eeds(struct domain_device *child,
struct ex_phy *parent_phy,
struct ex_phy *child_phy)
{
int res = 0;
struct domain_device *parent = child->parent;
struct sas_discovery *disc = &parent->port->disc;
if (SAS_ADDR(disc->fanout_sas_addr) != 0) {
res = -ENODEV;
pr_warn("edge ex %016llx phy S:%02d <--> edge ex %016llx phy S:%02d, while there is a fanout ex %016llx\n",
SAS_ADDR(parent->sas_addr),
parent_phy->phy_id,
SAS_ADDR(child->sas_addr),
child_phy->phy_id,
SAS_ADDR(disc->fanout_sas_addr));
} else if (SAS_ADDR(disc->eeds_a) == 0) {
memcpy(disc->eeds_a, parent->sas_addr, SAS_ADDR_SIZE);
memcpy(disc->eeds_b, child->sas_addr, SAS_ADDR_SIZE);
} else if (!sas_eeds_valid(parent, child)) {
res = -ENODEV;
pr_warn("edge ex %016llx phy%02d <--> edge ex %016llx phy%02d link forms a third EEDS!\n",
SAS_ADDR(parent->sas_addr),
parent_phy->phy_id,
SAS_ADDR(child->sas_addr),
child_phy->phy_id);
}
return res;
}
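/* Check the routing attributes on both ends of the link between an edge
* expander parent and its child expander; combinations not allowed by the
* checks below are reported as a parent topology bug.
*/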
static int sas_check_edge_expander_topo(struct domain_device *child,
struct ex_phy *parent_phy)
{
struct expander_device *child_ex = &child->ex_dev;
struct expander_device *parent_ex = &child->parent->ex_dev;
struct ex_phy *child_phy;
child_phy = &child_ex->ex_phy[parent_phy->attached_phy_id];
if (child->dev_type == SAS_FANOUT_EXPANDER_DEVICE) {
if (parent_phy->routing_attr != SUBTRACTIVE_ROUTING ||
child_phy->routing_attr != TABLE_ROUTING)
goto error;
} else if (parent_phy->routing_attr == SUBTRACTIVE_ROUTING) {
if (child_phy->routing_attr == SUBTRACTIVE_ROUTING)
return sas_check_eeds(child, parent_phy, child_phy);
else if (child_phy->routing_attr != TABLE_ROUTING)
goto error;
} else if (parent_phy->routing_attr == TABLE_ROUTING) {
if (child_phy->routing_attr != SUBTRACTIVE_ROUTING &&
(child_phy->routing_attr != TABLE_ROUTING ||
!child_ex->t2t_supp || !parent_ex->t2t_supp))
goto error;
}
return 0;
error:
sas_print_parent_topology_bug(child, parent_phy, child_phy);
return -ENODEV;
}
static int sas_check_fanout_expander_topo(struct domain_device *child,
struct ex_phy *parent_phy)
{
struct expander_device *child_ex = &child->ex_dev;
struct ex_phy *child_phy;
child_phy = &child_ex->ex_phy[parent_phy->attached_phy_id];
if (parent_phy->routing_attr == TABLE_ROUTING &&
child_phy->routing_attr == SUBTRACTIVE_ROUTING)
return 0;
sas_print_parent_topology_bug(child, parent_phy, child_phy);
return -ENODEV;
}
static int sas_check_parent_topology(struct domain_device *child)
{
struct expander_device *parent_ex;
int i;
int res = 0;
if (!child->parent)
return 0;
if (!dev_is_expander(child->parent->dev_type))
return 0;
parent_ex = &child->parent->ex_dev;
for (i = 0; i < parent_ex->num_phys; i++) {
struct ex_phy *parent_phy = &parent_ex->ex_phy[i];
if (parent_phy->phy_state == PHY_VACANT ||
parent_phy->phy_state == PHY_NOT_PRESENT)
continue;
if (!sas_phy_match_dev_addr(child, parent_phy))
continue;
switch (child->parent->dev_type) {
case SAS_EDGE_EXPANDER_DEVICE:
if (sas_check_edge_expander_topo(child, parent_phy))
res = -ENODEV;
break;
case SAS_FANOUT_EXPANDER_DEVICE:
if (sas_check_fanout_expander_topo(child, parent_phy))
res = -ENODEV;
break;
default:
break;
}
}
return res;
}
#define RRI_REQ_SIZE 16
#define RRI_RESP_SIZE 44
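/* Walk the route table of the given expander phy with REPORT ROUTE
* INFORMATION, looking for sas_addr.  On success *index holds the matching
* (or first free) route index and *present says whether an enabled entry
* for the address already exists.
*/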
static int sas_configure_present(struct domain_device *dev, int phy_id,
u8 *sas_addr, int *index, int *present)
{
int i, res = 0;
struct expander_device *ex = &dev->ex_dev;
struct ex_phy *phy = &ex->ex_phy[phy_id];
u8 *rri_req;
u8 *rri_resp;
*present = 0;
*index = 0;
rri_req = alloc_smp_req(RRI_REQ_SIZE);
if (!rri_req)
return -ENOMEM;
rri_resp = alloc_smp_resp(RRI_RESP_SIZE);
if (!rri_resp) {
kfree(rri_req);
return -ENOMEM;
}
rri_req[1] = SMP_REPORT_ROUTE_INFO;
rri_req[9] = phy_id;
for (i = 0; i < ex->max_route_indexes ; i++) {
*(__be16 *)(rri_req+6) = cpu_to_be16(i);
res = smp_execute_task(dev, rri_req, RRI_REQ_SIZE, rri_resp,
RRI_RESP_SIZE);
if (res)
goto out;
res = rri_resp[2];
if (res == SMP_RESP_NO_INDEX) {
pr_warn("overflow of indexes: dev %016llx phy%02d index 0x%x\n",
SAS_ADDR(dev->sas_addr), phy_id, i);
goto out;
} else if (res != SMP_RESP_FUNC_ACC) {
pr_notice("%s: dev %016llx phy%02d index 0x%x result 0x%x\n",
__func__, SAS_ADDR(dev->sas_addr), phy_id,
i, res);
goto out;
}
if (SAS_ADDR(sas_addr) != 0) {
if (SAS_ADDR(rri_resp+16) == SAS_ADDR(sas_addr)) {
*index = i;
if ((rri_resp[12] & 0x80) == 0x80)
*present = 0;
else
*present = 1;
goto out;
} else if (SAS_ADDR(rri_resp+16) == 0) {
*index = i;
*present = 0;
goto out;
}
} else if (SAS_ADDR(rri_resp+16) == 0 &&
phy->last_da_index < i) {
phy->last_da_index = i;
*index = i;
*present = 0;
goto out;
}
}
res = -1;
out:
kfree(rri_req);
kfree(rri_resp);
return res;
}
#define CRI_REQ_SIZE 44
#define CRI_RESP_SIZE 8
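/* Program one route table entry with CONFIGURE ROUTE INFORMATION.  The
* entry is marked disabled (bit 7 of byte 12) when the address is zero or
* include is false.
*/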
static int sas_configure_set(struct domain_device *dev, int phy_id,
u8 *sas_addr, int index, int include)
{
int res;
u8 *cri_req;
u8 *cri_resp;
cri_req = alloc_smp_req(CRI_REQ_SIZE);
if (!cri_req)
return -ENOMEM;
cri_resp = alloc_smp_resp(CRI_RESP_SIZE);
if (!cri_resp) {
kfree(cri_req);
return -ENOMEM;
}
cri_req[1] = SMP_CONF_ROUTE_INFO;
*(__be16 *)(cri_req+6) = cpu_to_be16(index);
cri_req[9] = phy_id;
if (SAS_ADDR(sas_addr) == 0 || !include)
cri_req[12] |= 0x80;
memcpy(cri_req+16, sas_addr, SAS_ADDR_SIZE);
res = smp_execute_task(dev, cri_req, CRI_REQ_SIZE, cri_resp,
CRI_RESP_SIZE);
if (res)
goto out;
res = cri_resp[2];
if (res == SMP_RESP_NO_INDEX) {
pr_warn("overflow of indexes: dev %016llx phy%02d index 0x%x\n",
SAS_ADDR(dev->sas_addr), phy_id, index);
}
out:
kfree(cri_req);
kfree(cri_resp);
return res;
}
static int sas_configure_phy(struct domain_device *dev, int phy_id,
u8 *sas_addr, int include)
{
int index;
int present;
int res;
res = sas_configure_present(dev, phy_id, sas_addr, &index, &present);
if (res)
return res;
if (include ^ present)
return sas_configure_set(dev, phy_id, sas_addr, index,
include);
return res;
}
/**
* sas_configure_parent - configure routing table of parent
* @parent: parent expander
* @child: child expander
* @sas_addr: SAS port identifier of device directly attached to child
* @include: whether or not to include @child in the expander routing table
*/
static int sas_configure_parent(struct domain_device *parent,
struct domain_device *child,
u8 *sas_addr, int include)
{
struct expander_device *ex_parent = &parent->ex_dev;
int res = 0;
int i;
if (parent->parent) {
res = sas_configure_parent(parent->parent, parent, sas_addr,
include);
if (res)
return res;
}
if (ex_parent->conf_route_table == 0) {
pr_debug("ex %016llx has self-configuring routing table\n",
SAS_ADDR(parent->sas_addr));
return 0;
}
for (i = 0; i < ex_parent->num_phys; i++) {
struct ex_phy *phy = &ex_parent->ex_phy[i];
if ((phy->routing_attr == TABLE_ROUTING) &&
sas_phy_match_dev_addr(child, phy)) {
res = sas_configure_phy(parent, i, sas_addr, include);
if (res)
return res;
}
}
return res;
}
/**
* sas_configure_routing - configure routing
* @dev: expander device
* @sas_addr: port identifier of device directly attached to the expander device
*/
static int sas_configure_routing(struct domain_device *dev, u8 *sas_addr)
{
if (dev->parent)
return sas_configure_parent(dev->parent, dev, sas_addr, 1);
return 0;
}
static int sas_disable_routing(struct domain_device *dev, u8 *sas_addr)
{
if (dev->parent)
return sas_configure_parent(dev->parent, dev, sas_addr, 0);
return 0;
}
/**
* sas_discover_expander - expander discovery
* @dev: pointer to expander domain device
*
* See comment in sas_discover_sata().
*/
static int sas_discover_expander(struct domain_device *dev)
{
int res;
res = sas_notify_lldd_dev_found(dev);
if (res)
return res;
res = sas_ex_general(dev);
if (res)
goto out_err;
res = sas_ex_manuf_info(dev);
if (res)
goto out_err;
res = sas_expander_discover(dev);
if (res) {
pr_warn("expander %016llx discovery failed(0x%x)\n",
SAS_ADDR(dev->sas_addr), res);
goto out_err;
}
sas_check_ex_subtractive_boundary(dev);
res = sas_check_parent_topology(dev);
if (res)
goto out_err;
return 0;
out_err:
sas_notify_lldd_dev_gone(dev);
return res;
}
static int sas_ex_level_discovery(struct asd_sas_port *port, const int level)
{
int res = 0;
struct domain_device *dev;
list_for_each_entry(dev, &port->dev_list, dev_list_node) {
if (dev_is_expander(dev->dev_type)) {
struct sas_expander_device *ex =
rphy_to_expander_device(dev->rphy);
if (level == ex->level)
res = sas_ex_discover_devices(dev, -1);
else if (level > 0)
res = sas_ex_discover_devices(port->port_dev, -1);
}
}
return res;
}
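/* Breadth-first discovery: keep discovering one level at a time while each
* pass extends the domain's maximum expander level.
*/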
static int sas_ex_bfs_disc(struct asd_sas_port *port)
{
int res;
int level;
do {
level = port->disc.max_level;
res = sas_ex_level_discovery(port, level);
mb();
} while (level < port->disc.max_level);
return res;
}
int sas_discover_root_expander(struct domain_device *dev)
{
int res;
struct sas_expander_device *ex = rphy_to_expander_device(dev->rphy);
res = sas_rphy_add(dev->rphy);
if (res)
goto out_err;
ex->level = dev->port->disc.max_level; /* 0 */
res = sas_discover_expander(dev);
if (res)
goto out_err2;
sas_ex_bfs_disc(dev->port);
return res;
out_err2:
sas_rphy_remove(dev->rphy);
out_err:
return res;
}
/* ---------- Domain revalidation ---------- */
static int sas_get_phy_discover(struct domain_device *dev,
int phy_id, struct smp_disc_resp *disc_resp)
{
int res;
u8 *disc_req;
disc_req = alloc_smp_req(DISCOVER_REQ_SIZE);
if (!disc_req)
return -ENOMEM;
disc_req[1] = SMP_DISCOVER;
disc_req[9] = phy_id;
res = smp_execute_task(dev, disc_req, DISCOVER_REQ_SIZE,
disc_resp, DISCOVER_RESP_SIZE);
if (res)
goto out;
if (disc_resp->result != SMP_RESP_FUNC_ACC)
res = disc_resp->result;
out:
kfree(disc_req);
return res;
}
static int sas_get_phy_change_count(struct domain_device *dev,
int phy_id, int *pcc)
{
int res;
struct smp_disc_resp *disc_resp;
disc_resp = alloc_smp_resp(DISCOVER_RESP_SIZE);
if (!disc_resp)
return -ENOMEM;
res = sas_get_phy_discover(dev, phy_id, disc_resp);
if (!res)
*pcc = disc_resp->disc.change_count;
kfree(disc_resp);
return res;
}
int sas_get_phy_attached_dev(struct domain_device *dev, int phy_id,
u8 *sas_addr, enum sas_device_type *type)
{
int res;
struct smp_disc_resp *disc_resp;
disc_resp = alloc_smp_resp(DISCOVER_RESP_SIZE);
if (!disc_resp)
return -ENOMEM;
res = sas_get_phy_discover(dev, phy_id, disc_resp);
if (res == 0) {
memcpy(sas_addr, disc_resp->disc.attached_sas_addr,
SAS_ADDR_SIZE);
*type = to_dev_type(&disc_resp->disc);
if (*type == 0)
memset(sas_addr, 0, SAS_ADDR_SIZE);
}
kfree(disc_resp);
return res;
}
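/* Scan expander phys starting at from_phy and report in *phy_id the first
* phy whose DISCOVER change count differs from the cached value, optionally
* updating the cached count.
*/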
static int sas_find_bcast_phy(struct domain_device *dev, int *phy_id,
int from_phy, bool update)
{
struct expander_device *ex = &dev->ex_dev;
int res = 0;
int i;
for (i = from_phy; i < ex->num_phys; i++) {
int phy_change_count = 0;
res = sas_get_phy_change_count(dev, i, &phy_change_count);
switch (res) {
case SMP_RESP_PHY_VACANT:
case SMP_RESP_NO_PHY:
continue;
case SMP_RESP_FUNC_ACC:
break;
default:
return res;
}
if (phy_change_count != ex->ex_phy[i].phy_change_count) {
if (update)
ex->ex_phy[i].phy_change_count =
phy_change_count;
*phy_id = i;
return 0;
}
}
return 0;
}
static int sas_get_ex_change_count(struct domain_device *dev, int *ecc)
{
int res;
u8 *rg_req;
struct smp_rg_resp *rg_resp;
rg_req = alloc_smp_req(RG_REQ_SIZE);
if (!rg_req)
return -ENOMEM;
rg_resp = alloc_smp_resp(RG_RESP_SIZE);
if (!rg_resp) {
kfree(rg_req);
return -ENOMEM;
}
rg_req[1] = SMP_REPORT_GENERAL;
res = smp_execute_task(dev, rg_req, RG_REQ_SIZE, rg_resp,
RG_RESP_SIZE);
if (res)
goto out;
if (rg_resp->result != SMP_RESP_FUNC_ACC) {
res = rg_resp->result;
goto out;
}
*ecc = be16_to_cpu(rg_resp->rg.change_count);
out:
kfree(rg_resp);
kfree(rg_req);
return res;
}
/**
* sas_find_bcast_dev - find the device that originated BROADCAST(CHANGE).
* @dev: domain device to be checked.
* @src_dev: the device which originated BROADCAST(CHANGE).
*
* This also supports self-configuring expanders.  With two cascaded
* expanders, if disks are hotplugged behind the second-level expander
* while the first-level expander is self-configuring, BROADCAST(CHANGE)
* is originated not only by the second-level expander but also by the
* first-level one (see SAS protocol SAS 2r-14, 7.11 for details).  The
* expander change count therefore increments at least once in both
* expanders, but only the expander with a phy whose phy change count has
* changed is the source device we care about.
*/
static int sas_find_bcast_dev(struct domain_device *dev,
struct domain_device **src_dev)
{
struct expander_device *ex = &dev->ex_dev;
int ex_change_count = -1;
int phy_id = -1;
int res;
struct domain_device *ch;
res = sas_get_ex_change_count(dev, &ex_change_count);
if (res)
goto out;
if (ex_change_count != -1 && ex_change_count != ex->ex_change_count) {
/* Just check whether any of this expander's phys has a changed phy
* change count, to determine whether this expander originated the
* BROADCAST; do not update the phy change count field in our
* structure yet.
*/
res = sas_find_bcast_phy(dev, &phy_id, 0, false);
if (phy_id != -1) {
*src_dev = dev;
ex->ex_change_count = ex_change_count;
pr_info("ex %016llx phy%02d change count has changed\n",
SAS_ADDR(dev->sas_addr), phy_id);
return res;
} else
pr_info("ex %016llx phys DID NOT change\n",
SAS_ADDR(dev->sas_addr));
}
list_for_each_entry(ch, &ex->children, siblings) {
if (dev_is_expander(ch->dev_type)) {
res = sas_find_bcast_dev(ch, src_dev);
if (*src_dev)
return res;
}
}
out:
return res;
}
static void sas_unregister_ex_tree(struct asd_sas_port *port, struct domain_device *dev)
{
struct expander_device *ex = &dev->ex_dev;
struct domain_device *child, *n;
list_for_each_entry_safe(child, n, &ex->children, siblings) {
set_bit(SAS_DEV_GONE, &child->state);
if (dev_is_expander(child->dev_type))
sas_unregister_ex_tree(port, child);
else
sas_unregister_dev(port, child);
}
sas_unregister_dev(port, dev);
}
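/* Remove whatever was attached behind this expander phy.  If this was the
* last phy of its port, unregister the attached device (or whole expander
* subtree) and drop its routing entries; in all cases detach the phy from
* its sas_port and schedule the port for deletion once no phys remain.
*/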
static void sas_unregister_devs_sas_addr(struct domain_device *parent,
int phy_id, bool last)
{
struct expander_device *ex_dev = &parent->ex_dev;
struct ex_phy *phy = &ex_dev->ex_phy[phy_id];
struct domain_device *child, *n, *found = NULL;
if (last) {
list_for_each_entry_safe(child, n,
&ex_dev->children, siblings) {
if (sas_phy_match_dev_addr(child, phy)) {
set_bit(SAS_DEV_GONE, &child->state);
if (dev_is_expander(child->dev_type))
sas_unregister_ex_tree(parent->port, child);
else
sas_unregister_dev(parent->port, child);
found = child;
break;
}
}
sas_disable_routing(parent, phy->attached_sas_addr);
}
memset(phy->attached_sas_addr, 0, SAS_ADDR_SIZE);
if (phy->port) {
sas_port_delete_phy(phy->port, phy->phy);
sas_device_set_phy(found, phy->port);
if (phy->port->num_phys == 0)
list_add_tail(&phy->port->del_list,
&parent->port->sas_port_del_list);
phy->port = NULL;
}
}
static int sas_discover_bfs_by_root_level(struct domain_device *root,
const int level)
{
struct expander_device *ex_root = &root->ex_dev;
struct domain_device *child;
int res = 0;
list_for_each_entry(child, &ex_root->children, siblings) {
if (dev_is_expander(child->dev_type)) {
struct sas_expander_device *ex =
rphy_to_expander_device(child->rphy);
if (level > ex->level)
res = sas_discover_bfs_by_root_level(child,
level);
else if (level == ex->level)
res = sas_ex_discover_devices(child, -1);
}
}
return res;
}
static int sas_discover_bfs_by_root(struct domain_device *dev)
{
int res;
struct sas_expander_device *ex = rphy_to_expander_device(dev->rphy);
int level = ex->level+1;
res = sas_ex_discover_devices(dev, -1);
if (res)
goto out;
do {
res = sas_discover_bfs_by_root_level(dev, level);
mb();
level += 1;
} while (level <= dev->port->disc.max_level);
out:
return res;
}
static int sas_discover_new(struct domain_device *dev, int phy_id)
{
struct ex_phy *ex_phy = &dev->ex_dev.ex_phy[phy_id];
struct domain_device *child;
int res;
pr_debug("ex %016llx phy%02d new device attached\n",
SAS_ADDR(dev->sas_addr), phy_id);
res = sas_ex_phy_discover(dev, phy_id);
if (res)
return res;
if (sas_ex_join_wide_port(dev, phy_id))
return 0;
res = sas_ex_discover_devices(dev, phy_id);
if (res)
return res;
list_for_each_entry(child, &dev->ex_dev.children, siblings) {
if (sas_phy_match_dev_addr(child, ex_phy)) {
if (dev_is_expander(child->dev_type))
res = sas_discover_bfs_by_root(child);
break;
}
}
return res;
}
static bool dev_type_flutter(enum sas_device_type new, enum sas_device_type old)
{
if (old == new)
return true;
/* treat device directed resets as flutter, if we went
* SAS_END_DEVICE to SAS_SATA_PENDING the link needs recovery
*/
if ((old == SAS_SATA_PENDING && new == SAS_END_DEVICE) ||
(old == SAS_END_DEVICE && new == SAS_SATA_PENDING))
return true;
return false;
}
static int sas_rediscover_dev(struct domain_device *dev, int phy_id,
bool last, int sibling)
{
struct expander_device *ex = &dev->ex_dev;
struct ex_phy *phy = &ex->ex_phy[phy_id];
enum sas_device_type type = SAS_PHY_UNUSED;
u8 sas_addr[SAS_ADDR_SIZE];
char msg[80] = "";
int res;
if (!last)
sprintf(msg, ", part of a wide port with phy%02d", sibling);
pr_debug("ex %016llx rediscovering phy%02d%s\n",
SAS_ADDR(dev->sas_addr), phy_id, msg);
memset(sas_addr, 0, SAS_ADDR_SIZE);
res = sas_get_phy_attached_dev(dev, phy_id, sas_addr, &type);
switch (res) {
case SMP_RESP_NO_PHY:
phy->phy_state = PHY_NOT_PRESENT;
sas_unregister_devs_sas_addr(dev, phy_id, last);
return res;
case SMP_RESP_PHY_VACANT:
phy->phy_state = PHY_VACANT;
sas_unregister_devs_sas_addr(dev, phy_id, last);
return res;
case SMP_RESP_FUNC_ACC:
break;
case -ECOMM:
break;
default:
return res;
}
if ((SAS_ADDR(sas_addr) == 0) || (res == -ECOMM)) {
phy->phy_state = PHY_EMPTY;
sas_unregister_devs_sas_addr(dev, phy_id, last);
/*
* Even though the PHY is empty, for convenience we discover
* the PHY to update the PHY info, like negotiated linkrate.
*/
sas_ex_phy_discover(dev, phy_id);
return res;
} else if (SAS_ADDR(sas_addr) == SAS_ADDR(phy->attached_sas_addr) &&
dev_type_flutter(type, phy->attached_dev_type)) {
struct domain_device *ata_dev = sas_ex_to_ata(dev, phy_id);
char *action = "";
sas_ex_phy_discover(dev, phy_id);
if (ata_dev && phy->attached_dev_type == SAS_SATA_PENDING)
action = ", needs recovery";
pr_debug("ex %016llx phy%02d broadcast flutter%s\n",
SAS_ADDR(dev->sas_addr), phy_id, action);
return res;
}
/* we always have to delete the old device when we get here */
pr_info("ex %016llx phy%02d replace %016llx\n",
SAS_ADDR(dev->sas_addr), phy_id,
SAS_ADDR(phy->attached_sas_addr));
sas_unregister_devs_sas_addr(dev, phy_id, last);
return sas_discover_new(dev, phy_id);
}
/**
* sas_rediscover - revalidate the domain.
* @dev: domain device to be checked.
* @phy_id: the phy id to be rediscovered.
*
* NOTE: this process _must_ quit (return) as soon as any connection
* errors are encountered. Connection recovery is done elsewhere.
* The discover process only interrogates devices in order to discover the
* domain. On unplug we unregister the device only when the changed phy is
* the last phy of its port; for the other phys of the port we simply
* remove the phy from the port. On plug-in we run discovery only for the
* first phy of a port; the other phys are added to the port, forming a
* wide port.
*/
static int sas_rediscover(struct domain_device *dev, const int phy_id)
{
struct expander_device *ex = &dev->ex_dev;
struct ex_phy *changed_phy = &ex->ex_phy[phy_id];
int res = 0;
int i;
bool last = true; /* is this the last phy of the port */
pr_debug("ex %016llx phy%02d originated BROADCAST(CHANGE)\n",
SAS_ADDR(dev->sas_addr), phy_id);
if (SAS_ADDR(changed_phy->attached_sas_addr) != 0) {
for (i = 0; i < ex->num_phys; i++) {
struct ex_phy *phy = &ex->ex_phy[i];
if (i == phy_id)
continue;
if (sas_phy_addr_match(phy, changed_phy)) {
last = false;
break;
}
}
res = sas_rediscover_dev(dev, phy_id, last, i);
} else
res = sas_discover_new(dev, phy_id);
return res;
}
/**
* sas_ex_revalidate_domain - revalidate the domain
* @port_dev: port domain device.
*
* NOTE: this process _must_ quit (return) as soon as any connection
* errors are encountered. Connection recovery is done elsewhere.
* The discover process only interrogates devices in order to discover the
* domain.
*/
int sas_ex_revalidate_domain(struct domain_device *port_dev)
{
int res;
struct domain_device *dev = NULL;
res = sas_find_bcast_dev(port_dev, &dev);
if (res == 0 && dev) {
struct expander_device *ex = &dev->ex_dev;
int i = 0, phy_id;
do {
phy_id = -1;
res = sas_find_bcast_phy(dev, &phy_id, i, true);
if (phy_id == -1)
break;
res = sas_rediscover(dev, phy_id);
i = phy_id + 1;
} while (i < ex->num_phys);
}
return res;
}
int sas_find_attached_phy_id(struct expander_device *ex_dev,
struct domain_device *dev)
{
struct ex_phy *phy;
int phy_id;
for (phy_id = 0; phy_id < ex_dev->num_phys; phy_id++) {
phy = &ex_dev->ex_phy[phy_id];
if (sas_phy_match_dev_addr(dev, phy))
return phy_id;
}
return -ENODEV;
}
EXPORT_SYMBOL_GPL(sas_find_attached_phy_id);
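/* bsg SMP passthrough handler: requests with no rphy are addressed to the
* host itself and go to sas_smp_host_handler(); requests to an expander
* rphy are executed via smp_execute_task_sg().  Only single-segment
* request and reply payloads are supported.
*/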
void sas_smp_handler(struct bsg_job *job, struct Scsi_Host *shost,
struct sas_rphy *rphy)
{
struct domain_device *dev;
unsigned int rcvlen = 0;
int ret = -EINVAL;
/* no rphy means no smp target support (ie aic94xx host) */
if (!rphy)
return sas_smp_host_handler(job, shost);
switch (rphy->identify.device_type) {
case SAS_EDGE_EXPANDER_DEVICE:
case SAS_FANOUT_EXPANDER_DEVICE:
break;
default:
pr_err("%s: can we send a smp request to a device?\n",
__func__);
goto out;
}
dev = sas_find_dev_by_rphy(rphy);
if (!dev) {
pr_err("%s: fail to find a domain_device?\n", __func__);
goto out;
}
/* do we need to support multiple segments? */
if (job->request_payload.sg_cnt > 1 ||
job->reply_payload.sg_cnt > 1) {
pr_info("%s: multiple segments req %u, rsp %u\n",
__func__, job->request_payload.payload_len,
job->reply_payload.payload_len);
goto out;
}
ret = smp_execute_task_sg(dev, job->request_payload.sg_list,
job->reply_payload.sg_list);
if (ret >= 0) {
/* bsg_job_done() requires the length received */
rcvlen = job->reply_payload.payload_len - ret;
ret = 0;
}
out:
bsg_job_done(job, ret, rcvlen);
}
| linux-master | drivers/scsi/libsas/sas_expander.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Serial Attached SCSI (SAS) Event processing
*
* Copyright (C) 2005 Adaptec, Inc. All rights reserved.
* Copyright (C) 2005 Luben Tuikov <[email protected]>
*/
#include <linux/export.h>
#include <scsi/scsi_host.h>
#include "sas_internal.h"
bool sas_queue_work(struct sas_ha_struct *ha, struct sas_work *sw)
{
if (!test_bit(SAS_HA_REGISTERED, &ha->state))
return false;
if (test_bit(SAS_HA_DRAINING, &ha->state)) {
/* add it to the defer list, if not already pending */
if (list_empty(&sw->drain_node))
list_add_tail(&sw->drain_node, &ha->defer_q);
return true;
}
return queue_work(ha->event_q, &sw->work);
}
static bool sas_queue_event(int event, struct sas_work *work,
struct sas_ha_struct *ha)
{
unsigned long flags;
bool rc;
spin_lock_irqsave(&ha->lock, flags);
rc = sas_queue_work(ha, work);
spin_unlock_irqrestore(&ha->lock, flags);
return rc;
}
void sas_queue_deferred_work(struct sas_ha_struct *ha)
{
struct sas_work *sw, *_sw;
spin_lock_irq(&ha->lock);
list_for_each_entry_safe(sw, _sw, &ha->defer_q, drain_node) {
list_del_init(&sw->drain_node);
if (!sas_queue_work(ha, sw)) {
pm_runtime_put(ha->dev);
sas_free_event(to_asd_sas_event(&sw->work));
}
}
spin_unlock_irq(&ha->lock);
}
void __sas_drain_work(struct sas_ha_struct *ha)
{
set_bit(SAS_HA_DRAINING, &ha->state);
/* flush submitters */
spin_lock_irq(&ha->lock);
spin_unlock_irq(&ha->lock);
drain_workqueue(ha->event_q);
drain_workqueue(ha->disco_q);
clear_bit(SAS_HA_DRAINING, &ha->state);
sas_queue_deferred_work(ha);
}
int sas_drain_work(struct sas_ha_struct *ha)
{
int err;
err = mutex_lock_interruptible(&ha->drain_mutex);
if (err)
return err;
if (test_bit(SAS_HA_REGISTERED, &ha->state))
__sas_drain_work(ha);
mutex_unlock(&ha->drain_mutex);
return 0;
}
EXPORT_SYMBOL_GPL(sas_drain_work);
void sas_disable_revalidation(struct sas_ha_struct *ha)
{
mutex_lock(&ha->disco_mutex);
set_bit(SAS_HA_ATA_EH_ACTIVE, &ha->state);
mutex_unlock(&ha->disco_mutex);
}
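/* Re-enable revalidation after ATA error handling: for every port whose
* DISCE_REVALIDATE_DOMAIN event was deferred while EH was active, resend
* PORTE_BROADCAST_RCVD on one of its phys so that discovery runs again.
*/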
void sas_enable_revalidation(struct sas_ha_struct *ha)
{
int i;
mutex_lock(&ha->disco_mutex);
clear_bit(SAS_HA_ATA_EH_ACTIVE, &ha->state);
for (i = 0; i < ha->num_phys; i++) {
struct asd_sas_port *port = ha->sas_port[i];
const int ev = DISCE_REVALIDATE_DOMAIN;
struct sas_discovery *d = &port->disc;
struct asd_sas_phy *sas_phy;
if (!test_and_clear_bit(ev, &d->pending))
continue;
spin_lock(&port->phy_list_lock);
if (list_empty(&port->phy_list)) {
spin_unlock(&port->phy_list_lock);
continue;
}
sas_phy = container_of(port->phy_list.next, struct asd_sas_phy,
port_phy_el);
spin_unlock(&port->phy_list_lock);
sas_notify_port_event(sas_phy,
PORTE_BROADCAST_RCVD, GFP_KERNEL);
}
mutex_unlock(&ha->disco_mutex);
}
static void sas_port_event_worker(struct work_struct *work)
{
struct asd_sas_event *ev = to_asd_sas_event(work);
struct asd_sas_phy *phy = ev->phy;
struct sas_ha_struct *ha = phy->ha;
sas_port_event_fns[ev->event](work);
pm_runtime_put(ha->dev);
sas_free_event(ev);
}
static void sas_phy_event_worker(struct work_struct *work)
{
struct asd_sas_event *ev = to_asd_sas_event(work);
struct asd_sas_phy *phy = ev->phy;
struct sas_ha_struct *ha = phy->ha;
sas_phy_event_fns[ev->event](work);
pm_runtime_put(ha->dev);
sas_free_event(ev);
}
/* defer works of new phys during suspend */
static bool sas_defer_event(struct asd_sas_phy *phy, struct asd_sas_event *ev)
{
struct sas_ha_struct *ha = phy->ha;
unsigned long flags;
bool deferred = false;
spin_lock_irqsave(&ha->lock, flags);
if (test_bit(SAS_HA_RESUMING, &ha->state) && !phy->suspended) {
struct sas_work *sw = &ev->work;
list_add_tail(&sw->drain_node, &ha->defer_q);
deferred = true;
}
spin_unlock_irqrestore(&ha->lock, flags);
return deferred;
}
void sas_notify_port_event(struct asd_sas_phy *phy, enum port_event event,
gfp_t gfp_flags)
{
struct sas_ha_struct *ha = phy->ha;
struct asd_sas_event *ev;
BUG_ON(event >= PORT_NUM_EVENTS);
ev = sas_alloc_event(phy, gfp_flags);
if (!ev)
return;
/* Paired with the pm_runtime_put() in sas_port_event_worker() */
pm_runtime_get_noresume(ha->dev);
INIT_SAS_EVENT(ev, sas_port_event_worker, phy, event);
if (sas_defer_event(phy, ev))
return;
if (!sas_queue_event(event, &ev->work, ha)) {
pm_runtime_put(ha->dev);
sas_free_event(ev);
}
}
EXPORT_SYMBOL_GPL(sas_notify_port_event);
void sas_notify_phy_event(struct asd_sas_phy *phy, enum phy_event event,
gfp_t gfp_flags)
{
struct sas_ha_struct *ha = phy->ha;
struct asd_sas_event *ev;
BUG_ON(event >= PHY_NUM_EVENTS);
ev = sas_alloc_event(phy, gfp_flags);
if (!ev)
return;
/* Paired with the pm_runtime_put() in sas_phy_event_worker() */
pm_runtime_get_noresume(ha->dev);
INIT_SAS_EVENT(ev, sas_phy_event_worker, phy, event);
if (sas_defer_event(phy, ev))
return;
if (!sas_queue_event(event, &ev->work, ha)) {
pm_runtime_put(ha->dev);
sas_free_event(ev);
}
}
EXPORT_SYMBOL_GPL(sas_notify_phy_event);
| linux-master | drivers/scsi/libsas/sas_event.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Serial Attached SCSI (SAS) Transport Layer initialization
*
* Copyright (C) 2005 Adaptec, Inc. All rights reserved.
* Copyright (C) 2005 Luben Tuikov <[email protected]>
*/
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/device.h>
#include <linux/spinlock.h>
#include <scsi/sas_ata.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_transport.h>
#include <scsi/scsi_transport_sas.h>
#include "sas_internal.h"
#include "scsi_sas_internal.h"
static struct kmem_cache *sas_task_cache;
static struct kmem_cache *sas_event_cache;
struct sas_task *sas_alloc_task(gfp_t flags)
{
struct sas_task *task = kmem_cache_zalloc(sas_task_cache, flags);
if (task) {
spin_lock_init(&task->task_state_lock);
task->task_state_flags = SAS_TASK_STATE_PENDING;
}
return task;
}
struct sas_task *sas_alloc_slow_task(gfp_t flags)
{
struct sas_task *task = sas_alloc_task(flags);
struct sas_task_slow *slow = kmalloc(sizeof(*slow), flags);
if (!task || !slow) {
if (task)
kmem_cache_free(sas_task_cache, task);
kfree(slow);
return NULL;
}
task->slow_task = slow;
slow->task = task;
timer_setup(&slow->timer, NULL, 0);
init_completion(&slow->completion);
return task;
}
void sas_free_task(struct sas_task *task)
{
if (task) {
kfree(task->slow_task);
kmem_cache_free(sas_task_cache, task);
}
}
/*------------ SAS addr hash -----------*/
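/* Hash the 8-byte SAS address down to a 3-byte hashed address using a
* CRC-style shift register with polynomial 0x00DB2777.
*/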
void sas_hash_addr(u8 *hashed, const u8 *sas_addr)
{
const u32 poly = 0x00DB2777;
u32 r = 0;
int i;
for (i = 0; i < SAS_ADDR_SIZE; i++) {
int b;
for (b = (SAS_ADDR_SIZE - 1); b >= 0; b--) {
r <<= 1;
if ((1 << b) & sas_addr[i]) {
if (!(r & 0x01000000))
r ^= poly;
} else if (r & 0x01000000) {
r ^= poly;
}
}
}
hashed[0] = (r >> 16) & 0xFF;
hashed[1] = (r >> 8) & 0xFF;
hashed[2] = r & 0xFF;
}
int sas_register_ha(struct sas_ha_struct *sas_ha)
{
char name[64];
int error = 0;
mutex_init(&sas_ha->disco_mutex);
spin_lock_init(&sas_ha->phy_port_lock);
sas_hash_addr(sas_ha->hashed_sas_addr, sas_ha->sas_addr);
set_bit(SAS_HA_REGISTERED, &sas_ha->state);
spin_lock_init(&sas_ha->lock);
mutex_init(&sas_ha->drain_mutex);
init_waitqueue_head(&sas_ha->eh_wait_q);
INIT_LIST_HEAD(&sas_ha->defer_q);
INIT_LIST_HEAD(&sas_ha->eh_dev_q);
sas_ha->event_thres = SAS_PHY_SHUTDOWN_THRES;
error = sas_register_phys(sas_ha);
if (error) {
pr_notice("couldn't register sas phys:%d\n", error);
return error;
}
error = sas_register_ports(sas_ha);
if (error) {
pr_notice("couldn't register sas ports:%d\n", error);
goto Undo_phys;
}
error = -ENOMEM;
snprintf(name, sizeof(name), "%s_event_q", dev_name(sas_ha->dev));
sas_ha->event_q = create_singlethread_workqueue(name);
if (!sas_ha->event_q)
goto Undo_ports;
snprintf(name, sizeof(name), "%s_disco_q", dev_name(sas_ha->dev));
sas_ha->disco_q = create_singlethread_workqueue(name);
if (!sas_ha->disco_q)
goto Undo_event_q;
INIT_LIST_HEAD(&sas_ha->eh_done_q);
INIT_LIST_HEAD(&sas_ha->eh_ata_q);
return 0;
Undo_event_q:
destroy_workqueue(sas_ha->event_q);
Undo_ports:
sas_unregister_ports(sas_ha);
Undo_phys:
return error;
}
EXPORT_SYMBOL_GPL(sas_register_ha);
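/*
* Illustrative sketch of how an LLDD brings up a host. The my_* names are
* hypothetical; the sas_ha_struct fields and the sas_register_ha() call
* are the real interface. The LLDD fills in dev, shost, sas_addr, the phy
* and port arrays and num_phys before registering:
*
*	static int my_register(struct my_hba *hba)
*	{
*		struct sas_ha_struct *sas_ha = &hba->sas_ha;
*
*		sas_ha->dev = hba->dev;
*		sas_ha->shost = hba->shost;
*		sas_ha->sas_addr = hba->sas_addr;
*		sas_ha->sas_phy = hba->sas_phys;
*		sas_ha->sas_port = hba->sas_ports;
*		sas_ha->num_phys = MY_NUM_PHYS;
*
*		return sas_register_ha(sas_ha);
*	}
*/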
static void sas_disable_events(struct sas_ha_struct *sas_ha)
{
/* Set the state to unregistered so that no further unchained
* events are queued, and flush any in-progress drainers
*/
mutex_lock(&sas_ha->drain_mutex);
spin_lock_irq(&sas_ha->lock);
clear_bit(SAS_HA_REGISTERED, &sas_ha->state);
spin_unlock_irq(&sas_ha->lock);
__sas_drain_work(sas_ha);
mutex_unlock(&sas_ha->drain_mutex);
}
int sas_unregister_ha(struct sas_ha_struct *sas_ha)
{
sas_disable_events(sas_ha);
sas_unregister_ports(sas_ha);
/* flush unregistration work */
mutex_lock(&sas_ha->drain_mutex);
__sas_drain_work(sas_ha);
mutex_unlock(&sas_ha->drain_mutex);
destroy_workqueue(sas_ha->disco_q);
destroy_workqueue(sas_ha->event_q);
return 0;
}
EXPORT_SYMBOL_GPL(sas_unregister_ha);
static int sas_get_linkerrors(struct sas_phy *phy)
{
if (scsi_is_sas_phy_local(phy)) {
struct Scsi_Host *shost = dev_to_shost(phy->dev.parent);
struct sas_ha_struct *sas_ha = SHOST_TO_SAS_HA(shost);
struct asd_sas_phy *asd_phy = sas_ha->sas_phy[phy->number];
struct sas_internal *i =
to_sas_internal(sas_ha->shost->transportt);
return i->dft->lldd_control_phy(asd_phy, PHY_FUNC_GET_EVENTS, NULL);
}
return sas_smp_get_phy_events(phy);
}
int sas_try_ata_reset(struct asd_sas_phy *asd_phy)
{
struct domain_device *dev = NULL;
/* try to route user requested link resets through libata */
if (asd_phy->port)
dev = asd_phy->port->port_dev;
/* validate that dev has been probed */
if (dev)
dev = sas_find_dev_by_rphy(dev->rphy);
if (dev && dev_is_sata(dev)) {
sas_ata_schedule_reset(dev);
sas_ata_wait_eh(dev);
return 0;
}
return -ENODEV;
}
/*
* transport_sas_phy_reset - reset a phy and permit libata to manage the link
*
* The phy reset request arrives via sysfs and runs in host workqueue
* context, so we know we can block on eh and safely traverse the
* domain_device topology.
*/
static int transport_sas_phy_reset(struct sas_phy *phy, int hard_reset)
{
enum phy_func reset_type;
if (hard_reset)
reset_type = PHY_FUNC_HARD_RESET;
else
reset_type = PHY_FUNC_LINK_RESET;
if (scsi_is_sas_phy_local(phy)) {
struct Scsi_Host *shost = dev_to_shost(phy->dev.parent);
struct sas_ha_struct *sas_ha = SHOST_TO_SAS_HA(shost);
struct asd_sas_phy *asd_phy = sas_ha->sas_phy[phy->number];
struct sas_internal *i =
to_sas_internal(sas_ha->shost->transportt);
if (!hard_reset && sas_try_ata_reset(asd_phy) == 0)
return 0;
return i->dft->lldd_control_phy(asd_phy, reset_type, NULL);
} else {
struct sas_rphy *rphy = dev_to_rphy(phy->dev.parent);
struct domain_device *ddev = sas_find_dev_by_rphy(rphy);
struct domain_device *ata_dev = sas_ex_to_ata(ddev, phy->number);
if (ata_dev && !hard_reset) {
sas_ata_schedule_reset(ata_dev);
sas_ata_wait_eh(ata_dev);
return 0;
} else
return sas_smp_phy_control(ddev, phy->number, reset_type, NULL);
}
}
int sas_phy_enable(struct sas_phy *phy, int enable)
{
int ret;
enum phy_func cmd;
if (enable)
cmd = PHY_FUNC_LINK_RESET;
else
cmd = PHY_FUNC_DISABLE;
if (scsi_is_sas_phy_local(phy)) {
struct Scsi_Host *shost = dev_to_shost(phy->dev.parent);
struct sas_ha_struct *sas_ha = SHOST_TO_SAS_HA(shost);
struct asd_sas_phy *asd_phy = sas_ha->sas_phy[phy->number];
struct sas_internal *i =
to_sas_internal(sas_ha->shost->transportt);
if (enable)
ret = transport_sas_phy_reset(phy, 0);
else
ret = i->dft->lldd_control_phy(asd_phy, cmd, NULL);
} else {
struct sas_rphy *rphy = dev_to_rphy(phy->dev.parent);
struct domain_device *ddev = sas_find_dev_by_rphy(rphy);
if (enable)
ret = transport_sas_phy_reset(phy, 0);
else
ret = sas_smp_phy_control(ddev, phy->number, cmd, NULL);
}
return ret;
}
EXPORT_SYMBOL_GPL(sas_phy_enable);
int sas_phy_reset(struct sas_phy *phy, int hard_reset)
{
int ret;
enum phy_func reset_type;
if (!phy->enabled)
return -ENODEV;
if (hard_reset)
reset_type = PHY_FUNC_HARD_RESET;
else
reset_type = PHY_FUNC_LINK_RESET;
if (scsi_is_sas_phy_local(phy)) {
struct Scsi_Host *shost = dev_to_shost(phy->dev.parent);
struct sas_ha_struct *sas_ha = SHOST_TO_SAS_HA(shost);
struct asd_sas_phy *asd_phy = sas_ha->sas_phy[phy->number];
struct sas_internal *i =
to_sas_internal(sas_ha->shost->transportt);
ret = i->dft->lldd_control_phy(asd_phy, reset_type, NULL);
} else {
struct sas_rphy *rphy = dev_to_rphy(phy->dev.parent);
struct domain_device *ddev = sas_find_dev_by_rphy(rphy);
ret = sas_smp_phy_control(ddev, phy->number, reset_type, NULL);
}
return ret;
}
EXPORT_SYMBOL_GPL(sas_phy_reset);
int sas_set_phy_speed(struct sas_phy *phy,
struct sas_phy_linkrates *rates)
{
int ret;
if ((rates->minimum_linkrate &&
rates->minimum_linkrate > phy->maximum_linkrate) ||
(rates->maximum_linkrate &&
rates->maximum_linkrate < phy->minimum_linkrate))
return -EINVAL;
if (rates->minimum_linkrate &&
rates->minimum_linkrate < phy->minimum_linkrate_hw)
rates->minimum_linkrate = phy->minimum_linkrate_hw;
if (rates->maximum_linkrate &&
rates->maximum_linkrate > phy->maximum_linkrate_hw)
rates->maximum_linkrate = phy->maximum_linkrate_hw;
if (scsi_is_sas_phy_local(phy)) {
struct Scsi_Host *shost = dev_to_shost(phy->dev.parent);
struct sas_ha_struct *sas_ha = SHOST_TO_SAS_HA(shost);
struct asd_sas_phy *asd_phy = sas_ha->sas_phy[phy->number];
struct sas_internal *i =
to_sas_internal(sas_ha->shost->transportt);
ret = i->dft->lldd_control_phy(asd_phy, PHY_FUNC_SET_LINK_RATE,
rates);
} else {
struct sas_rphy *rphy = dev_to_rphy(phy->dev.parent);
struct domain_device *ddev = sas_find_dev_by_rphy(rphy);
ret = sas_smp_phy_control(ddev, phy->number,
PHY_FUNC_LINK_RESET, rates);
}
return ret;
}
void sas_prep_resume_ha(struct sas_ha_struct *ha)
{
int i;
set_bit(SAS_HA_REGISTERED, &ha->state);
set_bit(SAS_HA_RESUMING, &ha->state);
/* clear out any stale link events/data from the suspension path */
for (i = 0; i < ha->num_phys; i++) {
struct asd_sas_phy *phy = ha->sas_phy[i];
memset(phy->attached_sas_addr, 0, SAS_ADDR_SIZE);
phy->frame_rcvd_size = 0;
}
}
EXPORT_SYMBOL(sas_prep_resume_ha);
static int phys_suspended(struct sas_ha_struct *ha)
{
int i, rc = 0;
for (i = 0; i < ha->num_phys; i++) {
struct asd_sas_phy *phy = ha->sas_phy[i];
if (phy->suspended)
rc++;
}
return rc;
}
static void sas_resume_insert_broadcast_ha(struct sas_ha_struct *ha)
{
int i;
for (i = 0; i < ha->num_phys; i++) {
struct asd_sas_port *port = ha->sas_port[i];
struct domain_device *dev = port->port_dev;
if (dev && dev_is_expander(dev->dev_type)) {
struct asd_sas_phy *first_phy;
spin_lock(&port->phy_list_lock);
first_phy = list_first_entry_or_null(
&port->phy_list, struct asd_sas_phy,
port_phy_el);
spin_unlock(&port->phy_list_lock);
if (first_phy)
sas_notify_port_event(first_phy,
PORTE_BROADCAST_RCVD, GFP_KERNEL);
}
}
}
static void _sas_resume_ha(struct sas_ha_struct *ha, bool drain)
{
const unsigned long tmo = msecs_to_jiffies(25000);
int i;
/* deform ports on phys that did not resume
* at this point we may be racing the phy coming back (as posted
* by the lldd). So we post the event and once we are in the
* libsas context check that the phy remains suspended before
* tearing it down.
*/
i = phys_suspended(ha);
if (i)
dev_info(ha->dev, "waiting up to 25 seconds for %d phy%s to resume\n",
i, i > 1 ? "s" : "");
wait_event_timeout(ha->eh_wait_q, phys_suspended(ha) == 0, tmo);
for (i = 0; i < ha->num_phys; i++) {
struct asd_sas_phy *phy = ha->sas_phy[i];
if (phy->suspended) {
dev_warn(&phy->phy->dev, "resume timeout\n");
sas_notify_phy_event(phy, PHYE_RESUME_TIMEOUT,
GFP_KERNEL);
}
}
/* all phys are back up or timed out, turn on i/o so we can
* flush out disks that did not return
*/
scsi_unblock_requests(ha->shost);
if (drain)
sas_drain_work(ha);
clear_bit(SAS_HA_RESUMING, &ha->state);
sas_queue_deferred_work(ha);
/* send event PORTE_BROADCAST_RCVD to identify some new inserted
* disks for expander
*/
sas_resume_insert_broadcast_ha(ha);
}
void sas_resume_ha(struct sas_ha_struct *ha)
{
_sas_resume_ha(ha, true);
}
EXPORT_SYMBOL(sas_resume_ha);
/* A no-sync variant, which does not call sas_drain_work(). */
void sas_resume_ha_no_sync(struct sas_ha_struct *ha)
{
_sas_resume_ha(ha, false);
}
EXPORT_SYMBOL(sas_resume_ha_no_sync);
void sas_suspend_ha(struct sas_ha_struct *ha)
{
int i;
sas_disable_events(ha);
scsi_block_requests(ha->shost);
for (i = 0; i < ha->num_phys; i++) {
struct asd_sas_port *port = ha->sas_port[i];
sas_discover_event(port, DISCE_SUSPEND);
}
/* flush suspend events while unregistered */
mutex_lock(&ha->drain_mutex);
__sas_drain_work(ha);
mutex_unlock(&ha->drain_mutex);
}
EXPORT_SYMBOL(sas_suspend_ha);
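/*
* Illustrative sketch: an LLDD's PM callbacks typically bracket its own
* hardware suspend/resume with these helpers (my_hba and the my_hw_*
* calls are hypothetical). sas_suspend_ha() quiesces discovery and blocks
* requests; sas_prep_resume_ha() plus sas_resume_ha() wait for the phys
* to come back and then unblock I/O:
*
*	static int my_suspend(struct device *dev)
*	{
*		struct my_hba *hba = dev_get_drvdata(dev);
*
*		sas_suspend_ha(&hba->sas_ha);
*		return my_hw_suspend(hba);
*	}
*
*	static int my_resume(struct device *dev)
*	{
*		struct my_hba *hba = dev_get_drvdata(dev);
*		int rc = my_hw_resume(hba);
*
*		if (rc)
*			return rc;
*		sas_prep_resume_ha(&hba->sas_ha);
*		my_hw_start_phys(hba);
*		sas_resume_ha(&hba->sas_ha);
*		return 0;
*	}
*/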
static void sas_phy_release(struct sas_phy *phy)
{
kfree(phy->hostdata);
phy->hostdata = NULL;
}
static void phy_reset_work(struct work_struct *work)
{
struct sas_phy_data *d = container_of(work, typeof(*d), reset_work.work);
d->reset_result = transport_sas_phy_reset(d->phy, d->hard_reset);
}
static void phy_enable_work(struct work_struct *work)
{
struct sas_phy_data *d = container_of(work, typeof(*d), enable_work.work);
d->enable_result = sas_phy_enable(d->phy, d->enable);
}
static int sas_phy_setup(struct sas_phy *phy)
{
struct sas_phy_data *d = kzalloc(sizeof(*d), GFP_KERNEL);
if (!d)
return -ENOMEM;
mutex_init(&d->event_lock);
INIT_SAS_WORK(&d->reset_work, phy_reset_work);
INIT_SAS_WORK(&d->enable_work, phy_enable_work);
d->phy = phy;
phy->hostdata = d;
return 0;
}
static int queue_phy_reset(struct sas_phy *phy, int hard_reset)
{
struct Scsi_Host *shost = dev_to_shost(phy->dev.parent);
struct sas_ha_struct *ha = SHOST_TO_SAS_HA(shost);
struct sas_phy_data *d = phy->hostdata;
int rc;
if (!d)
return -ENOMEM;
pm_runtime_get_sync(ha->dev);
/* libsas workqueue coordinates ata-eh reset with discovery */
mutex_lock(&d->event_lock);
d->reset_result = 0;
d->hard_reset = hard_reset;
spin_lock_irq(&ha->lock);
sas_queue_work(ha, &d->reset_work);
spin_unlock_irq(&ha->lock);
rc = sas_drain_work(ha);
if (rc == 0)
rc = d->reset_result;
mutex_unlock(&d->event_lock);
pm_runtime_put_sync(ha->dev);
return rc;
}
static int queue_phy_enable(struct sas_phy *phy, int enable)
{
struct Scsi_Host *shost = dev_to_shost(phy->dev.parent);
struct sas_ha_struct *ha = SHOST_TO_SAS_HA(shost);
struct sas_phy_data *d = phy->hostdata;
int rc;
if (!d)
return -ENOMEM;
pm_runtime_get_sync(ha->dev);
/* libsas workqueue coordinates ata-eh reset with discovery */
mutex_lock(&d->event_lock);
d->enable_result = 0;
d->enable = enable;
spin_lock_irq(&ha->lock);
sas_queue_work(ha, &d->enable_work);
spin_unlock_irq(&ha->lock);
rc = sas_drain_work(ha);
if (rc == 0)
rc = d->enable_result;
mutex_unlock(&d->event_lock);
pm_runtime_put_sync(ha->dev);
return rc;
}
static struct sas_function_template sft = {
.phy_enable = queue_phy_enable,
.phy_reset = queue_phy_reset,
.phy_setup = sas_phy_setup,
.phy_release = sas_phy_release,
.set_phy_speed = sas_set_phy_speed,
.get_linkerrors = sas_get_linkerrors,
.smp_handler = sas_smp_handler,
};
static inline ssize_t phy_event_threshold_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct Scsi_Host *shost = class_to_shost(dev);
struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);
return scnprintf(buf, PAGE_SIZE, "%u\n", sha->event_thres);
}
static inline ssize_t phy_event_threshold_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
struct Scsi_Host *shost = class_to_shost(dev);
struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);
sha->event_thres = simple_strtol(buf, NULL, 10);
/* threshold cannot be set too small */
if (sha->event_thres < 32)
sha->event_thres = 32;
return count;
}
DEVICE_ATTR(phy_event_threshold,
S_IRUGO|S_IWUSR,
phy_event_threshold_show,
phy_event_threshold_store);
EXPORT_SYMBOL_GPL(dev_attr_phy_event_threshold);
struct scsi_transport_template *
sas_domain_attach_transport(struct sas_domain_function_template *dft)
{
struct scsi_transport_template *stt = sas_attach_transport(&sft);
struct sas_internal *i;
if (!stt)
return stt;
i = to_sas_internal(stt);
i->dft = dft;
stt->create_work_queue = 1;
stt->eh_strategy_handler = sas_scsi_recover_host;
return stt;
}
EXPORT_SYMBOL_GPL(sas_domain_attach_transport);
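/*
* Illustrative sketch: an LLDD supplies its sas_domain_function_template
* and attaches the transport before adding its Scsi_Host. The my_* names
* are hypothetical; the dft hooks shown are a subset of the ones used
* throughout this file:
*
*	static struct sas_domain_function_template my_transport_ops = {
*		.lldd_execute_task	= my_execute_task,
*		.lldd_abort_task	= my_abort_task,
*		.lldd_I_T_nexus_reset	= my_I_T_nexus_reset,
*		.lldd_control_phy	= my_control_phy,
*	};
*
*	static struct scsi_transport_template *my_stt;
*
*	my_stt = sas_domain_attach_transport(&my_transport_ops);
*	if (!my_stt)
*		return -ENOMEM;
*	shost->transportt = my_stt;
*/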
struct asd_sas_event *sas_alloc_event(struct asd_sas_phy *phy,
gfp_t gfp_flags)
{
struct asd_sas_event *event;
struct sas_ha_struct *sas_ha = phy->ha;
struct sas_internal *i =
to_sas_internal(sas_ha->shost->transportt);
event = kmem_cache_zalloc(sas_event_cache, gfp_flags);
if (!event)
return NULL;
atomic_inc(&phy->event_nr);
if (atomic_read(&phy->event_nr) > phy->ha->event_thres) {
if (i->dft->lldd_control_phy) {
if (cmpxchg(&phy->in_shutdown, 0, 1) == 0) {
pr_notice("The phy%d bursting events, shut it down.\n",
phy->id);
sas_notify_phy_event(phy, PHYE_SHUTDOWN,
gfp_flags);
}
} else {
/* PHY control is not supported, so stop allocating events */
WARN_ONCE(1, "PHY control not supported.\n");
kmem_cache_free(sas_event_cache, event);
atomic_dec(&phy->event_nr);
event = NULL;
}
}
return event;
}
void sas_free_event(struct asd_sas_event *event)
{
struct asd_sas_phy *phy = event->phy;
kmem_cache_free(sas_event_cache, event);
atomic_dec(&phy->event_nr);
}
/* ---------- SAS Class register/unregister ---------- */
static int __init sas_class_init(void)
{
sas_task_cache = KMEM_CACHE(sas_task, SLAB_HWCACHE_ALIGN);
if (!sas_task_cache)
goto out;
sas_event_cache = KMEM_CACHE(asd_sas_event, SLAB_HWCACHE_ALIGN);
if (!sas_event_cache)
goto free_task_kmem;
return 0;
free_task_kmem:
kmem_cache_destroy(sas_task_cache);
out:
return -ENOMEM;
}
static void __exit sas_class_exit(void)
{
kmem_cache_destroy(sas_task_cache);
kmem_cache_destroy(sas_event_cache);
}
MODULE_AUTHOR("Luben Tuikov <[email protected]>");
MODULE_DESCRIPTION("SAS Transport Layer");
MODULE_LICENSE("GPL v2");
module_init(sas_class_init);
module_exit(sas_class_exit);
| linux-master | drivers/scsi/libsas/sas_init.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Serial Attached SCSI (SAS) class SCSI Host glue.
*
* Copyright (C) 2005 Adaptec, Inc. All rights reserved.
* Copyright (C) 2005 Luben Tuikov <[email protected]>
*/
#include <linux/kthread.h>
#include <linux/firmware.h>
#include <linux/export.h>
#include <linux/ctype.h>
#include <linux/kernel.h>
#include "sas_internal.h"
#include <scsi/scsi_host.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_transport.h>
#include <scsi/scsi_transport_sas.h>
#include <scsi/sas_ata.h>
#include "scsi_sas_internal.h"
#include "scsi_transport_api.h"
#include "scsi_priv.h"
#include <linux/err.h>
#include <linux/blkdev.h>
#include <linux/freezer.h>
#include <linux/gfp.h>
#include <linux/scatterlist.h>
#include <linux/libata.h>
/* record final status and free the task */
static void sas_end_task(struct scsi_cmnd *sc, struct sas_task *task)
{
struct task_status_struct *ts = &task->task_status;
enum scsi_host_status hs = DID_OK;
enum exec_status stat = SAS_SAM_STAT_GOOD;
if (ts->resp == SAS_TASK_UNDELIVERED) {
/* transport error */
hs = DID_NO_CONNECT;
} else { /* ts->resp == SAS_TASK_COMPLETE */
/* task delivered, what happened afterwards? */
switch (ts->stat) {
case SAS_DEV_NO_RESPONSE:
case SAS_INTERRUPTED:
case SAS_PHY_DOWN:
case SAS_NAK_R_ERR:
case SAS_OPEN_TO:
hs = DID_NO_CONNECT;
break;
case SAS_DATA_UNDERRUN:
scsi_set_resid(sc, ts->residual);
if (scsi_bufflen(sc) - scsi_get_resid(sc) < sc->underflow)
hs = DID_ERROR;
break;
case SAS_DATA_OVERRUN:
hs = DID_ERROR;
break;
case SAS_QUEUE_FULL:
hs = DID_SOFT_ERROR; /* retry */
break;
case SAS_DEVICE_UNKNOWN:
hs = DID_BAD_TARGET;
break;
case SAS_OPEN_REJECT:
if (ts->open_rej_reason == SAS_OREJ_RSVD_RETRY)
hs = DID_SOFT_ERROR; /* retry */
else
hs = DID_ERROR;
break;
case SAS_PROTO_RESPONSE:
pr_notice("LLDD:%s sent SAS_PROTO_RESP for an SSP task; please report this\n",
task->dev->port->ha->sas_ha_name);
break;
case SAS_ABORTED_TASK:
hs = DID_ABORT;
break;
case SAS_SAM_STAT_CHECK_CONDITION:
memcpy(sc->sense_buffer, ts->buf,
min(SCSI_SENSE_BUFFERSIZE, ts->buf_valid_size));
stat = SAS_SAM_STAT_CHECK_CONDITION;
break;
default:
stat = ts->stat;
break;
}
}
sc->result = (hs << 16) | stat;
ASSIGN_SAS_TASK(sc, NULL);
sas_free_task(task);
}
static void sas_scsi_task_done(struct sas_task *task)
{
struct scsi_cmnd *sc = task->uldd_task;
struct domain_device *dev = task->dev;
struct sas_ha_struct *ha = dev->port->ha;
unsigned long flags;
spin_lock_irqsave(&dev->done_lock, flags);
if (test_bit(SAS_HA_FROZEN, &ha->state))
task = NULL;
else
ASSIGN_SAS_TASK(sc, NULL);
spin_unlock_irqrestore(&dev->done_lock, flags);
if (unlikely(!task)) {
/* task will be completed by the error handler */
pr_debug("task done but aborted\n");
return;
}
if (unlikely(!sc)) {
pr_debug("task_done called with non existing SCSI cmnd!\n");
sas_free_task(task);
return;
}
sas_end_task(sc, task);
scsi_done(sc);
}
static struct sas_task *sas_create_task(struct scsi_cmnd *cmd,
struct domain_device *dev,
gfp_t gfp_flags)
{
struct sas_task *task = sas_alloc_task(gfp_flags);
struct scsi_lun lun;
if (!task)
return NULL;
task->uldd_task = cmd;
ASSIGN_SAS_TASK(cmd, task);
task->dev = dev;
task->task_proto = task->dev->tproto; /* BUG_ON(!SSP) */
int_to_scsilun(cmd->device->lun, &lun);
memcpy(task->ssp_task.LUN, &lun.scsi_lun, 8);
task->ssp_task.task_attr = TASK_ATTR_SIMPLE;
task->ssp_task.cmd = cmd;
task->scatter = scsi_sglist(cmd);
task->num_scatter = scsi_sg_count(cmd);
task->total_xfer_len = scsi_bufflen(cmd);
task->data_dir = cmd->sc_data_direction;
task->task_done = sas_scsi_task_done;
return task;
}
int sas_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
{
struct sas_internal *i = to_sas_internal(host->transportt);
struct domain_device *dev = cmd_to_domain_dev(cmd);
struct sas_task *task;
int res = 0;
/* If the device fell off, no sense in issuing commands */
if (test_bit(SAS_DEV_GONE, &dev->state)) {
cmd->result = DID_BAD_TARGET << 16;
goto out_done;
}
if (dev_is_sata(dev)) {
spin_lock_irq(dev->sata_dev.ap->lock);
res = ata_sas_queuecmd(cmd, dev->sata_dev.ap);
spin_unlock_irq(dev->sata_dev.ap->lock);
return res;
}
task = sas_create_task(cmd, dev, GFP_ATOMIC);
if (!task)
return SCSI_MLQUEUE_HOST_BUSY;
res = i->dft->lldd_execute_task(task, GFP_ATOMIC);
if (res)
goto out_free_task;
return 0;
out_free_task:
pr_debug("lldd_execute_task returned: %d\n", res);
ASSIGN_SAS_TASK(cmd, NULL);
sas_free_task(task);
if (res == -SAS_QUEUE_FULL)
cmd->result = DID_SOFT_ERROR << 16; /* retry */
else
cmd->result = DID_ERROR << 16;
out_done:
scsi_done(cmd);
return 0;
}
EXPORT_SYMBOL_GPL(sas_queuecommand);
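/*
* Illustrative sketch: LLDDs normally plug the generic libsas handlers
* straight into their scsi_host_template instead of re-implementing the
* SCSI entry points. my_sht and the name string are hypothetical; the
* handlers are the exported functions in this file:
*
*	static struct scsi_host_template my_sht = {
*		.name			 = "my_sas_driver",
*		.queuecommand		 = sas_queuecommand,
*		.target_alloc		 = sas_target_alloc,
*		.target_destroy		 = sas_target_destroy,
*		.slave_configure	 = sas_slave_configure,
*		.change_queue_depth	 = sas_change_queue_depth,
*		.eh_abort_handler	 = sas_eh_abort_handler,
*		.eh_device_reset_handler = sas_eh_device_reset_handler,
*		.eh_target_reset_handler = sas_eh_target_reset_handler,
*		.ioctl			 = sas_ioctl,
*	};
*/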
static void sas_eh_finish_cmd(struct scsi_cmnd *cmd)
{
struct sas_ha_struct *sas_ha = SHOST_TO_SAS_HA(cmd->device->host);
struct domain_device *dev = cmd_to_domain_dev(cmd);
struct sas_task *task = TO_SAS_TASK(cmd);
/* At this point, we only get called following an actual abort
* of the task, so we should be guaranteed not to be racing with
* any completions from the LLD. Task is freed after this.
*/
sas_end_task(cmd, task);
if (dev_is_sata(dev)) {
/* defer commands to libata so that libata EH can
* handle ata qcs correctly
*/
list_move_tail(&cmd->eh_entry, &sas_ha->eh_ata_q);
return;
}
/* now finish the command and move it on to the error
* handler done list, this also takes it off the
* error handler pending list.
*/
scsi_eh_finish_cmd(cmd, &sas_ha->eh_done_q);
}
static void sas_scsi_clear_queue_lu(struct list_head *error_q, struct scsi_cmnd *my_cmd)
{
struct scsi_cmnd *cmd, *n;
list_for_each_entry_safe(cmd, n, error_q, eh_entry) {
if (cmd->device->sdev_target == my_cmd->device->sdev_target &&
cmd->device->lun == my_cmd->device->lun)
sas_eh_finish_cmd(cmd);
}
}
static void sas_scsi_clear_queue_I_T(struct list_head *error_q,
struct domain_device *dev)
{
struct scsi_cmnd *cmd, *n;
list_for_each_entry_safe(cmd, n, error_q, eh_entry) {
struct domain_device *x = cmd_to_domain_dev(cmd);
if (x == dev)
sas_eh_finish_cmd(cmd);
}
}
static void sas_scsi_clear_queue_port(struct list_head *error_q,
struct asd_sas_port *port)
{
struct scsi_cmnd *cmd, *n;
list_for_each_entry_safe(cmd, n, error_q, eh_entry) {
struct domain_device *dev = cmd_to_domain_dev(cmd);
struct asd_sas_port *x = dev->port;
if (x == port)
sas_eh_finish_cmd(cmd);
}
}
enum task_disposition {
TASK_IS_DONE,
TASK_IS_ABORTED,
TASK_IS_AT_LU,
TASK_IS_NOT_AT_LU,
TASK_ABORT_FAILED,
};
static enum task_disposition sas_scsi_find_task(struct sas_task *task)
{
unsigned long flags;
int i, res;
struct sas_internal *si =
to_sas_internal(task->dev->port->ha->shost->transportt);
for (i = 0; i < 5; i++) {
pr_notice("%s: aborting task 0x%p\n", __func__, task);
res = si->dft->lldd_abort_task(task);
spin_lock_irqsave(&task->task_state_lock, flags);
if (task->task_state_flags & SAS_TASK_STATE_DONE) {
spin_unlock_irqrestore(&task->task_state_lock, flags);
pr_debug("%s: task 0x%p is done\n", __func__, task);
return TASK_IS_DONE;
}
spin_unlock_irqrestore(&task->task_state_lock, flags);
if (res == TMF_RESP_FUNC_COMPLETE) {
pr_notice("%s: task 0x%p is aborted\n",
__func__, task);
return TASK_IS_ABORTED;
} else if (si->dft->lldd_query_task) {
pr_notice("%s: querying task 0x%p\n", __func__, task);
res = si->dft->lldd_query_task(task);
switch (res) {
case TMF_RESP_FUNC_SUCC:
pr_notice("%s: task 0x%p at LU\n", __func__,
task);
return TASK_IS_AT_LU;
case TMF_RESP_FUNC_COMPLETE:
pr_notice("%s: task 0x%p not at LU\n",
__func__, task);
return TASK_IS_NOT_AT_LU;
case TMF_RESP_FUNC_FAILED:
pr_notice("%s: task 0x%p failed to abort\n",
__func__, task);
return TASK_ABORT_FAILED;
default:
pr_notice("%s: task 0x%p result code %d not handled\n",
__func__, task, res);
}
}
}
return TASK_ABORT_FAILED;
}
static int sas_recover_lu(struct domain_device *dev, struct scsi_cmnd *cmd)
{
int res = TMF_RESP_FUNC_FAILED;
struct scsi_lun lun;
struct sas_internal *i =
to_sas_internal(dev->port->ha->shost->transportt);
int_to_scsilun(cmd->device->lun, &lun);
pr_notice("eh: device %016llx LUN 0x%llx has the task\n",
SAS_ADDR(dev->sas_addr),
cmd->device->lun);
if (i->dft->lldd_abort_task_set)
res = i->dft->lldd_abort_task_set(dev, lun.scsi_lun);
if (res == TMF_RESP_FUNC_FAILED) {
if (i->dft->lldd_clear_task_set)
res = i->dft->lldd_clear_task_set(dev, lun.scsi_lun);
}
if (res == TMF_RESP_FUNC_FAILED) {
if (i->dft->lldd_lu_reset)
res = i->dft->lldd_lu_reset(dev, lun.scsi_lun);
}
return res;
}
static int sas_recover_I_T(struct domain_device *dev)
{
int res = TMF_RESP_FUNC_FAILED;
struct sas_internal *i =
to_sas_internal(dev->port->ha->shost->transportt);
pr_notice("I_T nexus reset for dev %016llx\n",
SAS_ADDR(dev->sas_addr));
if (i->dft->lldd_I_T_nexus_reset)
res = i->dft->lldd_I_T_nexus_reset(dev);
return res;
}
/* take a reference on the last known good phy for this device */
struct sas_phy *sas_get_local_phy(struct domain_device *dev)
{
struct sas_ha_struct *ha = dev->port->ha;
struct sas_phy *phy;
unsigned long flags;
/* a published domain device always has a valid phy; it may be
* stale, but it is never NULL
*/
BUG_ON(!dev->phy);
spin_lock_irqsave(&ha->phy_port_lock, flags);
phy = dev->phy;
get_device(&phy->dev);
spin_unlock_irqrestore(&ha->phy_port_lock, flags);
return phy;
}
EXPORT_SYMBOL_GPL(sas_get_local_phy);
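/*
* Illustrative sketch: callers must drop the reference taken here with
* sas_put_local_phy() once they are done with the phy (my_hard_reset is
* hypothetical):
*
*	static int my_hard_reset(struct domain_device *dev)
*	{
*		struct sas_phy *phy = sas_get_local_phy(dev);
*		int rc = sas_phy_reset(phy, 1);
*
*		sas_put_local_phy(phy);
*		return rc;
*	}
*/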
static int sas_queue_reset(struct domain_device *dev, int reset_type, u64 lun)
{
struct sas_ha_struct *ha = dev->port->ha;
int scheduled = 0, tries = 100;
/* ata: promote lun reset to bus reset */
if (dev_is_sata(dev)) {
sas_ata_schedule_reset(dev);
return SUCCESS;
}
while (!scheduled && tries--) {
spin_lock_irq(&ha->lock);
if (!test_bit(SAS_DEV_EH_PENDING, &dev->state) &&
!test_bit(reset_type, &dev->state)) {
scheduled = 1;
ha->eh_active++;
list_add_tail(&dev->ssp_dev.eh_list_node, &ha->eh_dev_q);
set_bit(SAS_DEV_EH_PENDING, &dev->state);
set_bit(reset_type, &dev->state);
int_to_scsilun(lun, &dev->ssp_dev.reset_lun);
scsi_schedule_eh(ha->shost);
}
spin_unlock_irq(&ha->lock);
if (scheduled)
return SUCCESS;
}
pr_warn("%s reset of %s failed\n",
reset_type == SAS_DEV_LU_RESET ? "LUN" : "Bus",
dev_name(&dev->rphy->dev));
return FAILED;
}
int sas_eh_abort_handler(struct scsi_cmnd *cmd)
{
int res = TMF_RESP_FUNC_FAILED;
struct sas_task *task = TO_SAS_TASK(cmd);
struct Scsi_Host *host = cmd->device->host;
struct domain_device *dev = cmd_to_domain_dev(cmd);
struct sas_internal *i = to_sas_internal(host->transportt);
unsigned long flags;
if (!i->dft->lldd_abort_task)
return FAILED;
spin_lock_irqsave(host->host_lock, flags);
/* We cannot do async aborts for SATA devices */
if (dev_is_sata(dev) && !host->host_eh_scheduled) {
spin_unlock_irqrestore(host->host_lock, flags);
return FAILED;
}
spin_unlock_irqrestore(host->host_lock, flags);
if (task)
res = i->dft->lldd_abort_task(task);
else
pr_notice("no task to abort\n");
if (res == TMF_RESP_FUNC_SUCC || res == TMF_RESP_FUNC_COMPLETE)
return SUCCESS;
return FAILED;
}
EXPORT_SYMBOL_GPL(sas_eh_abort_handler);
/* Attempt to send a LUN reset message to a device */
int sas_eh_device_reset_handler(struct scsi_cmnd *cmd)
{
int res;
struct scsi_lun lun;
struct Scsi_Host *host = cmd->device->host;
struct domain_device *dev = cmd_to_domain_dev(cmd);
struct sas_internal *i = to_sas_internal(host->transportt);
if (current != host->ehandler)
return sas_queue_reset(dev, SAS_DEV_LU_RESET, cmd->device->lun);
int_to_scsilun(cmd->device->lun, &lun);
if (!i->dft->lldd_lu_reset)
return FAILED;
res = i->dft->lldd_lu_reset(dev, lun.scsi_lun);
if (res == TMF_RESP_FUNC_SUCC || res == TMF_RESP_FUNC_COMPLETE)
return SUCCESS;
return FAILED;
}
EXPORT_SYMBOL_GPL(sas_eh_device_reset_handler);
int sas_eh_target_reset_handler(struct scsi_cmnd *cmd)
{
int res;
struct Scsi_Host *host = cmd->device->host;
struct domain_device *dev = cmd_to_domain_dev(cmd);
struct sas_internal *i = to_sas_internal(host->transportt);
if (current != host->ehandler)
return sas_queue_reset(dev, SAS_DEV_RESET, 0);
if (!i->dft->lldd_I_T_nexus_reset)
return FAILED;
res = i->dft->lldd_I_T_nexus_reset(dev);
if (res == TMF_RESP_FUNC_SUCC || res == TMF_RESP_FUNC_COMPLETE ||
res == -ENODEV)
return SUCCESS;
return FAILED;
}
EXPORT_SYMBOL_GPL(sas_eh_target_reset_handler);
/* Try to reset a device */
static int try_to_reset_cmd_device(struct scsi_cmnd *cmd)
{
int res;
struct Scsi_Host *shost = cmd->device->host;
if (!shost->hostt->eh_device_reset_handler)
goto try_target_reset;
res = shost->hostt->eh_device_reset_handler(cmd);
if (res == SUCCESS)
return res;
try_target_reset:
if (shost->hostt->eh_target_reset_handler)
return shost->hostt->eh_target_reset_handler(cmd);
return FAILED;
}
static void sas_eh_handle_sas_errors(struct Scsi_Host *shost, struct list_head *work_q)
{
struct scsi_cmnd *cmd, *n;
enum task_disposition res = TASK_IS_DONE;
int tmf_resp, need_reset;
struct sas_internal *i = to_sas_internal(shost->transportt);
unsigned long flags;
struct sas_ha_struct *ha = SHOST_TO_SAS_HA(shost);
LIST_HEAD(done);
/* clean out any commands that won the completion vs eh race */
list_for_each_entry_safe(cmd, n, work_q, eh_entry) {
struct domain_device *dev = cmd_to_domain_dev(cmd);
struct sas_task *task;
spin_lock_irqsave(&dev->done_lock, flags);
/* by this point the lldd has either observed
* SAS_HA_FROZEN and is leaving the task alone, or has
* won the race with eh and decided to complete it
*/
task = TO_SAS_TASK(cmd);
spin_unlock_irqrestore(&dev->done_lock, flags);
if (!task)
list_move_tail(&cmd->eh_entry, &done);
}
Again:
list_for_each_entry_safe(cmd, n, work_q, eh_entry) {
struct sas_task *task = TO_SAS_TASK(cmd);
list_del_init(&cmd->eh_entry);
spin_lock_irqsave(&task->task_state_lock, flags);
need_reset = task->task_state_flags & SAS_TASK_NEED_DEV_RESET;
spin_unlock_irqrestore(&task->task_state_lock, flags);
if (need_reset) {
pr_notice("%s: task 0x%p requests reset\n",
__func__, task);
goto reset;
}
pr_debug("trying to find task 0x%p\n", task);
res = sas_scsi_find_task(task);
switch (res) {
case TASK_IS_DONE:
pr_notice("%s: task 0x%p is done\n", __func__,
task);
sas_eh_finish_cmd(cmd);
continue;
case TASK_IS_ABORTED:
pr_notice("%s: task 0x%p is aborted\n",
__func__, task);
sas_eh_finish_cmd(cmd);
continue;
case TASK_IS_AT_LU:
pr_info("task 0x%p is at LU: lu recover\n", task);
reset:
tmf_resp = sas_recover_lu(task->dev, cmd);
if (tmf_resp == TMF_RESP_FUNC_COMPLETE) {
pr_notice("dev %016llx LU 0x%llx is recovered\n",
SAS_ADDR(task->dev),
cmd->device->lun);
sas_eh_finish_cmd(cmd);
sas_scsi_clear_queue_lu(work_q, cmd);
goto Again;
}
fallthrough;
case TASK_IS_NOT_AT_LU:
case TASK_ABORT_FAILED:
pr_notice("task 0x%p is not at LU: I_T recover\n",
task);
tmf_resp = sas_recover_I_T(task->dev);
if (tmf_resp == TMF_RESP_FUNC_COMPLETE ||
tmf_resp == -ENODEV) {
struct domain_device *dev = task->dev;
pr_notice("I_T %016llx recovered\n",
SAS_ADDR(task->dev->sas_addr));
sas_eh_finish_cmd(cmd);
sas_scsi_clear_queue_I_T(work_q, dev);
goto Again;
}
/* Hammer time :-) */
try_to_reset_cmd_device(cmd);
if (i->dft->lldd_clear_nexus_port) {
struct asd_sas_port *port = task->dev->port;
pr_debug("clearing nexus for port:%d\n",
port->id);
res = i->dft->lldd_clear_nexus_port(port);
if (res == TMF_RESP_FUNC_COMPLETE) {
pr_notice("clear nexus port:%d succeeded\n",
port->id);
sas_eh_finish_cmd(cmd);
sas_scsi_clear_queue_port(work_q,
port);
goto Again;
}
}
if (i->dft->lldd_clear_nexus_ha) {
pr_debug("clear nexus ha\n");
res = i->dft->lldd_clear_nexus_ha(ha);
if (res == TMF_RESP_FUNC_COMPLETE) {
pr_notice("clear nexus ha succeeded\n");
sas_eh_finish_cmd(cmd);
goto clear_q;
}
}
/* If we are here -- this means that no amount
* of effort could recover from errors. Quite
* possibly the HA just disappeared.
*/
pr_err("error from device %016llx, LUN 0x%llx couldn't be recovered in any way\n",
SAS_ADDR(task->dev->sas_addr),
cmd->device->lun);
sas_eh_finish_cmd(cmd);
goto clear_q;
}
}
out:
list_splice_tail(&done, work_q);
list_splice_tail_init(&ha->eh_ata_q, work_q);
return;
clear_q:
pr_debug("--- Exit %s -- clear_q\n", __func__);
list_for_each_entry_safe(cmd, n, work_q, eh_entry)
sas_eh_finish_cmd(cmd);
goto out;
}
static void sas_eh_handle_resets(struct Scsi_Host *shost)
{
struct sas_ha_struct *ha = SHOST_TO_SAS_HA(shost);
struct sas_internal *i = to_sas_internal(shost->transportt);
/* handle directed resets to sas devices */
spin_lock_irq(&ha->lock);
while (!list_empty(&ha->eh_dev_q)) {
struct domain_device *dev;
struct ssp_device *ssp;
ssp = list_entry(ha->eh_dev_q.next, typeof(*ssp), eh_list_node);
list_del_init(&ssp->eh_list_node);
dev = container_of(ssp, typeof(*dev), ssp_dev);
kref_get(&dev->kref);
WARN_ONCE(dev_is_sata(dev), "ssp reset to ata device?\n");
spin_unlock_irq(&ha->lock);
if (test_and_clear_bit(SAS_DEV_LU_RESET, &dev->state))
i->dft->lldd_lu_reset(dev, ssp->reset_lun.scsi_lun);
if (test_and_clear_bit(SAS_DEV_RESET, &dev->state))
i->dft->lldd_I_T_nexus_reset(dev);
sas_put_device(dev);
spin_lock_irq(&ha->lock);
clear_bit(SAS_DEV_EH_PENDING, &dev->state);
ha->eh_active--;
}
spin_unlock_irq(&ha->lock);
}
void sas_scsi_recover_host(struct Scsi_Host *shost)
{
struct sas_ha_struct *ha = SHOST_TO_SAS_HA(shost);
LIST_HEAD(eh_work_q);
int tries = 0;
bool retry;
retry:
tries++;
retry = true;
spin_lock_irq(shost->host_lock);
list_splice_init(&shost->eh_cmd_q, &eh_work_q);
spin_unlock_irq(shost->host_lock);
pr_notice("Enter %s busy: %d failed: %d\n",
__func__, scsi_host_busy(shost), shost->host_failed);
/*
* Deal with commands that still have SAS tasks (i.e. they didn't
* complete via the normal sas_task completion mechanism).
* SAS_HA_FROZEN gives eh dominion over all sas_task completion.
*/
set_bit(SAS_HA_FROZEN, &ha->state);
sas_eh_handle_sas_errors(shost, &eh_work_q);
clear_bit(SAS_HA_FROZEN, &ha->state);
if (list_empty(&eh_work_q))
goto out;
/*
* Now deal with SCSI commands that completed ok but have an error
* code (and hopefully sense data) attached. This is roughly what
* scsi_unjam_host does, but we skip scsi_eh_abort_cmds because any
* command we see here has no sas_task and is thus unknown to the HA.
*/
sas_ata_eh(shost, &eh_work_q);
if (!scsi_eh_get_sense(&eh_work_q, &ha->eh_done_q))
scsi_eh_ready_devs(shost, &eh_work_q, &ha->eh_done_q);
out:
sas_eh_handle_resets(shost);
/* now link into libata eh --- if we have any ata devices */
sas_ata_strategy_handler(shost);
scsi_eh_flush_done_q(&ha->eh_done_q);
/* check if any new eh work was scheduled during the last run */
spin_lock_irq(&ha->lock);
if (ha->eh_active == 0) {
shost->host_eh_scheduled = 0;
retry = false;
}
spin_unlock_irq(&ha->lock);
if (retry)
goto retry;
pr_notice("--- Exit %s: busy: %d failed: %d tries: %d\n",
__func__, scsi_host_busy(shost),
shost->host_failed, tries);
}
int sas_ioctl(struct scsi_device *sdev, unsigned int cmd, void __user *arg)
{
struct domain_device *dev = sdev_to_domain_dev(sdev);
if (dev_is_sata(dev))
return ata_sas_scsi_ioctl(dev->sata_dev.ap, sdev, cmd, arg);
return -EINVAL;
}
EXPORT_SYMBOL_GPL(sas_ioctl);
struct domain_device *sas_find_dev_by_rphy(struct sas_rphy *rphy)
{
struct Scsi_Host *shost = dev_to_shost(rphy->dev.parent);
struct sas_ha_struct *ha = SHOST_TO_SAS_HA(shost);
struct domain_device *found_dev = NULL;
int i;
unsigned long flags;
spin_lock_irqsave(&ha->phy_port_lock, flags);
for (i = 0; i < ha->num_phys; i++) {
struct asd_sas_port *port = ha->sas_port[i];
struct domain_device *dev;
spin_lock(&port->dev_list_lock);
list_for_each_entry(dev, &port->dev_list, dev_list_node) {
if (rphy == dev->rphy) {
found_dev = dev;
spin_unlock(&port->dev_list_lock);
goto found;
}
}
spin_unlock(&port->dev_list_lock);
}
found:
spin_unlock_irqrestore(&ha->phy_port_lock, flags);
return found_dev;
}
int sas_target_alloc(struct scsi_target *starget)
{
struct sas_rphy *rphy = dev_to_rphy(starget->dev.parent);
struct domain_device *found_dev = sas_find_dev_by_rphy(rphy);
if (!found_dev)
return -ENODEV;
kref_get(&found_dev->kref);
starget->hostdata = found_dev;
return 0;
}
EXPORT_SYMBOL_GPL(sas_target_alloc);
#define SAS_DEF_QD 256
int sas_slave_configure(struct scsi_device *scsi_dev)
{
struct domain_device *dev = sdev_to_domain_dev(scsi_dev);
BUG_ON(dev->rphy->identify.device_type != SAS_END_DEVICE);
if (dev_is_sata(dev)) {
ata_sas_slave_configure(scsi_dev, dev->sata_dev.ap);
return 0;
}
sas_read_port_mode_page(scsi_dev);
if (scsi_dev->tagged_supported) {
scsi_change_queue_depth(scsi_dev, SAS_DEF_QD);
} else {
pr_notice("device %016llx, LUN 0x%llx doesn't support TCQ\n",
SAS_ADDR(dev->sas_addr), scsi_dev->lun);
scsi_change_queue_depth(scsi_dev, 1);
}
scsi_dev->allow_restart = 1;
return 0;
}
EXPORT_SYMBOL_GPL(sas_slave_configure);
int sas_change_queue_depth(struct scsi_device *sdev, int depth)
{
struct domain_device *dev = sdev_to_domain_dev(sdev);
if (dev_is_sata(dev))
return ata_change_queue_depth(dev->sata_dev.ap, sdev, depth);
if (!sdev->tagged_supported)
depth = 1;
return scsi_change_queue_depth(sdev, depth);
}
EXPORT_SYMBOL_GPL(sas_change_queue_depth);
int sas_bios_param(struct scsi_device *scsi_dev,
struct block_device *bdev,
sector_t capacity, int *hsc)
{
hsc[0] = 255;
hsc[1] = 63;
sector_div(capacity, 255*63);
hsc[2] = capacity;
return 0;
}
EXPORT_SYMBOL_GPL(sas_bios_param);
void sas_task_internal_done(struct sas_task *task)
{
del_timer(&task->slow_task->timer);
complete(&task->slow_task->completion);
}
void sas_task_internal_timedout(struct timer_list *t)
{
struct sas_task_slow *slow = from_timer(slow, t, timer);
struct sas_task *task = slow->task;
bool is_completed = true;
unsigned long flags;
spin_lock_irqsave(&task->task_state_lock, flags);
if (!(task->task_state_flags & SAS_TASK_STATE_DONE)) {
task->task_state_flags |= SAS_TASK_STATE_ABORTED;
is_completed = false;
}
spin_unlock_irqrestore(&task->task_state_lock, flags);
if (!is_completed)
complete(&task->slow_task->completion);
}
#define TASK_TIMEOUT (20 * HZ)
#define TASK_RETRY 3
static int sas_execute_internal_abort(struct domain_device *device,
enum sas_internal_abort type, u16 tag,
unsigned int qid, void *data)
{
struct sas_ha_struct *ha = device->port->ha;
struct sas_internal *i = to_sas_internal(ha->shost->transportt);
struct sas_task *task = NULL;
int res, retry;
for (retry = 0; retry < TASK_RETRY; retry++) {
task = sas_alloc_slow_task(GFP_KERNEL);
if (!task)
return -ENOMEM;
task->dev = device;
task->task_proto = SAS_PROTOCOL_INTERNAL_ABORT;
task->task_done = sas_task_internal_done;
task->slow_task->timer.function = sas_task_internal_timedout;
task->slow_task->timer.expires = jiffies + TASK_TIMEOUT;
add_timer(&task->slow_task->timer);
task->abort_task.tag = tag;
task->abort_task.type = type;
task->abort_task.qid = qid;
res = i->dft->lldd_execute_task(task, GFP_KERNEL);
if (res) {
del_timer_sync(&task->slow_task->timer);
pr_err("Executing internal abort failed %016llx (%d)\n",
SAS_ADDR(device->sas_addr), res);
break;
}
wait_for_completion(&task->slow_task->completion);
res = TMF_RESP_FUNC_FAILED;
/* Even if the internal abort timed out, return directly. */
if (task->task_state_flags & SAS_TASK_STATE_ABORTED) {
bool quit = true;
if (i->dft->lldd_abort_timeout)
quit = i->dft->lldd_abort_timeout(task, data);
else
pr_err("Internal abort: timeout %016llx\n",
SAS_ADDR(device->sas_addr));
res = -EIO;
if (quit)
break;
}
if (task->task_status.resp == SAS_TASK_COMPLETE &&
task->task_status.stat == SAS_SAM_STAT_GOOD) {
res = TMF_RESP_FUNC_COMPLETE;
break;
}
if (task->task_status.resp == SAS_TASK_COMPLETE &&
task->task_status.stat == TMF_RESP_FUNC_SUCC) {
res = TMF_RESP_FUNC_SUCC;
break;
}
pr_err("Internal abort: task to dev %016llx response: 0x%x status 0x%x\n",
SAS_ADDR(device->sas_addr), task->task_status.resp,
task->task_status.stat);
sas_free_task(task);
task = NULL;
}
BUG_ON(retry == TASK_RETRY && task != NULL);
sas_free_task(task);
return res;
}
int sas_execute_internal_abort_single(struct domain_device *device, u16 tag,
unsigned int qid, void *data)
{
return sas_execute_internal_abort(device, SAS_INTERNAL_ABORT_SINGLE,
tag, qid, data);
}
EXPORT_SYMBOL_GPL(sas_execute_internal_abort_single);
int sas_execute_internal_abort_dev(struct domain_device *device,
unsigned int qid, void *data)
{
return sas_execute_internal_abort(device, SAS_INTERNAL_ABORT_DEV,
SCSI_NO_TAG, qid, data);
}
EXPORT_SYMBOL_GPL(sas_execute_internal_abort_dev);
int sas_execute_tmf(struct domain_device *device, void *parameter,
int para_len, int force_phy_id,
struct sas_tmf_task *tmf)
{
struct sas_task *task;
struct sas_internal *i =
to_sas_internal(device->port->ha->shost->transportt);
int res, retry;
for (retry = 0; retry < TASK_RETRY; retry++) {
task = sas_alloc_slow_task(GFP_KERNEL);
if (!task)
return -ENOMEM;
task->dev = device;
task->task_proto = device->tproto;
if (dev_is_sata(device)) {
task->ata_task.device_control_reg_update = 1;
if (force_phy_id >= 0) {
task->ata_task.force_phy = true;
task->ata_task.force_phy_id = force_phy_id;
}
memcpy(&task->ata_task.fis, parameter, para_len);
} else {
memcpy(&task->ssp_task, parameter, para_len);
}
task->task_done = sas_task_internal_done;
task->tmf = tmf;
task->slow_task->timer.function = sas_task_internal_timedout;
task->slow_task->timer.expires = jiffies + TASK_TIMEOUT;
add_timer(&task->slow_task->timer);
res = i->dft->lldd_execute_task(task, GFP_KERNEL);
if (res) {
del_timer_sync(&task->slow_task->timer);
pr_err("executing TMF task failed %016llx (%d)\n",
SAS_ADDR(device->sas_addr), res);
break;
}
wait_for_completion(&task->slow_task->completion);
if (i->dft->lldd_tmf_exec_complete)
i->dft->lldd_tmf_exec_complete(device);
res = TMF_RESP_FUNC_FAILED;
if ((task->task_state_flags & SAS_TASK_STATE_ABORTED)) {
if (!(task->task_state_flags & SAS_TASK_STATE_DONE)) {
pr_err("TMF task timeout for %016llx and not done\n",
SAS_ADDR(device->sas_addr));
if (i->dft->lldd_tmf_aborted)
i->dft->lldd_tmf_aborted(task);
break;
}
pr_warn("TMF task timeout for %016llx and done\n",
SAS_ADDR(device->sas_addr));
}
if (task->task_status.resp == SAS_TASK_COMPLETE &&
task->task_status.stat == TMF_RESP_FUNC_COMPLETE) {
res = TMF_RESP_FUNC_COMPLETE;
break;
}
if (task->task_status.resp == SAS_TASK_COMPLETE &&
task->task_status.stat == TMF_RESP_FUNC_SUCC) {
res = TMF_RESP_FUNC_SUCC;
break;
}
if (task->task_status.resp == SAS_TASK_COMPLETE &&
task->task_status.stat == SAS_DATA_UNDERRUN) {
/* no error, but return the number of bytes of
* underrun
*/
pr_warn("TMF task to dev %016llx resp: 0x%x sts 0x%x underrun\n",
SAS_ADDR(device->sas_addr),
task->task_status.resp,
task->task_status.stat);
res = task->task_status.residual;
break;
}
if (task->task_status.resp == SAS_TASK_COMPLETE &&
task->task_status.stat == SAS_DATA_OVERRUN) {
pr_warn("TMF task blocked task error %016llx\n",
SAS_ADDR(device->sas_addr));
res = -EMSGSIZE;
break;
}
if (task->task_status.resp == SAS_TASK_COMPLETE &&
task->task_status.stat == SAS_OPEN_REJECT) {
pr_warn("TMF task open reject failed %016llx\n",
SAS_ADDR(device->sas_addr));
res = -EIO;
} else {
pr_warn("TMF task to dev %016llx resp: 0x%x status 0x%x\n",
SAS_ADDR(device->sas_addr),
task->task_status.resp,
task->task_status.stat);
}
sas_free_task(task);
task = NULL;
}
if (retry == TASK_RETRY)
pr_warn("executing TMF for %016llx failed after %d attempts!\n",
SAS_ADDR(device->sas_addr), TASK_RETRY);
sas_free_task(task);
return res;
}
static int sas_execute_ssp_tmf(struct domain_device *device, u8 *lun,
struct sas_tmf_task *tmf)
{
struct sas_ssp_task ssp_task;
if (!(device->tproto & SAS_PROTOCOL_SSP))
return TMF_RESP_FUNC_ESUPP;
memcpy(ssp_task.LUN, lun, 8);
return sas_execute_tmf(device, &ssp_task, sizeof(ssp_task), -1, tmf);
}
int sas_abort_task_set(struct domain_device *dev, u8 *lun)
{
struct sas_tmf_task tmf_task = {
.tmf = TMF_ABORT_TASK_SET,
};
return sas_execute_ssp_tmf(dev, lun, &tmf_task);
}
EXPORT_SYMBOL_GPL(sas_abort_task_set);
int sas_clear_task_set(struct domain_device *dev, u8 *lun)
{
struct sas_tmf_task tmf_task = {
.tmf = TMF_CLEAR_TASK_SET,
};
return sas_execute_ssp_tmf(dev, lun, &tmf_task);
}
EXPORT_SYMBOL_GPL(sas_clear_task_set);
int sas_lu_reset(struct domain_device *dev, u8 *lun)
{
struct sas_tmf_task tmf_task = {
.tmf = TMF_LU_RESET,
};
return sas_execute_ssp_tmf(dev, lun, &tmf_task);
}
EXPORT_SYMBOL_GPL(sas_lu_reset);
int sas_query_task(struct sas_task *task, u16 tag)
{
struct sas_tmf_task tmf_task = {
.tmf = TMF_QUERY_TASK,
.tag_of_task_to_be_managed = tag,
};
struct scsi_cmnd *cmnd = task->uldd_task;
struct domain_device *dev = task->dev;
struct scsi_lun lun;
int_to_scsilun(cmnd->device->lun, &lun);
return sas_execute_ssp_tmf(dev, lun.scsi_lun, &tmf_task);
}
EXPORT_SYMBOL_GPL(sas_query_task);
int sas_abort_task(struct sas_task *task, u16 tag)
{
struct sas_tmf_task tmf_task = {
.tmf = TMF_ABORT_TASK,
.tag_of_task_to_be_managed = tag,
};
struct scsi_cmnd *cmnd = task->uldd_task;
struct domain_device *dev = task->dev;
struct scsi_lun lun;
int_to_scsilun(cmnd->device->lun, &lun);
return sas_execute_ssp_tmf(dev, lun.scsi_lun, &tmf_task);
}
EXPORT_SYMBOL_GPL(sas_abort_task);
/*
* Tell an upper layer that it needs to initiate an abort for a given task.
* This should only ever be called by an LLDD.
*/
void sas_task_abort(struct sas_task *task)
{
struct scsi_cmnd *sc = task->uldd_task;
/* Escape for libsas internal commands */
if (!sc) {
struct sas_task_slow *slow = task->slow_task;
if (!slow)
return;
if (!del_timer(&slow->timer))
return;
slow->timer.function(&slow->timer);
return;
}
if (dev_is_sata(task->dev))
sas_ata_task_abort(task);
else
blk_abort_request(scsi_cmd_to_rq(sc));
}
EXPORT_SYMBOL_GPL(sas_task_abort);
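/*
* Illustrative sketch: a hypothetical LLDD that detects a broken task in
* its completion path hands it back to the upper layer like this instead
* of completing it itself; the abort then flows through the eh machinery
* above:
*
*	static void my_handle_bad_completion(struct sas_task *task)
*	{
*		unsigned long flags;
*
*		spin_lock_irqsave(&task->task_state_lock, flags);
*		task->task_state_flags |= SAS_TASK_NEED_DEV_RESET;
*		spin_unlock_irqrestore(&task->task_state_lock, flags);
*
*		sas_task_abort(task);
*	}
*/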
int sas_slave_alloc(struct scsi_device *sdev)
{
if (dev_is_sata(sdev_to_domain_dev(sdev)) && sdev->lun)
return -ENXIO;
return 0;
}
EXPORT_SYMBOL_GPL(sas_slave_alloc);
void sas_target_destroy(struct scsi_target *starget)
{
struct domain_device *found_dev = starget->hostdata;
if (!found_dev)
return;
starget->hostdata = NULL;
sas_put_device(found_dev);
}
EXPORT_SYMBOL_GPL(sas_target_destroy);
#define SAS_STRING_ADDR_SIZE 16
int sas_request_addr(struct Scsi_Host *shost, u8 *addr)
{
int res;
const struct firmware *fw;
res = request_firmware(&fw, "sas_addr", &shost->shost_gendev);
if (res)
return res;
if (fw->size < SAS_STRING_ADDR_SIZE) {
res = -ENODEV;
goto out;
}
res = hex2bin(addr, fw->data, strnlen(fw->data, SAS_ADDR_SIZE * 2) / 2);
if (res)
goto out;
out:
release_firmware(fw);
return res;
}
EXPORT_SYMBOL_GPL(sas_request_addr);
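/*
* Illustrative sketch: an LLDD with no programmed WWN can fall back to a
* "sas_addr" firmware blob holding the address as a 16-character hex
* string (for example "50010203aabbccdd"). DEFAULT_SAS_ADDR below is a
* hypothetical fallback constant:
*
*	u8 addr[SAS_ADDR_SIZE];
*
*	if (sas_request_addr(shost, addr))
*		memcpy(addr, DEFAULT_SAS_ADDR, SAS_ADDR_SIZE);
*/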
| linux-master | drivers/scsi/libsas/sas_scsi_host.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Serial Attached SCSI (SAS) Discover process
*
* Copyright (C) 2005 Adaptec, Inc. All rights reserved.
* Copyright (C) 2005 Luben Tuikov <[email protected]>
*/
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_eh.h>
#include "sas_internal.h"
#include <scsi/scsi_transport.h>
#include <scsi/scsi_transport_sas.h>
#include <scsi/sas_ata.h>
#include "scsi_sas_internal.h"
/* ---------- Basic task processing for discovery purposes ---------- */
void sas_init_dev(struct domain_device *dev)
{
switch (dev->dev_type) {
case SAS_END_DEVICE:
INIT_LIST_HEAD(&dev->ssp_dev.eh_list_node);
break;
case SAS_EDGE_EXPANDER_DEVICE:
case SAS_FANOUT_EXPANDER_DEVICE:
INIT_LIST_HEAD(&dev->ex_dev.children);
mutex_init(&dev->ex_dev.cmd_mutex);
break;
default:
break;
}
}
/* ---------- Domain device discovery ---------- */
/**
* sas_get_port_device - Discover devices which caused port creation
* @port: pointer to struct sas_port of interest
*
* Devices directly attached to a HA port have no parent. This is
* how we know they are (domain) "root" devices. All other devices
* do, and should have their "parent" pointer set appropriately as
* soon as a child device is discovered.
*/
static int sas_get_port_device(struct asd_sas_port *port)
{
struct asd_sas_phy *phy;
struct sas_rphy *rphy;
struct domain_device *dev;
int rc = -ENODEV;
dev = sas_alloc_device();
if (!dev)
return -ENOMEM;
spin_lock_irq(&port->phy_list_lock);
if (list_empty(&port->phy_list)) {
spin_unlock_irq(&port->phy_list_lock);
sas_put_device(dev);
return -ENODEV;
}
phy = container_of(port->phy_list.next, struct asd_sas_phy, port_phy_el);
spin_lock(&phy->frame_rcvd_lock);
memcpy(dev->frame_rcvd, phy->frame_rcvd, min(sizeof(dev->frame_rcvd),
(size_t)phy->frame_rcvd_size));
spin_unlock(&phy->frame_rcvd_lock);
spin_unlock_irq(&port->phy_list_lock);
if (dev->frame_rcvd[0] == 0x34 && port->oob_mode == SATA_OOB_MODE) {
struct dev_to_host_fis *fis =
(struct dev_to_host_fis *) dev->frame_rcvd;
if (fis->interrupt_reason == 1 && fis->lbal == 1 &&
fis->byte_count_low == 0x69 && fis->byte_count_high == 0x96
&& (fis->device & ~0x10) == 0)
dev->dev_type = SAS_SATA_PM;
else
dev->dev_type = SAS_SATA_DEV;
dev->tproto = SAS_PROTOCOL_SATA;
} else if (port->oob_mode == SAS_OOB_MODE) {
struct sas_identify_frame *id =
(struct sas_identify_frame *) dev->frame_rcvd;
dev->dev_type = id->dev_type;
dev->iproto = id->initiator_bits;
dev->tproto = id->target_bits;
} else {
/* If the oob mode is OOB_NOT_CONNECTED, the port is
* disconnected due to a race with PHY down. We cannot
* continue to discover this port
*/
sas_put_device(dev);
pr_warn("Port %016llx is disconnected when discovering\n",
SAS_ADDR(port->attached_sas_addr));
return -ENODEV;
}
sas_init_dev(dev);
dev->port = port;
switch (dev->dev_type) {
case SAS_SATA_DEV:
rc = sas_ata_init(dev);
if (rc) {
rphy = NULL;
break;
}
fallthrough;
case SAS_END_DEVICE:
rphy = sas_end_device_alloc(port->port);
break;
case SAS_EDGE_EXPANDER_DEVICE:
rphy = sas_expander_alloc(port->port,
SAS_EDGE_EXPANDER_DEVICE);
break;
case SAS_FANOUT_EXPANDER_DEVICE:
rphy = sas_expander_alloc(port->port,
SAS_FANOUT_EXPANDER_DEVICE);
break;
default:
pr_warn("ERROR: Unidentified device type %d\n", dev->dev_type);
rphy = NULL;
break;
}
if (!rphy) {
sas_put_device(dev);
return rc;
}
rphy->identify.phy_identifier = phy->phy->identify.phy_identifier;
memcpy(dev->sas_addr, port->attached_sas_addr, SAS_ADDR_SIZE);
sas_fill_in_rphy(dev, rphy);
sas_hash_addr(dev->hashed_sas_addr, dev->sas_addr);
port->port_dev = dev;
dev->linkrate = port->linkrate;
dev->min_linkrate = port->linkrate;
dev->max_linkrate = port->linkrate;
dev->pathways = port->num_phys;
memset(port->disc.fanout_sas_addr, 0, SAS_ADDR_SIZE);
memset(port->disc.eeds_a, 0, SAS_ADDR_SIZE);
memset(port->disc.eeds_b, 0, SAS_ADDR_SIZE);
port->disc.max_level = 0;
sas_device_set_phy(dev, port->port);
dev->rphy = rphy;
get_device(&dev->rphy->dev);
if (dev_is_sata(dev) || dev->dev_type == SAS_END_DEVICE)
list_add_tail(&dev->disco_list_node, &port->disco_list);
else {
spin_lock_irq(&port->dev_list_lock);
list_add_tail(&dev->dev_list_node, &port->dev_list);
spin_unlock_irq(&port->dev_list_lock);
}
spin_lock_irq(&port->phy_list_lock);
list_for_each_entry(phy, &port->phy_list, port_phy_el)
sas_phy_set_target(phy, dev);
spin_unlock_irq(&port->phy_list_lock);
return 0;
}
/* ---------- Discover and Revalidate ---------- */
int sas_notify_lldd_dev_found(struct domain_device *dev)
{
int res = 0;
struct sas_ha_struct *sas_ha = dev->port->ha;
struct Scsi_Host *shost = sas_ha->shost;
struct sas_internal *i = to_sas_internal(shost->transportt);
if (!i->dft->lldd_dev_found)
return 0;
res = i->dft->lldd_dev_found(dev);
if (res) {
pr_warn("driver on host %s cannot handle device %016llx, error:%d\n",
dev_name(sas_ha->dev),
SAS_ADDR(dev->sas_addr), res);
return res;
}
set_bit(SAS_DEV_FOUND, &dev->state);
kref_get(&dev->kref);
return 0;
}
void sas_notify_lldd_dev_gone(struct domain_device *dev)
{
struct sas_ha_struct *sas_ha = dev->port->ha;
struct Scsi_Host *shost = sas_ha->shost;
struct sas_internal *i = to_sas_internal(shost->transportt);
if (!i->dft->lldd_dev_gone)
return;
if (test_and_clear_bit(SAS_DEV_FOUND, &dev->state)) {
i->dft->lldd_dev_gone(dev);
sas_put_device(dev);
}
}
static void sas_probe_devices(struct asd_sas_port *port)
{
struct domain_device *dev, *n;
/* devices must be domain members before link recovery and probe */
list_for_each_entry(dev, &port->disco_list, disco_list_node) {
spin_lock_irq(&port->dev_list_lock);
list_add_tail(&dev->dev_list_node, &port->dev_list);
spin_unlock_irq(&port->dev_list_lock);
}
sas_probe_sata(port);
list_for_each_entry_safe(dev, n, &port->disco_list, disco_list_node) {
int err;
err = sas_rphy_add(dev->rphy);
if (err)
sas_fail_probe(dev, __func__, err);
else
list_del_init(&dev->disco_list_node);
}
}
static void sas_suspend_devices(struct work_struct *work)
{
struct asd_sas_phy *phy;
struct domain_device *dev;
struct sas_discovery_event *ev = to_sas_discovery_event(work);
struct asd_sas_port *port = ev->port;
struct Scsi_Host *shost = port->ha->shost;
struct sas_internal *si = to_sas_internal(shost->transportt);
clear_bit(DISCE_SUSPEND, &port->disc.pending);
sas_suspend_sata(port);
/* the lldd is free to forget the domain_device across the
* suspension; we force the issue here to keep the reference
* counts aligned
*/
list_for_each_entry(dev, &port->dev_list, dev_list_node)
sas_notify_lldd_dev_gone(dev);
/* we are suspending, so we know events are disabled and
* phy_list is not being mutated
*/
list_for_each_entry(phy, &port->phy_list, port_phy_el) {
if (si->dft->lldd_port_deformed)
si->dft->lldd_port_deformed(phy);
phy->suspended = 1;
port->suspended = 1;
}
}
static void sas_resume_devices(struct work_struct *work)
{
struct sas_discovery_event *ev = to_sas_discovery_event(work);
struct asd_sas_port *port = ev->port;
clear_bit(DISCE_RESUME, &port->disc.pending);
sas_resume_sata(port);
}
/**
* sas_discover_end_dev - discover an end device (SSP, etc)
* @dev: pointer to domain device of interest
*
* See comment in sas_discover_sata().
*/
int sas_discover_end_dev(struct domain_device *dev)
{
return sas_notify_lldd_dev_found(dev);
}
/* ---------- Device registration and unregistration ---------- */
void sas_free_device(struct kref *kref)
{
struct domain_device *dev = container_of(kref, typeof(*dev), kref);
put_device(&dev->rphy->dev);
dev->rphy = NULL;
if (dev->parent)
sas_put_device(dev->parent);
sas_port_put_phy(dev->phy);
dev->phy = NULL;
/* remove the phys and ports, everything else should be gone */
if (dev_is_expander(dev->dev_type))
kfree(dev->ex_dev.ex_phy);
if (dev_is_sata(dev) && dev->sata_dev.ap) {
ata_sas_tport_delete(dev->sata_dev.ap);
kfree(dev->sata_dev.ap);
ata_host_put(dev->sata_dev.ata_host);
dev->sata_dev.ata_host = NULL;
dev->sata_dev.ap = NULL;
}
kfree(dev);
}
static void sas_unregister_common_dev(struct asd_sas_port *port, struct domain_device *dev)
{
struct sas_ha_struct *ha = port->ha;
sas_notify_lldd_dev_gone(dev);
if (!dev->parent)
dev->port->port_dev = NULL;
else
list_del_init(&dev->siblings);
spin_lock_irq(&port->dev_list_lock);
list_del_init(&dev->dev_list_node);
if (dev_is_sata(dev))
sas_ata_end_eh(dev->sata_dev.ap);
spin_unlock_irq(&port->dev_list_lock);
spin_lock_irq(&ha->lock);
if (dev->dev_type == SAS_END_DEVICE &&
!list_empty(&dev->ssp_dev.eh_list_node)) {
list_del_init(&dev->ssp_dev.eh_list_node);
ha->eh_active--;
}
spin_unlock_irq(&ha->lock);
sas_put_device(dev);
}
void sas_destruct_devices(struct asd_sas_port *port)
{
struct domain_device *dev, *n;
list_for_each_entry_safe(dev, n, &port->destroy_list, disco_list_node) {
list_del_init(&dev->disco_list_node);
sas_remove_children(&dev->rphy->dev);
sas_rphy_delete(dev->rphy);
sas_unregister_common_dev(port, dev);
}
}
static void sas_destruct_ports(struct asd_sas_port *port)
{
struct sas_port *sas_port, *p;
list_for_each_entry_safe(sas_port, p, &port->sas_port_del_list, del_list) {
list_del_init(&sas_port->del_list);
sas_port_delete(sas_port);
}
}
static bool sas_abort_cmd(struct request *req, void *data)
{
struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(req);
struct domain_device *dev = data;
if (dev == cmd_to_domain_dev(cmd))
blk_abort_request(req);
return true;
}
static void sas_abort_device_scsi_cmds(struct domain_device *dev)
{
struct sas_ha_struct *sas_ha = dev->port->ha;
struct Scsi_Host *shost = sas_ha->shost;
if (dev_is_expander(dev->dev_type))
return;
/*
* For a removed device with active IOs, user space applications would
* otherwise spend a very long time waiting for the timeout. That wait is
* unnecessary because a removed device will never complete the IOs.
* Abort the inflight IOs here so that EH can be kicked in quickly.
*/
blk_mq_tagset_busy_iter(&shost->tag_set, sas_abort_cmd, dev);
}
void sas_unregister_dev(struct asd_sas_port *port, struct domain_device *dev)
{
if (!test_bit(SAS_DEV_DESTROY, &dev->state) &&
!list_empty(&dev->disco_list_node)) {
/* this rphy never saw sas_rphy_add */
list_del_init(&dev->disco_list_node);
sas_rphy_free(dev->rphy);
sas_unregister_common_dev(port, dev);
return;
}
if (!test_and_set_bit(SAS_DEV_DESTROY, &dev->state)) {
if (test_bit(SAS_DEV_GONE, &dev->state))
sas_abort_device_scsi_cmds(dev);
sas_rphy_unlink(dev->rphy);
list_move_tail(&dev->disco_list_node, &port->destroy_list);
}
}
void sas_unregister_domain_devices(struct asd_sas_port *port, int gone)
{
struct domain_device *dev, *n;
list_for_each_entry_safe_reverse(dev, n, &port->dev_list, dev_list_node) {
if (gone)
set_bit(SAS_DEV_GONE, &dev->state);
sas_unregister_dev(port, dev);
}
list_for_each_entry_safe(dev, n, &port->disco_list, disco_list_node)
sas_unregister_dev(port, dev);
port->port->rphy = NULL;
}
void sas_device_set_phy(struct domain_device *dev, struct sas_port *port)
{
struct sas_ha_struct *ha;
struct sas_phy *new_phy;
if (!dev)
return;
ha = dev->port->ha;
new_phy = sas_port_get_phy(port);
/* pin and record last seen phy */
spin_lock_irq(&ha->phy_port_lock);
if (new_phy) {
sas_port_put_phy(dev->phy);
dev->phy = new_phy;
}
spin_unlock_irq(&ha->phy_port_lock);
}
/* ---------- Discovery and Revalidation ---------- */
/**
* sas_discover_domain - discover the domain
* @work: work structure embedded in port domain device.
*
* NOTE: this process _must_ quit (return) as soon as any connection
* errors are encountered. Connection recovery is done elsewhere.
* Discover process only interrogates devices in order to discover the
* domain.
*/
static void sas_discover_domain(struct work_struct *work)
{
struct domain_device *dev;
int error = 0;
struct sas_discovery_event *ev = to_sas_discovery_event(work);
struct asd_sas_port *port = ev->port;
clear_bit(DISCE_DISCOVER_DOMAIN, &port->disc.pending);
if (port->port_dev)
return;
error = sas_get_port_device(port);
if (error)
return;
dev = port->port_dev;
pr_debug("DOING DISCOVERY on port %d, pid:%d\n", port->id,
task_pid_nr(current));
switch (dev->dev_type) {
case SAS_END_DEVICE:
error = sas_discover_end_dev(dev);
break;
case SAS_EDGE_EXPANDER_DEVICE:
case SAS_FANOUT_EXPANDER_DEVICE:
error = sas_discover_root_expander(dev);
break;
case SAS_SATA_DEV:
case SAS_SATA_PM:
error = sas_discover_sata(dev);
break;
default:
error = -ENXIO;
pr_err("unhandled device %d\n", dev->dev_type);
break;
}
if (error) {
sas_rphy_free(dev->rphy);
list_del_init(&dev->disco_list_node);
spin_lock_irq(&port->dev_list_lock);
list_del_init(&dev->dev_list_node);
spin_unlock_irq(&port->dev_list_lock);
sas_put_device(dev);
port->port_dev = NULL;
}
sas_probe_devices(port);
pr_debug("DONE DISCOVERY on port %d, pid:%d, result:%d\n", port->id,
task_pid_nr(current), error);
}
static void sas_revalidate_domain(struct work_struct *work)
{
int res = 0;
struct sas_discovery_event *ev = to_sas_discovery_event(work);
struct asd_sas_port *port = ev->port;
struct sas_ha_struct *ha = port->ha;
struct domain_device *ddev = port->port_dev;
/* prevent revalidation from finding sata links in recovery */
mutex_lock(&ha->disco_mutex);
if (test_bit(SAS_HA_ATA_EH_ACTIVE, &ha->state)) {
pr_debug("REVALIDATION DEFERRED on port %d, pid:%d\n",
port->id, task_pid_nr(current));
goto out;
}
clear_bit(DISCE_REVALIDATE_DOMAIN, &port->disc.pending);
pr_debug("REVALIDATING DOMAIN on port %d, pid:%d\n", port->id,
task_pid_nr(current));
if (ddev && dev_is_expander(ddev->dev_type))
res = sas_ex_revalidate_domain(ddev);
pr_debug("done REVALIDATING DOMAIN on port %d, pid:%d, res 0x%x\n",
port->id, task_pid_nr(current), res);
out:
mutex_unlock(&ha->disco_mutex);
sas_destruct_devices(port);
sas_destruct_ports(port);
sas_probe_devices(port);
}
/* ---------- Events ---------- */
static void sas_chain_work(struct sas_ha_struct *ha, struct sas_work *sw)
{
/* chained work is not subject to SAS_HA_DRAINING or
* SAS_HA_REGISTERED, because it is either submitted in the
* workqueue, or known to be submitted from a context that is
* not racing against draining
*/
queue_work(ha->disco_q, &sw->work);
}
static void sas_chain_event(int event, unsigned long *pending,
struct sas_work *sw,
struct sas_ha_struct *ha)
{
if (!test_and_set_bit(event, pending)) {
unsigned long flags;
spin_lock_irqsave(&ha->lock, flags);
sas_chain_work(ha, sw);
spin_unlock_irqrestore(&ha->lock, flags);
}
}
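/*
* Queue a discovery event for this port; the handler runs from the
* HA's disco_q workqueue.
*/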
void sas_discover_event(struct asd_sas_port *port, enum discover_event ev)
{
struct sas_discovery *disc;
if (!port)
return;
disc = &port->disc;
BUG_ON(ev >= DISC_NUM_EVENTS);
sas_chain_event(ev, &disc->pending, &disc->disc_work[ev].work, port->ha);
}
/**
* sas_init_disc - initialize the discovery struct in the port
* @disc: port discovery structure
* @port: pointer to struct port
*
* Called when the ports are being initialized.
*/
void sas_init_disc(struct sas_discovery *disc, struct asd_sas_port *port)
{
int i;
static const work_func_t sas_event_fns[DISC_NUM_EVENTS] = {
[DISCE_DISCOVER_DOMAIN] = sas_discover_domain,
[DISCE_REVALIDATE_DOMAIN] = sas_revalidate_domain,
[DISCE_SUSPEND] = sas_suspend_devices,
[DISCE_RESUME] = sas_resume_devices,
};
disc->pending = 0;
for (i = 0; i < DISC_NUM_EVENTS; i++) {
INIT_SAS_WORK(&disc->disc_work[i].work, sas_event_fns[i]);
disc->disc_work[i].port = port;
}
}
| linux-master | drivers/scsi/libsas/sas_discover.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Serial Attached SCSI (SAS) Phy class
*
* Copyright (C) 2005 Adaptec, Inc. All rights reserved.
* Copyright (C) 2005 Luben Tuikov <[email protected]>
*/
#include "sas_internal.h"
#include <scsi/scsi_host.h>
#include <scsi/scsi_transport.h>
#include <scsi/scsi_transport_sas.h>
#include "scsi_sas_internal.h"
/* ---------- Phy events ---------- */
static void sas_phye_loss_of_signal(struct work_struct *work)
{
struct asd_sas_event *ev = to_asd_sas_event(work);
struct asd_sas_phy *phy = ev->phy;
phy->error = 0;
sas_deform_port(phy, 1);
}
static void sas_phye_oob_done(struct work_struct *work)
{
struct asd_sas_event *ev = to_asd_sas_event(work);
struct asd_sas_phy *phy = ev->phy;
phy->error = 0;
}
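/*
* OOB error: tear the port down and, if the phy is not part of a port
* but is still enabled, retry the link with a hard reset up to twice
* before giving up and disabling the phy.
*/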
static void sas_phye_oob_error(struct work_struct *work)
{
struct asd_sas_event *ev = to_asd_sas_event(work);
struct asd_sas_phy *phy = ev->phy;
struct sas_ha_struct *sas_ha = phy->ha;
struct asd_sas_port *port = phy->port;
struct sas_internal *i =
to_sas_internal(sas_ha->shost->transportt);
sas_deform_port(phy, 1);
if (!port && phy->enabled && i->dft->lldd_control_phy) {
phy->error++;
switch (phy->error) {
case 1:
case 2:
i->dft->lldd_control_phy(phy, PHY_FUNC_HARD_RESET,
NULL);
break;
case 3:
default:
phy->error = 0;
phy->enabled = 0;
i->dft->lldd_control_phy(phy, PHY_FUNC_DISABLE, NULL);
break;
}
}
}
static void sas_phye_spinup_hold(struct work_struct *work)
{
struct asd_sas_event *ev = to_asd_sas_event(work);
struct asd_sas_phy *phy = ev->phy;
struct sas_ha_struct *sas_ha = phy->ha;
struct sas_internal *i =
to_sas_internal(sas_ha->shost->transportt);
phy->error = 0;
i->dft->lldd_control_phy(phy, PHY_FUNC_RELEASE_SPINUP_HOLD, NULL);
}
static void sas_phye_resume_timeout(struct work_struct *work)
{
struct asd_sas_event *ev = to_asd_sas_event(work);
struct asd_sas_phy *phy = ev->phy;
/* phew, lldd got the phy back in the nick of time */
if (!phy->suspended) {
dev_info(&phy->phy->dev, "resume timeout cancelled\n");
return;
}
phy->error = 0;
phy->suspended = 0;
sas_deform_port(phy, 1);
}
static void sas_phye_shutdown(struct work_struct *work)
{
struct asd_sas_event *ev = to_asd_sas_event(work);
struct asd_sas_phy *phy = ev->phy;
struct sas_ha_struct *sas_ha = phy->ha;
struct sas_internal *i =
to_sas_internal(sas_ha->shost->transportt);
if (phy->enabled) {
int ret;
phy->error = 0;
phy->enabled = 0;
ret = i->dft->lldd_control_phy(phy, PHY_FUNC_DISABLE, NULL);
if (ret)
pr_notice("lldd disable phy%d returned %d\n", phy->id,
ret);
} else
pr_notice("phy%d is not enabled, cannot shutdown\n", phy->id);
phy->in_shutdown = 0;
}
/* ---------- Phy class registration ---------- */
int sas_register_phys(struct sas_ha_struct *sas_ha)
{
int i;
/* Now register the phys. */
for (i = 0; i < sas_ha->num_phys; i++) {
struct asd_sas_phy *phy = sas_ha->sas_phy[i];
phy->error = 0;
atomic_set(&phy->event_nr, 0);
INIT_LIST_HEAD(&phy->port_phy_el);
phy->port = NULL;
phy->ha = sas_ha;
spin_lock_init(&phy->frame_rcvd_lock);
spin_lock_init(&phy->sas_prim_lock);
phy->frame_rcvd_size = 0;
phy->phy = sas_phy_alloc(&sas_ha->shost->shost_gendev, i);
if (!phy->phy)
return -ENOMEM;
phy->phy->identify.initiator_port_protocols =
phy->iproto;
phy->phy->identify.target_port_protocols = phy->tproto;
phy->phy->identify.sas_address = SAS_ADDR(sas_ha->sas_addr);
phy->phy->identify.phy_identifier = i;
phy->phy->minimum_linkrate_hw = SAS_LINK_RATE_UNKNOWN;
phy->phy->maximum_linkrate_hw = SAS_LINK_RATE_UNKNOWN;
phy->phy->minimum_linkrate = SAS_LINK_RATE_UNKNOWN;
phy->phy->maximum_linkrate = SAS_LINK_RATE_UNKNOWN;
phy->phy->negotiated_linkrate = SAS_LINK_RATE_UNKNOWN;
sas_phy_add(phy->phy);
}
return 0;
}
const work_func_t sas_phy_event_fns[PHY_NUM_EVENTS] = {
[PHYE_LOSS_OF_SIGNAL] = sas_phye_loss_of_signal,
[PHYE_OOB_DONE] = sas_phye_oob_done,
[PHYE_OOB_ERROR] = sas_phye_oob_error,
[PHYE_SPINUP_HOLD] = sas_phye_spinup_hold,
[PHYE_RESUME_TIMEOUT] = sas_phye_resume_timeout,
[PHYE_SHUTDOWN] = sas_phye_shutdown,
};
| linux-master | drivers/scsi/libsas/sas_phy.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Support for SATA devices on Serial Attached SCSI (SAS) controllers
*
* Copyright (C) 2006 IBM Corporation
*
* Written by: Darrick J. Wong <[email protected]>, IBM Corporation
*/
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/async.h>
#include <linux/export.h>
#include <scsi/sas_ata.h>
#include "sas_internal.h"
#include <scsi/scsi_host.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi.h>
#include <scsi/scsi_transport.h>
#include <scsi/scsi_transport_sas.h>
#include "scsi_sas_internal.h"
#include "scsi_transport_api.h"
#include <scsi/scsi_eh.h>
static enum ata_completion_errors sas_to_ata_err(struct task_status_struct *ts)
{
/* Cheesy attempt to translate SAS errors into ATA. Hah! */
/* transport error */
if (ts->resp == SAS_TASK_UNDELIVERED)
return AC_ERR_ATA_BUS;
/* ts->resp == SAS_TASK_COMPLETE */
/* task delivered, what happened afterwards? */
switch (ts->stat) {
case SAS_DEV_NO_RESPONSE:
return AC_ERR_TIMEOUT;
case SAS_INTERRUPTED:
case SAS_PHY_DOWN:
case SAS_NAK_R_ERR:
return AC_ERR_ATA_BUS;
case SAS_DATA_UNDERRUN:
/*
* Some programs that use the taskfile interface
* (smartctl in particular) can cause underrun
* problems. Ignore these errors, perhaps at our
* peril.
*/
return 0;
case SAS_DATA_OVERRUN:
case SAS_QUEUE_FULL:
case SAS_DEVICE_UNKNOWN:
case SAS_OPEN_TO:
case SAS_OPEN_REJECT:
pr_warn("%s: Saw error %d. What to do?\n",
__func__, ts->stat);
return AC_ERR_OTHER;
case SAM_STAT_CHECK_CONDITION:
case SAS_ABORTED_TASK:
return AC_ERR_DEV;
case SAS_PROTO_RESPONSE:
/* This means the ending_fis has the error
* value; return 0 here to collect it
*/
return 0;
default:
return 0;
}
}
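/*
* Completion callback for STP/SATA sas_tasks: copy the ending FIS or
* translate the SAS status into libata error masks and complete the qc,
* unless libsas EH (SAS_HA_FROZEN) or libata's internal-command abort
* path has already taken ownership of the task.
*/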
static void sas_ata_task_done(struct sas_task *task)
{
struct ata_queued_cmd *qc = task->uldd_task;
struct domain_device *dev = task->dev;
struct task_status_struct *stat = &task->task_status;
struct ata_task_resp *resp = (struct ata_task_resp *)stat->buf;
struct sas_ha_struct *sas_ha = dev->port->ha;
enum ata_completion_errors ac;
unsigned long flags;
struct ata_link *link;
struct ata_port *ap;
spin_lock_irqsave(&dev->done_lock, flags);
if (test_bit(SAS_HA_FROZEN, &sas_ha->state))
task = NULL;
else if (qc && qc->scsicmd)
ASSIGN_SAS_TASK(qc->scsicmd, NULL);
spin_unlock_irqrestore(&dev->done_lock, flags);
/* check if libsas-eh got to the task before us */
if (unlikely(!task))
return;
if (!qc)
goto qc_already_gone;
ap = qc->ap;
link = &ap->link;
spin_lock_irqsave(ap->lock, flags);
/* check if we lost the race with libata/sas_ata_post_internal() */
if (unlikely(ata_port_is_frozen(ap))) {
spin_unlock_irqrestore(ap->lock, flags);
if (qc->scsicmd)
goto qc_already_gone;
else {
/* if eh is not involved and the port is frozen then the
* ata internal abort process has taken responsibility
* for this sas_task
*/
return;
}
}
if (stat->stat == SAS_PROTO_RESPONSE ||
stat->stat == SAS_SAM_STAT_GOOD ||
(stat->stat == SAS_SAM_STAT_CHECK_CONDITION &&
dev->sata_dev.class == ATA_DEV_ATAPI)) {
memcpy(dev->sata_dev.fis, resp->ending_fis, ATA_RESP_FIS_SIZE);
if (!link->sactive) {
qc->err_mask |= ac_err_mask(dev->sata_dev.fis[2]);
} else {
link->eh_info.err_mask |= ac_err_mask(dev->sata_dev.fis[2]);
if (unlikely(link->eh_info.err_mask))
qc->flags |= ATA_QCFLAG_EH;
}
} else {
ac = sas_to_ata_err(stat);
if (ac) {
pr_warn("%s: SAS error 0x%x\n", __func__, stat->stat);
/* We saw a SAS error. Send a vague error. */
if (!link->sactive) {
qc->err_mask = ac;
} else {
link->eh_info.err_mask |= AC_ERR_DEV;
qc->flags |= ATA_QCFLAG_EH;
}
dev->sata_dev.fis[2] = ATA_ERR | ATA_DRDY; /* tf status */
dev->sata_dev.fis[3] = ATA_ABORTED; /* tf error */
}
}
qc->lldd_task = NULL;
ata_qc_complete(qc);
spin_unlock_irqrestore(ap->lock, flags);
qc_already_gone:
sas_free_task(task);
}
static unsigned int sas_ata_qc_issue(struct ata_queued_cmd *qc)
__must_hold(ap->lock)
{
struct sas_task *task;
struct scatterlist *sg;
int ret = AC_ERR_SYSTEM;
unsigned int si, xfer = 0;
struct ata_port *ap = qc->ap;
struct domain_device *dev = ap->private_data;
struct sas_ha_struct *sas_ha = dev->port->ha;
struct Scsi_Host *host = sas_ha->shost;
struct sas_internal *i = to_sas_internal(host->transportt);
/* TODO: we should try to remove that unlock */
spin_unlock(ap->lock);
/* If the device fell off, no sense in issuing commands */
if (test_bit(SAS_DEV_GONE, &dev->state))
goto out;
task = sas_alloc_task(GFP_ATOMIC);
if (!task)
goto out;
task->dev = dev;
task->task_proto = SAS_PROTOCOL_STP;
task->task_done = sas_ata_task_done;
/* For NCQ commands, zero out the tag libata assigned us */
if (ata_is_ncq(qc->tf.protocol))
qc->tf.nsect = 0;
ata_tf_to_fis(&qc->tf, qc->dev->link->pmp, 1, (u8 *)&task->ata_task.fis);
task->uldd_task = qc;
if (ata_is_atapi(qc->tf.protocol)) {
memcpy(task->ata_task.atapi_packet, qc->cdb, qc->dev->cdb_len);
task->total_xfer_len = qc->nbytes;
task->num_scatter = qc->n_elem;
task->data_dir = qc->dma_dir;
} else if (!ata_is_data(qc->tf.protocol)) {
task->data_dir = DMA_NONE;
} else {
for_each_sg(qc->sg, sg, qc->n_elem, si)
xfer += sg_dma_len(sg);
task->total_xfer_len = xfer;
task->num_scatter = si;
task->data_dir = qc->dma_dir;
}
task->scatter = qc->sg;
qc->lldd_task = task;
task->ata_task.use_ncq = ata_is_ncq(qc->tf.protocol);
task->ata_task.dma_xfer = ata_is_dma(qc->tf.protocol);
if (qc->flags & ATA_QCFLAG_RESULT_TF)
task->ata_task.return_fis_on_success = 1;
if (qc->scsicmd)
ASSIGN_SAS_TASK(qc->scsicmd, task);
ret = i->dft->lldd_execute_task(task, GFP_ATOMIC);
if (ret) {
pr_debug("lldd_execute_task returned: %d\n", ret);
if (qc->scsicmd)
ASSIGN_SAS_TASK(qc->scsicmd, NULL);
sas_free_task(task);
qc->lldd_task = NULL;
ret = AC_ERR_SYSTEM;
}
out:
spin_lock(ap->lock);
return ret;
}
static void sas_ata_qc_fill_rtf(struct ata_queued_cmd *qc)
{
struct domain_device *dev = qc->ap->private_data;
ata_tf_from_fis(dev->sata_dev.fis, &qc->result_tf);
}
static struct sas_internal *dev_to_sas_internal(struct domain_device *dev)
{
return to_sas_internal(dev->port->ha->shost->transportt);
}
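/*
* Classify the attached ATA device from the initial D2H Register FIS
* saved in dev->frame_rcvd; a device still in SAS_SATA_PENDING state is
* reported as ATA_DEV_UNKNOWN.
*/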
static int sas_get_ata_command_set(struct domain_device *dev)
{
struct ata_taskfile tf;
if (dev->dev_type == SAS_SATA_PENDING)
return ATA_DEV_UNKNOWN;
ata_tf_from_fis(dev->frame_rcvd, &tf);
return ata_dev_classify(&tf);
}
int sas_get_ata_info(struct domain_device *dev, struct ex_phy *phy)
{
if (phy->attached_tproto & SAS_PROTOCOL_STP)
dev->tproto = phy->attached_tproto;
if (phy->attached_sata_dev)
dev->tproto |= SAS_SATA_DEV;
if (phy->attached_dev_type == SAS_SATA_PENDING)
dev->dev_type = SAS_SATA_PENDING;
else {
int res;
dev->dev_type = SAS_SATA_DEV;
res = sas_get_report_phy_sata(dev->parent, phy->phy_id,
&dev->sata_dev.rps_resp);
if (res) {
pr_debug("report phy sata to %016llx:%02d returned 0x%x\n",
SAS_ADDR(dev->parent->sas_addr),
phy->phy_id, res);
return res;
}
memcpy(dev->frame_rcvd, &dev->sata_dev.rps_resp.rps.fis,
sizeof(struct dev_to_host_fis));
dev->sata_dev.class = sas_get_ata_command_set(dev);
}
return 0;
}
static int sas_ata_clear_pending(struct domain_device *dev, struct ex_phy *phy)
{
int res;
/* we weren't pending, so successfully end the reset sequence now */
if (dev->dev_type != SAS_SATA_PENDING)
return 1;
/* hmmm, if this succeeds do we need to repost the domain_device to the
* lldd so it can pick up new parameters?
*/
res = sas_get_ata_info(dev, phy);
if (res)
return 0; /* retry */
else
return 1;
}
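/*
* Check, via the parent expander, what is now attached at the device's
* phy: returns 1 once a full end device is present, 0 while the
* attached device is still SAS_SATA_PENDING, and a negative errno on
* failure.
*/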
int smp_ata_check_ready_type(struct ata_link *link)
{
struct domain_device *dev = link->ap->private_data;
struct sas_phy *phy = sas_get_local_phy(dev);
struct domain_device *ex_dev = dev->parent;
enum sas_device_type type = SAS_PHY_UNUSED;
u8 sas_addr[SAS_ADDR_SIZE];
int res;
res = sas_get_phy_attached_dev(ex_dev, phy->number, sas_addr, &type);
sas_put_local_phy(phy);
if (res)
return res;
switch (type) {
case SAS_SATA_PENDING:
return 0;
case SAS_END_DEVICE:
return 1;
default:
return -ENODEV;
}
}
EXPORT_SYMBOL_GPL(smp_ata_check_ready_type);
static int smp_ata_check_ready(struct ata_link *link)
{
int res;
struct ata_port *ap = link->ap;
struct domain_device *dev = ap->private_data;
struct domain_device *ex_dev = dev->parent;
struct sas_phy *phy = sas_get_local_phy(dev);
struct ex_phy *ex_phy = &ex_dev->ex_dev.ex_phy[phy->number];
res = sas_ex_phy_discover(ex_dev, phy->number);
sas_put_local_phy(phy);
/* break the wait early if the expander is unreachable,
* otherwise keep polling
*/
if (res == -ECOMM)
return res;
if (res != SMP_RESP_FUNC_ACC)
return 0;
switch (ex_phy->attached_dev_type) {
case SAS_SATA_PENDING:
return 0;
case SAS_END_DEVICE:
if (ex_phy->attached_sata_dev)
return sas_ata_clear_pending(dev, ex_phy);
fallthrough;
default:
return -ENODEV;
}
}
static int local_ata_check_ready(struct ata_link *link)
{
struct ata_port *ap = link->ap;
struct domain_device *dev = ap->private_data;
struct sas_internal *i = dev_to_sas_internal(dev);
if (i->dft->lldd_ata_check_ready)
return i->dft->lldd_ata_check_ready(dev);
else {
/* lldd's that don't implement 'ready' checking get the
* old default behavior of not coordinating reset
* recovery with libata
*/
return 1;
}
}
static int sas_ata_printk(const char *level, const struct domain_device *ddev,
const char *fmt, ...)
{
struct ata_port *ap = ddev->sata_dev.ap;
struct device *dev = &ddev->rphy->dev;
struct va_format vaf;
va_list args;
int r;
va_start(args, fmt);
vaf.fmt = fmt;
vaf.va = &args;
r = printk("%s" SAS_FMT "ata%u: %s: %pV",
level, ap->print_id, dev_name(dev), &vaf);
va_end(args);
return r;
}
static int sas_ata_wait_after_reset(struct domain_device *dev, unsigned long deadline)
{
struct sata_device *sata_dev = &dev->sata_dev;
int (*check_ready)(struct ata_link *link);
struct ata_port *ap = sata_dev->ap;
struct ata_link *link = &ap->link;
struct sas_phy *phy;
int ret;
phy = sas_get_local_phy(dev);
if (scsi_is_sas_phy_local(phy))
check_ready = local_ata_check_ready;
else
check_ready = smp_ata_check_ready;
sas_put_local_phy(phy);
ret = ata_wait_after_reset(link, deadline, check_ready);
if (ret && ret != -EAGAIN)
sas_ata_printk(KERN_ERR, dev, "reset failed (errno=%d)\n", ret);
return ret;
}
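/*
* libata ->hardreset hook: ask the LLDD for an I_T nexus reset, then
* wait for the link to become ready and report the device class.
*/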
static int sas_ata_hard_reset(struct ata_link *link, unsigned int *class,
unsigned long deadline)
{
struct ata_port *ap = link->ap;
struct domain_device *dev = ap->private_data;
struct sas_internal *i = dev_to_sas_internal(dev);
int ret;
ret = i->dft->lldd_I_T_nexus_reset(dev);
if (ret == -ENODEV)
return ret;
if (ret != TMF_RESP_FUNC_COMPLETE)
sas_ata_printk(KERN_DEBUG, dev, "Unable to reset ata device?\n");
ret = sas_ata_wait_after_reset(dev, deadline);
*class = dev->sata_dev.class;
ap->cbl = ATA_CBL_SATA;
return ret;
}
/*
* notify the lldd to forget the sas_task for this internal ata command
* that bypasses scsi-eh
*/
static void sas_ata_internal_abort(struct sas_task *task)
{
struct sas_internal *si = dev_to_sas_internal(task->dev);
unsigned long flags;
int res;
spin_lock_irqsave(&task->task_state_lock, flags);
if (task->task_state_flags & SAS_TASK_STATE_ABORTED ||
task->task_state_flags & SAS_TASK_STATE_DONE) {
spin_unlock_irqrestore(&task->task_state_lock, flags);
pr_debug("%s: Task %p already finished.\n", __func__, task);
goto out;
}
task->task_state_flags |= SAS_TASK_STATE_ABORTED;
spin_unlock_irqrestore(&task->task_state_lock, flags);
res = si->dft->lldd_abort_task(task);
spin_lock_irqsave(&task->task_state_lock, flags);
if (task->task_state_flags & SAS_TASK_STATE_DONE ||
res == TMF_RESP_FUNC_COMPLETE) {
spin_unlock_irqrestore(&task->task_state_lock, flags);
goto out;
}
/* XXX we are not prepared to deal with ->lldd_abort_task()
* failures. TODO: lldds need to unconditionally forget about
* aborted ata tasks, otherwise we (likely) leak the sas task
* here
*/
pr_warn("%s: Task %p leaked.\n", __func__, task);
if (!(task->task_state_flags & SAS_TASK_STATE_DONE))
task->task_state_flags &= ~SAS_TASK_STATE_ABORTED;
spin_unlock_irqrestore(&task->task_state_lock, flags);
return;
out:
sas_free_task(task);
}
static void sas_ata_post_internal(struct ata_queued_cmd *qc)
{
if (qc->flags & ATA_QCFLAG_EH)
qc->err_mask |= AC_ERR_OTHER;
if (qc->err_mask) {
/*
* Find the sas_task and kill it. By this point, libata
* has decided to kill the qc and has frozen the port.
* In this state sas_ata_task_done() will no longer free
* the sas_task, so we need to notify the lldd (via
* ->lldd_abort_task) that the task is dead and free it
* ourselves.
*/
struct sas_task *task = qc->lldd_task;
qc->lldd_task = NULL;
if (!task)
return;
task->uldd_task = NULL;
sas_ata_internal_abort(task);
}
}
static void sas_ata_set_dmamode(struct ata_port *ap, struct ata_device *ata_dev)
{
struct domain_device *dev = ap->private_data;
struct sas_internal *i = dev_to_sas_internal(dev);
if (i->dft->lldd_ata_set_dmamode)
i->dft->lldd_ata_set_dmamode(dev);
}
static void sas_ata_sched_eh(struct ata_port *ap)
{
struct domain_device *dev = ap->private_data;
struct sas_ha_struct *ha = dev->port->ha;
unsigned long flags;
spin_lock_irqsave(&ha->lock, flags);
if (!test_and_set_bit(SAS_DEV_EH_PENDING, &dev->state))
ha->eh_active++;
ata_std_sched_eh(ap);
spin_unlock_irqrestore(&ha->lock, flags);
}
void sas_ata_end_eh(struct ata_port *ap)
{
struct domain_device *dev = ap->private_data;
struct sas_ha_struct *ha = dev->port->ha;
unsigned long flags;
spin_lock_irqsave(&ha->lock, flags);
if (test_and_clear_bit(SAS_DEV_EH_PENDING, &dev->state))
ha->eh_active--;
spin_unlock_irqrestore(&ha->lock, flags);
}
static int sas_ata_prereset(struct ata_link *link, unsigned long deadline)
{
struct ata_port *ap = link->ap;
struct domain_device *dev = ap->private_data;
struct sas_phy *local_phy = sas_get_local_phy(dev);
int res = 0;
if (!local_phy->enabled || test_bit(SAS_DEV_GONE, &dev->state))
res = -ENOENT;
sas_put_local_phy(local_phy);
return res;
}
static struct ata_port_operations sas_sata_ops = {
.prereset = sas_ata_prereset,
.hardreset = sas_ata_hard_reset,
.error_handler = ata_std_error_handler,
.post_internal_cmd = sas_ata_post_internal,
.qc_defer = ata_std_qc_defer,
.qc_prep = ata_noop_qc_prep,
.qc_issue = sas_ata_qc_issue,
.qc_fill_rtf = sas_ata_qc_fill_rtf,
.set_dmamode = sas_ata_set_dmamode,
.sched_eh = sas_ata_sched_eh,
.end_eh = sas_ata_end_eh,
};
static struct ata_port_info sata_port_info = {
.flags = ATA_FLAG_SATA | ATA_FLAG_PIO_DMA | ATA_FLAG_NCQ |
ATA_FLAG_SAS_HOST | ATA_FLAG_FPDMA_AUX,
.pio_mask = ATA_PIO4,
.mwdma_mask = ATA_MWDMA2,
.udma_mask = ATA_UDMA6,
.port_ops = &sas_sata_ops
};
int sas_ata_init(struct domain_device *found_dev)
{
struct sas_ha_struct *ha = found_dev->port->ha;
struct Scsi_Host *shost = ha->shost;
struct ata_host *ata_host;
struct ata_port *ap;
int rc;
ata_host = kzalloc(sizeof(*ata_host), GFP_KERNEL);
if (!ata_host) {
pr_err("ata host alloc failed.\n");
return -ENOMEM;
}
ata_host_init(ata_host, ha->dev, &sas_sata_ops);
ap = ata_sas_port_alloc(ata_host, &sata_port_info, shost);
if (!ap) {
pr_err("ata_sas_port_alloc failed.\n");
rc = -ENODEV;
goto free_host;
}
ap->private_data = found_dev;
ap->cbl = ATA_CBL_SATA;
ap->scsi_host = shost;
rc = ata_sas_tport_add(ata_host->dev, ap);
if (rc)
goto destroy_port;
found_dev->sata_dev.ata_host = ata_host;
found_dev->sata_dev.ap = ap;
return 0;
destroy_port:
kfree(ap);
free_host:
ata_host_put(ata_host);
return rc;
}
void sas_ata_task_abort(struct sas_task *task)
{
struct ata_queued_cmd *qc = task->uldd_task;
struct completion *waiting;
/* Bounce SCSI-initiated commands to the SCSI EH */
if (qc->scsicmd) {
blk_abort_request(scsi_cmd_to_rq(qc->scsicmd));
return;
}
/* Internal command, fake a timeout and complete. */
qc->flags &= ~ATA_QCFLAG_ACTIVE;
qc->flags |= ATA_QCFLAG_EH;
qc->err_mask |= AC_ERR_TIMEOUT;
waiting = qc->private_data;
complete(waiting);
}
void sas_probe_sata(struct asd_sas_port *port)
{
struct domain_device *dev, *n;
mutex_lock(&port->ha->disco_mutex);
list_for_each_entry(dev, &port->disco_list, disco_list_node) {
if (!dev_is_sata(dev))
continue;
ata_port_probe(dev->sata_dev.ap);
}
mutex_unlock(&port->ha->disco_mutex);
list_for_each_entry_safe(dev, n, &port->disco_list, disco_list_node) {
if (!dev_is_sata(dev))
continue;
sas_ata_wait_eh(dev);
/* if libata could not bring the link up, don't surface
* the device
*/
if (!ata_dev_enabled(sas_to_ata_dev(dev)))
sas_fail_probe(dev, __func__, -ENODEV);
}
}
int sas_ata_add_dev(struct domain_device *parent, struct ex_phy *phy,
struct domain_device *child, int phy_id)
{
struct sas_rphy *rphy;
int ret;
if (child->linkrate > parent->min_linkrate) {
struct sas_phy *cphy = child->phy;
enum sas_linkrate min_prate = cphy->minimum_linkrate,
parent_min_lrate = parent->min_linkrate,
min_linkrate = (min_prate > parent_min_lrate) ?
parent_min_lrate : 0;
struct sas_phy_linkrates rates = {
.maximum_linkrate = parent->min_linkrate,
.minimum_linkrate = min_linkrate,
};
pr_notice("ex %016llx phy%02d SATA device linkrate > min pathway connection rate, attempting to lower device linkrate\n",
SAS_ADDR(child->sas_addr), phy_id);
ret = sas_smp_phy_control(parent, phy_id,
PHY_FUNC_LINK_RESET, &rates);
if (ret) {
pr_err("ex %016llx phy%02d SATA device could not set linkrate (%d)\n",
SAS_ADDR(child->sas_addr), phy_id, ret);
return ret;
}
pr_notice("ex %016llx phy%02d SATA device set linkrate successfully\n",
SAS_ADDR(child->sas_addr), phy_id);
child->linkrate = child->min_linkrate;
}
ret = sas_get_ata_info(child, phy);
if (ret)
return ret;
sas_init_dev(child);
ret = sas_ata_init(child);
if (ret)
return ret;
rphy = sas_end_device_alloc(phy->port);
if (!rphy)
return -ENOMEM;
rphy->identify.phy_identifier = phy_id;
child->rphy = rphy;
get_device(&rphy->dev);
list_add_tail(&child->disco_list_node, &parent->port->disco_list);
ret = sas_discover_sata(child);
if (ret) {
pr_notice("sas_discover_sata() for device %16llx at %016llx:%02d returned 0x%x\n",
SAS_ADDR(child->sas_addr),
SAS_ADDR(parent->sas_addr), phy_id, ret);
sas_rphy_free(child->rphy);
list_del(&child->disco_list_node);
return ret;
}
return 0;
}
static void sas_ata_flush_pm_eh(struct asd_sas_port *port, const char *func)
{
struct domain_device *dev, *n;
list_for_each_entry_safe(dev, n, &port->dev_list, dev_list_node) {
if (!dev_is_sata(dev))
continue;
sas_ata_wait_eh(dev);
/* if libata failed to power manage the device, tear it down */
if (ata_dev_disabled(sas_to_ata_dev(dev)))
sas_fail_probe(dev, func, -ENODEV);
}
}
void sas_suspend_sata(struct asd_sas_port *port)
{
struct domain_device *dev;
mutex_lock(&port->ha->disco_mutex);
list_for_each_entry(dev, &port->dev_list, dev_list_node) {
struct sata_device *sata;
if (!dev_is_sata(dev))
continue;
sata = &dev->sata_dev;
if (sata->ap->pm_mesg.event == PM_EVENT_SUSPEND)
continue;
ata_sas_port_suspend(sata->ap);
}
mutex_unlock(&port->ha->disco_mutex);
sas_ata_flush_pm_eh(port, __func__);
}
void sas_resume_sata(struct asd_sas_port *port)
{
struct domain_device *dev;
mutex_lock(&port->ha->disco_mutex);
list_for_each_entry(dev, &port->dev_list, dev_list_node) {
struct sata_device *sata;
if (!dev_is_sata(dev))
continue;
sata = &dev->sata_dev;
if (sata->ap->pm_mesg.event == PM_EVENT_ON)
continue;
ata_sas_port_resume(sata->ap);
}
mutex_unlock(&port->ha->disco_mutex);
sas_ata_flush_pm_eh(port, __func__);
}
/**
* sas_discover_sata - discover an STP/SATA domain device
* @dev: pointer to struct domain_device of interest
*
* Devices directly attached to a HA port, have no parents. All other
* devices do, and should have their "parent" pointer set appropriately
* before calling this function.
*/
int sas_discover_sata(struct domain_device *dev)
{
if (dev->dev_type == SAS_SATA_PM)
return -ENODEV;
dev->sata_dev.class = sas_get_ata_command_set(dev);
sas_fill_in_rphy(dev, dev->rphy);
return sas_notify_lldd_dev_found(dev);
}
static void async_sas_ata_eh(void *data, async_cookie_t cookie)
{
struct domain_device *dev = data;
struct ata_port *ap = dev->sata_dev.ap;
struct sas_ha_struct *ha = dev->port->ha;
sas_ata_printk(KERN_DEBUG, dev, "dev error handler\n");
ata_scsi_port_error_handler(ha->shost, ap);
sas_put_device(dev);
}
void sas_ata_strategy_handler(struct Scsi_Host *shost)
{
struct sas_ha_struct *sas_ha = SHOST_TO_SAS_HA(shost);
ASYNC_DOMAIN_EXCLUSIVE(async);
int i;
/* it's ok to defer revalidation events during ata eh, these
* disks are in one of three states:
* 1/ present for initial domain discovery, and these
* resets will cause bcn flutters
* 2/ hot removed, we'll discover that after eh fails
* 3/ hot added after initial discovery, lost the race, and need
* to catch the next train.
*/
sas_disable_revalidation(sas_ha);
spin_lock_irq(&sas_ha->phy_port_lock);
for (i = 0; i < sas_ha->num_phys; i++) {
struct asd_sas_port *port = sas_ha->sas_port[i];
struct domain_device *dev;
spin_lock(&port->dev_list_lock);
list_for_each_entry(dev, &port->dev_list, dev_list_node) {
if (!dev_is_sata(dev))
continue;
/* hold a reference over eh since we may be
* racing with final remove once all commands
* are completed
*/
kref_get(&dev->kref);
async_schedule_domain(async_sas_ata_eh, dev, &async);
}
spin_unlock(&port->dev_list_lock);
}
spin_unlock_irq(&sas_ha->phy_port_lock);
async_synchronize_full_domain(&async);
sas_enable_revalidation(sas_ha);
}
void sas_ata_eh(struct Scsi_Host *shost, struct list_head *work_q)
{
struct scsi_cmnd *cmd, *n;
struct domain_device *eh_dev;
do {
LIST_HEAD(sata_q);
eh_dev = NULL;
list_for_each_entry_safe(cmd, n, work_q, eh_entry) {
struct domain_device *ddev = cmd_to_domain_dev(cmd);
if (!dev_is_sata(ddev) || TO_SAS_TASK(cmd))
continue;
if (eh_dev && eh_dev != ddev)
continue;
eh_dev = ddev;
list_move(&cmd->eh_entry, &sata_q);
}
if (!list_empty(&sata_q)) {
struct ata_port *ap = eh_dev->sata_dev.ap;
sas_ata_printk(KERN_DEBUG, eh_dev, "cmd error handler\n");
ata_scsi_cmd_error_handler(shost, ap, &sata_q);
/*
* ata's error handler may leave the cmd on the list
* so make sure they don't remain on a stack list
* about to go out of scope.
*
* This looks strange, since the commands are
* now part of no list, but the next error
* action will be ata_port_error_handler()
* which takes no list and sweeps them up
* anyway from the ata tag array.
*/
while (!list_empty(&sata_q))
list_del_init(sata_q.next);
}
} while (eh_dev);
}
void sas_ata_schedule_reset(struct domain_device *dev)
{
struct ata_eh_info *ehi;
struct ata_port *ap;
unsigned long flags;
if (!dev_is_sata(dev))
return;
ap = dev->sata_dev.ap;
ehi = &ap->link.eh_info;
spin_lock_irqsave(ap->lock, flags);
ehi->err_mask |= AC_ERR_TIMEOUT;
ehi->action |= ATA_EH_RESET;
ata_port_schedule_eh(ap);
spin_unlock_irqrestore(ap->lock, flags);
}
EXPORT_SYMBOL_GPL(sas_ata_schedule_reset);
void sas_ata_wait_eh(struct domain_device *dev)
{
struct ata_port *ap;
if (!dev_is_sata(dev))
return;
ap = dev->sata_dev.ap;
ata_port_wait_eh(ap);
}
void sas_ata_device_link_abort(struct domain_device *device, bool force_reset)
{
struct ata_port *ap = device->sata_dev.ap;
struct ata_link *link = &ap->link;
unsigned long flags;
spin_lock_irqsave(ap->lock, flags);
device->sata_dev.fis[2] = ATA_ERR | ATA_DRDY; /* tf status */
device->sata_dev.fis[3] = ATA_ABORTED; /* tf error */
link->eh_info.err_mask |= AC_ERR_DEV;
if (force_reset)
link->eh_info.action |= ATA_EH_RESET;
ata_link_abort(link);
spin_unlock_irqrestore(ap->lock, flags);
}
EXPORT_SYMBOL_GPL(sas_ata_device_link_abort);
int sas_execute_ata_cmd(struct domain_device *device, u8 *fis, int force_phy_id)
{
struct sas_tmf_task tmf_task = {};
return sas_execute_tmf(device, fis, sizeof(struct host_to_dev_fis),
force_phy_id, &tmf_task);
}
EXPORT_SYMBOL_GPL(sas_execute_ata_cmd);
| linux-master | drivers/scsi/libsas/sas_ata.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Serial Attached SCSI (SAS) Port class
*
* Copyright (C) 2005 Adaptec, Inc. All rights reserved.
* Copyright (C) 2005 Luben Tuikov <[email protected]>
*/
#include "sas_internal.h"
#include <scsi/scsi_transport.h>
#include <scsi/scsi_transport_sas.h>
#include "scsi_sas_internal.h"
static bool phy_is_wideport_member(struct asd_sas_port *port, struct asd_sas_phy *phy)
{
struct sas_ha_struct *sas_ha = phy->ha;
if (memcmp(port->attached_sas_addr, phy->attached_sas_addr,
SAS_ADDR_SIZE) != 0 || (sas_ha->strict_wide_ports &&
memcmp(port->sas_addr, phy->sas_addr, SAS_ADDR_SIZE) != 0))
return false;
return true;
}
static void sas_resume_port(struct asd_sas_phy *phy)
{
struct domain_device *dev, *n;
struct asd_sas_port *port = phy->port;
struct sas_ha_struct *sas_ha = phy->ha;
struct sas_internal *si = to_sas_internal(sas_ha->shost->transportt);
if (si->dft->lldd_port_formed)
si->dft->lldd_port_formed(phy);
if (port->suspended)
port->suspended = 0;
else {
/* we only need to handle "link returned" actions once */
return;
}
/* if the port came back:
* 1/ presume every device came back
* 2/ force the next revalidation to check all expander phys
*/
list_for_each_entry_safe(dev, n, &port->dev_list, dev_list_node) {
int i, rc;
rc = sas_notify_lldd_dev_found(dev);
if (rc) {
sas_unregister_dev(port, dev);
sas_destruct_devices(port);
continue;
}
if (dev_is_expander(dev->dev_type)) {
dev->ex_dev.ex_change_count = -1;
for (i = 0; i < dev->ex_dev.num_phys; i++) {
struct ex_phy *phy = &dev->ex_dev.ex_phy[i];
phy->phy_change_count = -1;
}
}
}
sas_discover_event(port, DISCE_RESUME);
}
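/*
* Add the phy to the port's phy list and phy mask; the first phy of a
* new port seeds the port's SAS addresses, protocols, OOB mode and link
* rate, while additional wide-port members only raise the link rate.
*/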
static void sas_form_port_add_phy(struct asd_sas_port *port,
struct asd_sas_phy *phy, bool wideport)
{
list_add_tail(&phy->port_phy_el, &port->phy_list);
sas_phy_set_target(phy, port->port_dev);
phy->port = port;
port->num_phys++;
port->phy_mask |= (1U << phy->id);
if (wideport)
pr_debug("phy%d matched wide port%d\n", phy->id,
port->id);
else
memcpy(port->sas_addr, phy->sas_addr, SAS_ADDR_SIZE);
if (*(u64 *)port->attached_sas_addr == 0) {
memcpy(port->attached_sas_addr, phy->attached_sas_addr,
SAS_ADDR_SIZE);
port->iproto = phy->iproto;
port->tproto = phy->tproto;
port->oob_mode = phy->oob_mode;
port->linkrate = phy->linkrate;
} else {
port->linkrate = max(port->linkrate, phy->linkrate);
}
}
/**
* sas_form_port - add this phy to a port
* @phy: the phy of interest
*
* This function adds this phy to an existing port, thus creating a wide
* port, or it creates a port and adds the phy to the port.
*/
static void sas_form_port(struct asd_sas_phy *phy)
{
int i;
struct sas_ha_struct *sas_ha = phy->ha;
struct asd_sas_port *port = phy->port;
struct domain_device *port_dev = NULL;
struct sas_internal *si =
to_sas_internal(sas_ha->shost->transportt);
unsigned long flags;
if (port) {
if (!phy_is_wideport_member(port, phy))
sas_deform_port(phy, 0);
else if (phy->suspended) {
phy->suspended = 0;
sas_resume_port(phy);
/* phy came back, try to cancel the timeout */
wake_up(&sas_ha->eh_wait_q);
return;
} else {
pr_info("%s: phy%d belongs to port%d already(%d)!\n",
__func__, phy->id, phy->port->id,
phy->port->num_phys);
return;
}
}
/* see if the phy should be part of a wide port */
spin_lock_irqsave(&sas_ha->phy_port_lock, flags);
for (i = 0; i < sas_ha->num_phys; i++) {
port = sas_ha->sas_port[i];
spin_lock(&port->phy_list_lock);
if (*(u64 *) port->sas_addr &&
phy_is_wideport_member(port, phy) && port->num_phys > 0) {
/* wide port */
port_dev = port->port_dev;
sas_form_port_add_phy(port, phy, true);
spin_unlock(&port->phy_list_lock);
break;
}
spin_unlock(&port->phy_list_lock);
}
/* The phy does not match any existing port, create a new one */
if (i == sas_ha->num_phys) {
for (i = 0; i < sas_ha->num_phys; i++) {
port = sas_ha->sas_port[i];
spin_lock(&port->phy_list_lock);
if (*(u64 *)port->sas_addr == 0
&& port->num_phys == 0) {
port_dev = port->port_dev;
sas_form_port_add_phy(port, phy, false);
spin_unlock(&port->phy_list_lock);
break;
}
spin_unlock(&port->phy_list_lock);
}
if (i >= sas_ha->num_phys) {
pr_err("%s: couldn't find a free port, bug?\n",
__func__);
spin_unlock_irqrestore(&sas_ha->phy_port_lock, flags);
return;
}
}
spin_unlock_irqrestore(&sas_ha->phy_port_lock, flags);
if (!port->port) {
port->port = sas_port_alloc(phy->phy->dev.parent, port->id);
BUG_ON(!port->port);
sas_port_add(port->port);
}
sas_port_add_phy(port->port, phy->phy);
pr_debug("%s added to %s, phy_mask:0x%x (%016llx)\n",
dev_name(&phy->phy->dev), dev_name(&port->port->dev),
port->phy_mask,
SAS_ADDR(port->attached_sas_addr));
if (port_dev)
port_dev->pathways = port->num_phys;
/* Tell the LLDD about this port formation. */
if (si->dft->lldd_port_formed)
si->dft->lldd_port_formed(phy);
sas_discover_event(phy->port, DISCE_DISCOVER_DOMAIN);
/* Only insert a revalidate event after initial discovery */
if (port_dev && dev_is_expander(port_dev->dev_type)) {
struct expander_device *ex_dev = &port_dev->ex_dev;
ex_dev->ex_change_count = -1;
sas_discover_event(port, DISCE_REVALIDATE_DOMAIN);
}
flush_workqueue(sas_ha->disco_q);
}
/**
* sas_deform_port - remove this phy from the port it belongs to
* @phy: the phy of interest
* @gone: whether or not the PHY is gone
*
* This is called when the physical link to the other phy has been
* lost (on this phy), in Event thread context. We cannot delay here.
*/
void sas_deform_port(struct asd_sas_phy *phy, int gone)
{
struct sas_ha_struct *sas_ha = phy->ha;
struct asd_sas_port *port = phy->port;
struct sas_internal *si =
to_sas_internal(sas_ha->shost->transportt);
struct domain_device *dev;
unsigned long flags;
if (!port)
return; /* done by a phy event */
dev = port->port_dev;
if (dev)
dev->pathways--;
if (port->num_phys == 1) {
sas_unregister_domain_devices(port, gone);
sas_destruct_devices(port);
sas_port_delete(port->port);
port->port = NULL;
} else {
sas_port_delete_phy(port->port, phy->phy);
sas_device_set_phy(dev, port->port);
}
if (si->dft->lldd_port_deformed)
si->dft->lldd_port_deformed(phy);
spin_lock_irqsave(&sas_ha->phy_port_lock, flags);
spin_lock(&port->phy_list_lock);
list_del_init(&phy->port_phy_el);
sas_phy_set_target(phy, NULL);
phy->port = NULL;
port->num_phys--;
port->phy_mask &= ~(1U << phy->id);
if (port->num_phys == 0) {
INIT_LIST_HEAD(&port->phy_list);
memset(port->sas_addr, 0, SAS_ADDR_SIZE);
memset(port->attached_sas_addr, 0, SAS_ADDR_SIZE);
port->iproto = 0;
port->tproto = 0;
port->oob_mode = 0;
port->phy_mask = 0;
}
spin_unlock(&port->phy_list_lock);
spin_unlock_irqrestore(&sas_ha->phy_port_lock, flags);
/* Only insert revalidate event if the port still has members */
if (port->port && dev && dev_is_expander(dev->dev_type)) {
struct expander_device *ex_dev = &dev->ex_dev;
ex_dev->ex_change_count = -1;
sas_discover_event(port, DISCE_REVALIDATE_DOMAIN);
}
flush_workqueue(sas_ha->disco_q);
return;
}
/* ---------- SAS port events ---------- */
void sas_porte_bytes_dmaed(struct work_struct *work)
{
struct asd_sas_event *ev = to_asd_sas_event(work);
struct asd_sas_phy *phy = ev->phy;
sas_form_port(phy);
}
void sas_porte_broadcast_rcvd(struct work_struct *work)
{
struct asd_sas_event *ev = to_asd_sas_event(work);
struct asd_sas_phy *phy = ev->phy;
unsigned long flags;
u32 prim;
spin_lock_irqsave(&phy->sas_prim_lock, flags);
prim = phy->sas_prim;
spin_unlock_irqrestore(&phy->sas_prim_lock, flags);
pr_debug("broadcast received: %d\n", prim);
sas_discover_event(phy->port, DISCE_REVALIDATE_DOMAIN);
if (phy->port)
flush_workqueue(phy->port->ha->disco_q);
}
void sas_porte_link_reset_err(struct work_struct *work)
{
struct asd_sas_event *ev = to_asd_sas_event(work);
struct asd_sas_phy *phy = ev->phy;
sas_deform_port(phy, 1);
}
void sas_porte_timer_event(struct work_struct *work)
{
struct asd_sas_event *ev = to_asd_sas_event(work);
struct asd_sas_phy *phy = ev->phy;
sas_deform_port(phy, 1);
}
void sas_porte_hard_reset(struct work_struct *work)
{
struct asd_sas_event *ev = to_asd_sas_event(work);
struct asd_sas_phy *phy = ev->phy;
sas_deform_port(phy, 1);
}
/* ---------- SAS port registration ---------- */
static void sas_init_port(struct asd_sas_port *port,
struct sas_ha_struct *sas_ha, int i)
{
memset(port, 0, sizeof(*port));
port->id = i;
INIT_LIST_HEAD(&port->dev_list);
INIT_LIST_HEAD(&port->disco_list);
INIT_LIST_HEAD(&port->destroy_list);
INIT_LIST_HEAD(&port->sas_port_del_list);
spin_lock_init(&port->phy_list_lock);
INIT_LIST_HEAD(&port->phy_list);
port->ha = sas_ha;
spin_lock_init(&port->dev_list_lock);
}
int sas_register_ports(struct sas_ha_struct *sas_ha)
{
int i;
/* initialize the ports and discovery */
for (i = 0; i < sas_ha->num_phys; i++) {
struct asd_sas_port *port = sas_ha->sas_port[i];
sas_init_port(port, sas_ha, i);
sas_init_disc(&port->disc, port);
}
return 0;
}
void sas_unregister_ports(struct sas_ha_struct *sas_ha)
{
int i;
for (i = 0; i < sas_ha->num_phys; i++)
if (sas_ha->sas_phy[i]->port)
sas_deform_port(sas_ha->sas_phy[i], 0);
}
const work_func_t sas_port_event_fns[PORT_NUM_EVENTS] = {
[PORTE_BYTES_DMAED] = sas_porte_bytes_dmaed,
[PORTE_BROADCAST_RCVD] = sas_porte_broadcast_rcvd,
[PORTE_LINK_RESET_ERR] = sas_porte_link_reset_err,
[PORTE_TIMER_EVENT] = sas_porte_timer_event,
[PORTE_HARD_RESET] = sas_porte_hard_reset,
};
| linux-master | drivers/scsi/libsas/sas_port.c |
// SPDX-License-Identifier: GPL-2.0-only
#include "sas_internal.h"
#include <linux/kernel.h>
#include <linux/export.h>
#include <scsi/sas.h>
#include <scsi/libsas.h>
/* fill task_status_struct based on SSP response frame */
void sas_ssp_task_response(struct device *dev, struct sas_task *task,
struct ssp_response_iu *iu)
{
struct task_status_struct *tstat = &task->task_status;
tstat->resp = SAS_TASK_COMPLETE;
switch (iu->datapres) {
case SAS_DATAPRES_NO_DATA:
tstat->stat = iu->status;
break;
case SAS_DATAPRES_RESPONSE_DATA:
tstat->stat = iu->resp_data[3];
break;
case SAS_DATAPRES_SENSE_DATA:
tstat->stat = SAS_SAM_STAT_CHECK_CONDITION;
tstat->buf_valid_size =
min_t(int, SAS_STATUS_BUF_SIZE,
be32_to_cpu(iu->sense_data_len));
memcpy(tstat->buf, iu->sense_data, tstat->buf_valid_size);
if (iu->status != SAM_STAT_CHECK_CONDITION)
dev_warn(dev, "dev %016llx sent sense data, but stat(0x%x) is not CHECK CONDITION\n",
SAS_ADDR(task->dev->sas_addr), iu->status);
break;
default:
/* when datapres contains corrupt/unknown value... */
tstat->stat = SAS_SAM_STAT_CHECK_CONDITION;
}
}
EXPORT_SYMBOL_GPL(sas_ssp_task_response);
| linux-master | drivers/scsi/libsas/sas_task.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright 2008 Cisco Systems, Inc. All rights reserved.
* Copyright 2007 Nuova Systems, Inc. All rights reserved.
*/
#include <linux/string.h>
#include <linux/device.h>
#include <scsi/scsi_host.h>
#include "fnic.h"
static ssize_t fnic_show_state(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct fc_lport *lp = shost_priv(class_to_shost(dev));
struct fnic *fnic = lport_priv(lp);
return snprintf(buf, PAGE_SIZE, "%s\n", fnic_state_str[fnic->state]);
}
static ssize_t fnic_show_drv_version(struct device *dev,
struct device_attribute *attr, char *buf)
{
return snprintf(buf, PAGE_SIZE, "%s\n", DRV_VERSION);
}
static ssize_t fnic_show_link_state(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct fc_lport *lp = shost_priv(class_to_shost(dev));
return snprintf(buf, PAGE_SIZE, "%s\n", (lp->link_up)
? "Link Up" : "Link Down");
}
static DEVICE_ATTR(fnic_state, S_IRUGO, fnic_show_state, NULL);
static DEVICE_ATTR(drv_version, S_IRUGO, fnic_show_drv_version, NULL);
static DEVICE_ATTR(link_state, S_IRUGO, fnic_show_link_state, NULL);
static struct attribute *fnic_host_attrs[] = {
&dev_attr_fnic_state.attr,
&dev_attr_drv_version.attr,
&dev_attr_link_state.attr,
NULL,
};
static const struct attribute_group fnic_host_attr_group = {
.attrs = fnic_host_attrs
};
const struct attribute_group *fnic_host_groups[] = {
&fnic_host_attr_group,
NULL
};
| linux-master | drivers/scsi/fnic/fnic_attrs.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright 2008 Cisco Systems, Inc. All rights reserved.
* Copyright 2007 Nuova Systems, Inc. All rights reserved.
*/
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include "vnic_wq_copy.h"
void vnic_wq_copy_enable(struct vnic_wq_copy *wq)
{
iowrite32(1, &wq->ctrl->enable);
}
int vnic_wq_copy_disable(struct vnic_wq_copy *wq)
{
unsigned int wait;
iowrite32(0, &wq->ctrl->enable);
/* Wait for HW to ACK disable request */
for (wait = 0; wait < 100; wait++) {
if (!(ioread32(&wq->ctrl->running)))
return 0;
udelay(1);
}
printk(KERN_ERR "Failed to disable Copy WQ[%d],"
" fetch index=%d, posted_index=%d\n",
wq->index, ioread32(&wq->ctrl->fetch_index),
ioread32(&wq->ctrl->posted_index));
return -ENODEV;
}
void vnic_wq_copy_clean(struct vnic_wq_copy *wq,
void (*q_clean)(struct vnic_wq_copy *wq,
struct fcpio_host_req *wq_desc))
{
BUG_ON(ioread32(&wq->ctrl->enable));
if (vnic_wq_copy_desc_in_use(wq))
vnic_wq_copy_service(wq, -1, q_clean);
wq->to_use_index = wq->to_clean_index = 0;
iowrite32(0, &wq->ctrl->fetch_index);
iowrite32(0, &wq->ctrl->posted_index);
iowrite32(0, &wq->ctrl->error_status);
vnic_dev_clear_desc_ring(&wq->ring);
}
void vnic_wq_copy_free(struct vnic_wq_copy *wq)
{
struct vnic_dev *vdev;
vdev = wq->vdev;
vnic_dev_free_desc_ring(vdev, &wq->ring);
wq->ctrl = NULL;
}
int vnic_wq_copy_alloc(struct vnic_dev *vdev, struct vnic_wq_copy *wq,
unsigned int index, unsigned int desc_count,
unsigned int desc_size)
{
wq->index = index;
wq->vdev = vdev;
wq->to_use_index = wq->to_clean_index = 0;
wq->ctrl = vnic_dev_get_res(vdev, RES_TYPE_WQ, index);
if (!wq->ctrl) {
printk(KERN_ERR "Failed to hook COPY WQ[%d] resource\n", index);
return -EINVAL;
}
vnic_wq_copy_disable(wq);
return vnic_dev_alloc_desc_ring(vdev, &wq->ring, desc_count, desc_size);
}
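/*
* Program the copy WQ control registers: ring base and size, zeroed
* fetch/posted indices, and the associated completion queue and error
* interrupt.
*/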
void vnic_wq_copy_init(struct vnic_wq_copy *wq, unsigned int cq_index,
unsigned int error_interrupt_enable,
unsigned int error_interrupt_offset)
{
u64 paddr;
paddr = (u64)wq->ring.base_addr | VNIC_PADDR_TARGET;
writeq(paddr, &wq->ctrl->ring_base);
iowrite32(wq->ring.desc_count, &wq->ctrl->ring_size);
iowrite32(0, &wq->ctrl->fetch_index);
iowrite32(0, &wq->ctrl->posted_index);
iowrite32(cq_index, &wq->ctrl->cq_index);
iowrite32(error_interrupt_enable, &wq->ctrl->error_interrupt_enable);
iowrite32(error_interrupt_offset, &wq->ctrl->error_interrupt_offset);
}
| linux-master | drivers/scsi/fnic/vnic_wq_copy.c |
// SPDX-License-Identifier: GPL-2.0-only
// Copyright 2012 Cisco Systems, Inc. All rights reserved.
#include <linux/module.h>
#include <linux/mempool.h>
#include <linux/errno.h>
#include <linux/spinlock.h>
#include <linux/kallsyms.h>
#include <linux/time.h>
#include <linux/vmalloc.h>
#include "fnic_io.h"
#include "fnic.h"
unsigned int trace_max_pages;
static int fnic_max_trace_entries;
static unsigned long fnic_trace_buf_p;
static DEFINE_SPINLOCK(fnic_trace_lock);
static fnic_trace_dbg_t fnic_trace_entries;
int fnic_tracing_enabled = 1;
/* static char *fnic_fc_ctlr_trace_buf_p; */
static int fc_trace_max_entries;
static unsigned long fnic_fc_ctlr_trace_buf_p;
static fnic_trace_dbg_t fc_trace_entries;
int fnic_fc_tracing_enabled = 1;
int fnic_fc_trace_cleared = 1;
static DEFINE_SPINLOCK(fnic_fc_trace_lock);
/*
* fnic_trace_get_buf - Give the caller a buffer pointer to fill with trace information
*
* Description:
* This routine gets the next available trace buffer entry location @wr_idx
* from the allocated trace buffer pages and gives that memory location
* to the caller to store the trace information.
*
* Return Value:
* This routine returns a pointer to the next available trace entry
* @fnic_buf_head for the caller to fill with trace information.
*/
fnic_trace_data_t *fnic_trace_get_buf(void)
{
unsigned long fnic_buf_head;
unsigned long flags;
spin_lock_irqsave(&fnic_trace_lock, flags);
/*
* Get next available memory location for writing trace information
* at @wr_idx and increment @wr_idx
*/
fnic_buf_head =
fnic_trace_entries.page_offset[fnic_trace_entries.wr_idx];
fnic_trace_entries.wr_idx++;
/*
* If the trace buffer is full, wrap @wr_idx back around to
* start from zero
*/
if (fnic_trace_entries.wr_idx >= fnic_max_trace_entries)
fnic_trace_entries.wr_idx = 0;
/*
* If the write index @wr_idx has caught up with the read index @rd_idx,
* increment @rd_idx to drop the oldest entry in the trace buffer
*/
if (fnic_trace_entries.wr_idx == fnic_trace_entries.rd_idx) {
fnic_trace_entries.rd_idx++;
if (fnic_trace_entries.rd_idx >= fnic_max_trace_entries)
fnic_trace_entries.rd_idx = 0;
}
spin_unlock_irqrestore(&fnic_trace_lock, flags);
return (fnic_trace_data_t *)fnic_buf_head;
}
/*
* fnic_get_trace_data - Copy trace buffer to a memory file
* @fnic_dbgfs_t: pointer to debugfs trace buffer
*
* Description:
* This routine gathers the fnic trace debugfs data from the fnic_trace_data_t
* buffer and dumps it to fnic_dbgfs_t. It will start at the rd_idx entry in
* the log and process the log until the end of the buffer. Then it will gather
* from the beginning of the log and process until the current entry @wr_idx.
*
* Return Value:
* This routine returns the number of bytes that were dumped into fnic_dbgfs_t
*/
int fnic_get_trace_data(fnic_dbgfs_t *fnic_dbgfs_prt)
{
int rd_idx;
int wr_idx;
int len = 0;
unsigned long flags;
char str[KSYM_SYMBOL_LEN];
struct timespec64 val;
fnic_trace_data_t *tbp;
spin_lock_irqsave(&fnic_trace_lock, flags);
rd_idx = fnic_trace_entries.rd_idx;
wr_idx = fnic_trace_entries.wr_idx;
if (wr_idx < rd_idx) {
while (1) {
/* Start from read index @rd_idx */
tbp = (fnic_trace_data_t *)
fnic_trace_entries.page_offset[rd_idx];
if (!tbp) {
spin_unlock_irqrestore(&fnic_trace_lock, flags);
return 0;
}
/* Convert function pointer to function name */
if (sizeof(unsigned long) < 8) {
sprint_symbol(str, tbp->fnaddr.low);
jiffies_to_timespec64(tbp->timestamp.low, &val);
} else {
sprint_symbol(str, tbp->fnaddr.val);
jiffies_to_timespec64(tbp->timestamp.val, &val);
}
/*
* Dump trace buffer entry to memory file
* and increment read index @rd_idx
*/
len += scnprintf(fnic_dbgfs_prt->buffer + len,
(trace_max_pages * PAGE_SIZE * 3) - len,
"%16llu.%09lu %-50s %8x %8x %16llx %16llx "
"%16llx %16llx %16llx\n", (u64)val.tv_sec,
val.tv_nsec, str, tbp->host_no, tbp->tag,
tbp->data[0], tbp->data[1], tbp->data[2],
tbp->data[3], tbp->data[4]);
rd_idx++;
/*
* If rd_idx has reached the maximum number of trace
* entries, wrap it back to zero
*/
if (rd_idx > (fnic_max_trace_entries-1))
rd_idx = 0;
/*
* Continue dumping trace buffer entries into
* memory file till rd_idx reaches write index
*/
if (rd_idx == wr_idx)
break;
}
} else if (wr_idx > rd_idx) {
while (1) {
/* Start from read index @rd_idx */
tbp = (fnic_trace_data_t *)
fnic_trace_entries.page_offset[rd_idx];
if (!tbp) {
spin_unlock_irqrestore(&fnic_trace_lock, flags);
return 0;
}
/* Convert function pointer to function name */
if (sizeof(unsigned long) < 8) {
sprint_symbol(str, tbp->fnaddr.low);
jiffies_to_timespec64(tbp->timestamp.low, &val);
} else {
sprint_symbol(str, tbp->fnaddr.val);
jiffies_to_timespec64(tbp->timestamp.val, &val);
}
/*
* Dump trace buffer entry to memory file
* and increment read index @rd_idx
*/
len += scnprintf(fnic_dbgfs_prt->buffer + len,
(trace_max_pages * PAGE_SIZE * 3) - len,
"%16llu.%09lu %-50s %8x %8x %16llx %16llx "
"%16llx %16llx %16llx\n", (u64)val.tv_sec,
val.tv_nsec, str, tbp->host_no, tbp->tag,
tbp->data[0], tbp->data[1], tbp->data[2],
tbp->data[3], tbp->data[4]);
rd_idx++;
/*
* Continue dumping trace buffer entries into
* memory file till rd_idx reaches write index
*/
if (rd_idx == wr_idx)
break;
}
}
spin_unlock_irqrestore(&fnic_trace_lock, flags);
return len;
}
/*
* fnic_get_stats_data - Copy fnic stats buffer to a memory file
* @fnic_dbgfs_t: pointer to debugfs fnic stats buffer
*
* Description:
* This routine gathers the fnic stats debugfs data from the fnic_stats struct
* and dumps it to stats_debug_info.
*
* Return Value:
* This routine returns the number of bytes that were dumped into
* stats_debug_info
*/
int fnic_get_stats_data(struct stats_debug_info *debug,
struct fnic_stats *stats)
{
int len = 0;
int buf_size = debug->buf_size;
struct timespec64 val1, val2;
ktime_get_real_ts64(&val1);
len = scnprintf(debug->debug_buffer + len, buf_size - len,
"------------------------------------------\n"
"\t\tTime\n"
"------------------------------------------\n");
len += scnprintf(debug->debug_buffer + len, buf_size - len,
"Current time : [%lld:%ld]\n"
"Last stats reset time: [%lld:%09ld]\n"
"Last stats read time: [%lld:%ld]\n"
"delta since last reset: [%lld:%ld]\n"
"delta since last read: [%lld:%ld]\n",
(s64)val1.tv_sec, val1.tv_nsec,
(s64)stats->stats_timestamps.last_reset_time.tv_sec,
stats->stats_timestamps.last_reset_time.tv_nsec,
(s64)stats->stats_timestamps.last_read_time.tv_sec,
stats->stats_timestamps.last_read_time.tv_nsec,
(s64)timespec64_sub(val1, stats->stats_timestamps.last_reset_time).tv_sec,
timespec64_sub(val1, stats->stats_timestamps.last_reset_time).tv_nsec,
(s64)timespec64_sub(val1, stats->stats_timestamps.last_read_time).tv_sec,
timespec64_sub(val1, stats->stats_timestamps.last_read_time).tv_nsec);
stats->stats_timestamps.last_read_time = val1;
len += scnprintf(debug->debug_buffer + len, buf_size - len,
"------------------------------------------\n"
"\t\tIO Statistics\n"
"------------------------------------------\n");
len += scnprintf(debug->debug_buffer + len, buf_size - len,
"Number of Active IOs: %lld\nMaximum Active IOs: %lld\n"
"Number of IOs: %lld\nNumber of IO Completions: %lld\n"
"Number of IO Failures: %lld\nNumber of IO NOT Found: %lld\n"
"Number of Memory alloc Failures: %lld\n"
"Number of IOREQ Null: %lld\n"
"Number of SCSI cmd pointer Null: %lld\n"
"\nIO completion times: \n"
" < 10 ms : %lld\n"
" 10 ms - 100 ms : %lld\n"
" 100 ms - 500 ms : %lld\n"
" 500 ms - 5 sec: %lld\n"
" 5 sec - 10 sec: %lld\n"
" 10 sec - 30 sec: %lld\n"
" > 30 sec: %lld\n",
(u64)atomic64_read(&stats->io_stats.active_ios),
(u64)atomic64_read(&stats->io_stats.max_active_ios),
(u64)atomic64_read(&stats->io_stats.num_ios),
(u64)atomic64_read(&stats->io_stats.io_completions),
(u64)atomic64_read(&stats->io_stats.io_failures),
(u64)atomic64_read(&stats->io_stats.io_not_found),
(u64)atomic64_read(&stats->io_stats.alloc_failures),
(u64)atomic64_read(&stats->io_stats.ioreq_null),
(u64)atomic64_read(&stats->io_stats.sc_null),
(u64)atomic64_read(&stats->io_stats.io_btw_0_to_10_msec),
(u64)atomic64_read(&stats->io_stats.io_btw_10_to_100_msec),
(u64)atomic64_read(&stats->io_stats.io_btw_100_to_500_msec),
(u64)atomic64_read(&stats->io_stats.io_btw_500_to_5000_msec),
(u64)atomic64_read(&stats->io_stats.io_btw_5000_to_10000_msec),
(u64)atomic64_read(&stats->io_stats.io_btw_10000_to_30000_msec),
(u64)atomic64_read(&stats->io_stats.io_greater_than_30000_msec));
len += scnprintf(debug->debug_buffer + len, buf_size - len,
"\nCurrent Max IO time : %lld\n",
(u64)atomic64_read(&stats->io_stats.current_max_io_time));
len += scnprintf(debug->debug_buffer + len, buf_size - len,
"\n------------------------------------------\n"
"\t\tAbort Statistics\n"
"------------------------------------------\n");
len += scnprintf(debug->debug_buffer + len, buf_size - len,
"Number of Aborts: %lld\n"
"Number of Abort Failures: %lld\n"
"Number of Abort Driver Timeouts: %lld\n"
"Number of Abort FW Timeouts: %lld\n"
"Number of Abort IO NOT Found: %lld\n"
"Abort issued times: \n"
" < 6 sec : %lld\n"
" 6 sec - 20 sec : %lld\n"
" 20 sec - 30 sec : %lld\n"
" 30 sec - 40 sec : %lld\n"
" 40 sec - 50 sec : %lld\n"
" 50 sec - 60 sec : %lld\n"
" > 60 sec: %lld\n",
(u64)atomic64_read(&stats->abts_stats.aborts),
(u64)atomic64_read(&stats->abts_stats.abort_failures),
(u64)atomic64_read(&stats->abts_stats.abort_drv_timeouts),
(u64)atomic64_read(&stats->abts_stats.abort_fw_timeouts),
(u64)atomic64_read(&stats->abts_stats.abort_io_not_found),
(u64)atomic64_read(&stats->abts_stats.abort_issued_btw_0_to_6_sec),
(u64)atomic64_read(&stats->abts_stats.abort_issued_btw_6_to_20_sec),
(u64)atomic64_read(&stats->abts_stats.abort_issued_btw_20_to_30_sec),
(u64)atomic64_read(&stats->abts_stats.abort_issued_btw_30_to_40_sec),
(u64)atomic64_read(&stats->abts_stats.abort_issued_btw_40_to_50_sec),
(u64)atomic64_read(&stats->abts_stats.abort_issued_btw_50_to_60_sec),
(u64)atomic64_read(&stats->abts_stats.abort_issued_greater_than_60_sec));
len += scnprintf(debug->debug_buffer + len, buf_size - len,
"\n------------------------------------------\n"
"\t\tTerminate Statistics\n"
"------------------------------------------\n");
len += scnprintf(debug->debug_buffer + len, buf_size - len,
"Number of Terminates: %lld\n"
"Maximum Terminates: %lld\n"
"Number of Terminate Driver Timeouts: %lld\n"
"Number of Terminate FW Timeouts: %lld\n"
"Number of Terminate IO NOT Found: %lld\n"
"Number of Terminate Failures: %lld\n",
(u64)atomic64_read(&stats->term_stats.terminates),
(u64)atomic64_read(&stats->term_stats.max_terminates),
(u64)atomic64_read(&stats->term_stats.terminate_drv_timeouts),
(u64)atomic64_read(&stats->term_stats.terminate_fw_timeouts),
(u64)atomic64_read(&stats->term_stats.terminate_io_not_found),
(u64)atomic64_read(&stats->term_stats.terminate_failures));
len += scnprintf(debug->debug_buffer + len, buf_size - len,
"\n------------------------------------------\n"
"\t\tReset Statistics\n"
"------------------------------------------\n");
len += scnprintf(debug->debug_buffer + len, buf_size - len,
"Number of Device Resets: %lld\n"
"Number of Device Reset Failures: %lld\n"
"Number of Device Reset Aborts: %lld\n"
"Number of Device Reset Timeouts: %lld\n"
"Number of Device Reset Terminates: %lld\n"
"Number of FW Resets: %lld\n"
"Number of FW Reset Completions: %lld\n"
"Number of FW Reset Failures: %lld\n"
"Number of Fnic Reset: %lld\n"
"Number of Fnic Reset Completions: %lld\n"
"Number of Fnic Reset Failures: %lld\n",
(u64)atomic64_read(&stats->reset_stats.device_resets),
(u64)atomic64_read(&stats->reset_stats.device_reset_failures),
(u64)atomic64_read(&stats->reset_stats.device_reset_aborts),
(u64)atomic64_read(&stats->reset_stats.device_reset_timeouts),
(u64)atomic64_read(
&stats->reset_stats.device_reset_terminates),
(u64)atomic64_read(&stats->reset_stats.fw_resets),
(u64)atomic64_read(&stats->reset_stats.fw_reset_completions),
(u64)atomic64_read(&stats->reset_stats.fw_reset_failures),
(u64)atomic64_read(&stats->reset_stats.fnic_resets),
(u64)atomic64_read(
&stats->reset_stats.fnic_reset_completions),
(u64)atomic64_read(&stats->reset_stats.fnic_reset_failures));
len += scnprintf(debug->debug_buffer + len, buf_size - len,
"\n------------------------------------------\n"
"\t\tFirmware Statistics\n"
"------------------------------------------\n");
len += scnprintf(debug->debug_buffer + len, buf_size - len,
"Number of Active FW Requests %lld\n"
"Maximum FW Requests: %lld\n"
"Number of FW out of resources: %lld\n"
"Number of FW IO errors: %lld\n",
(u64)atomic64_read(&stats->fw_stats.active_fw_reqs),
(u64)atomic64_read(&stats->fw_stats.max_fw_reqs),
(u64)atomic64_read(&stats->fw_stats.fw_out_of_resources),
(u64)atomic64_read(&stats->fw_stats.io_fw_errs));
len += scnprintf(debug->debug_buffer + len, buf_size - len,
"\n------------------------------------------\n"
"\t\tVlan Discovery Statistics\n"
"------------------------------------------\n");
len += scnprintf(debug->debug_buffer + len, buf_size - len,
"Number of Vlan Discovery Requests Sent %lld\n"
"Vlan Response Received with no FCF VLAN ID: %lld\n"
"No solicitations recvd after vlan set, expiry count: %lld\n"
"Flogi rejects count: %lld\n",
(u64)atomic64_read(&stats->vlan_stats.vlan_disc_reqs),
(u64)atomic64_read(&stats->vlan_stats.resp_withno_vlanID),
(u64)atomic64_read(&stats->vlan_stats.sol_expiry_count),
(u64)atomic64_read(&stats->vlan_stats.flogi_rejects));
len += scnprintf(debug->debug_buffer + len, buf_size - len,
"\n------------------------------------------\n"
"\t\tOther Important Statistics\n"
"------------------------------------------\n");
jiffies_to_timespec64(stats->misc_stats.last_isr_time, &val1);
jiffies_to_timespec64(stats->misc_stats.last_ack_time, &val2);
len += scnprintf(debug->debug_buffer + len, buf_size - len,
"Last ISR time: %llu (%8llu.%09lu)\n"
"Last ACK time: %llu (%8llu.%09lu)\n"
"Max ISR jiffies: %llu\n"
"Max ISR time (ms) (0 denotes < 1 ms): %llu\n"
"Corr. work done: %llu\n"
"Number of ISRs: %lld\n"
"Maximum CQ Entries: %lld\n"
"Number of ACK index out of range: %lld\n"
"Number of data count mismatch: %lld\n"
"Number of FCPIO Timeouts: %lld\n"
"Number of FCPIO Aborted: %lld\n"
"Number of SGL Invalid: %lld\n"
"Number of Copy WQ Alloc Failures for ABTs: %lld\n"
"Number of Copy WQ Alloc Failures for Device Reset: %lld\n"
"Number of Copy WQ Alloc Failures for IOs: %lld\n"
"Number of no icmnd itmf Completions: %lld\n"
"Number of Check Conditions encountered: %lld\n"
"Number of QUEUE Fulls: %lld\n"
"Number of rport not ready: %lld\n"
"Number of receive frame errors: %lld\n",
(u64)stats->misc_stats.last_isr_time,
(s64)val1.tv_sec, val1.tv_nsec,
(u64)stats->misc_stats.last_ack_time,
(s64)val2.tv_sec, val2.tv_nsec,
(u64)atomic64_read(&stats->misc_stats.max_isr_jiffies),
(u64)atomic64_read(&stats->misc_stats.max_isr_time_ms),
(u64)atomic64_read(&stats->misc_stats.corr_work_done),
(u64)atomic64_read(&stats->misc_stats.isr_count),
(u64)atomic64_read(&stats->misc_stats.max_cq_entries),
(u64)atomic64_read(&stats->misc_stats.ack_index_out_of_range),
(u64)atomic64_read(&stats->misc_stats.data_count_mismatch),
(u64)atomic64_read(&stats->misc_stats.fcpio_timeout),
(u64)atomic64_read(&stats->misc_stats.fcpio_aborted),
(u64)atomic64_read(&stats->misc_stats.sgl_invalid),
(u64)atomic64_read(
&stats->misc_stats.abts_cpwq_alloc_failures),
(u64)atomic64_read(
&stats->misc_stats.devrst_cpwq_alloc_failures),
(u64)atomic64_read(&stats->misc_stats.io_cpwq_alloc_failures),
(u64)atomic64_read(&stats->misc_stats.no_icmnd_itmf_cmpls),
(u64)atomic64_read(&stats->misc_stats.check_condition),
(u64)atomic64_read(&stats->misc_stats.queue_fulls),
(u64)atomic64_read(&stats->misc_stats.rport_not_ready),
(u64)atomic64_read(&stats->misc_stats.frame_errors));
len += scnprintf(debug->debug_buffer + len, buf_size - len,
"Firmware reported port speed: %llu\n",
(u64)atomic64_read(
&stats->misc_stats.current_port_speed));
return len;
}
/*
* fnic_trace_buf_init - Initialize fnic trace buffer logging facility
*
* Description:
* Initialize the trace buffer data structure by allocating the required
* memory and setting the page_offset for every trace entry; each offset is
* the previous offset plus the trace entry length.
*/
int fnic_trace_buf_init(void)
{
unsigned long fnic_buf_head;
int i;
int err = 0;
trace_max_pages = fnic_trace_max_pages;
fnic_max_trace_entries = (trace_max_pages * PAGE_SIZE)/
FNIC_ENTRY_SIZE_BYTES;
fnic_trace_buf_p = (unsigned long)vcalloc(trace_max_pages, PAGE_SIZE);
if (!fnic_trace_buf_p) {
printk(KERN_ERR PFX "Failed to allocate memory "
"for fnic_trace_buf_p\n");
err = -ENOMEM;
goto err_fnic_trace_buf_init;
}
fnic_trace_entries.page_offset =
vmalloc(array_size(fnic_max_trace_entries,
sizeof(unsigned long)));
if (!fnic_trace_entries.page_offset) {
printk(KERN_ERR PFX "Failed to allocate memory for"
" page_offset\n");
if (fnic_trace_buf_p) {
vfree((void *)fnic_trace_buf_p);
fnic_trace_buf_p = 0;
}
err = -ENOMEM;
goto err_fnic_trace_buf_init;
}
memset((void *)fnic_trace_entries.page_offset, 0,
(fnic_max_trace_entries * sizeof(unsigned long)));
fnic_trace_entries.wr_idx = fnic_trace_entries.rd_idx = 0;
fnic_buf_head = fnic_trace_buf_p;
/*
* Set page_offset field of fnic_trace_entries struct by
* calculating memory location for every trace entry using
* length of each trace entry
*/
for (i = 0; i < fnic_max_trace_entries; i++) {
fnic_trace_entries.page_offset[i] = fnic_buf_head;
fnic_buf_head += FNIC_ENTRY_SIZE_BYTES;
}
fnic_trace_debugfs_init();
pr_info("fnic: Successfully Initialized Trace Buffer\n");
return err;
err_fnic_trace_buf_init:
return err;
}
/*
* fnic_trace_free - Free memory of fnic trace data structures.
*/
void fnic_trace_free(void)
{
fnic_tracing_enabled = 0;
fnic_trace_debugfs_terminate();
if (fnic_trace_entries.page_offset) {
vfree((void *)fnic_trace_entries.page_offset);
fnic_trace_entries.page_offset = NULL;
}
if (fnic_trace_buf_p) {
vfree((void *)fnic_trace_buf_p);
fnic_trace_buf_p = 0;
}
printk(KERN_INFO PFX "Successfully Freed Trace Buffer\n");
}
/*
* fnic_fc_trace_init -
* Initialize the trace buffer used to log fnic control frames
* Description:
* Initialize the trace buffer data structure by allocating the
* required memory for the trace data as well as for the indexes.
* Each entry is FC_TRC_SIZE_BYTES (256 bytes) long and memory is
* allocated for fc_trace_max_entries entries.
* page_offset (the index) is set to the address of each trace entry
* by adding the entry size to the previous page_offset value.
*/
int fnic_fc_trace_init(void)
{
unsigned long fc_trace_buf_head;
int err = 0;
int i;
fc_trace_max_entries = (fnic_fc_trace_max_pages * PAGE_SIZE)/
FC_TRC_SIZE_BYTES;
fnic_fc_ctlr_trace_buf_p =
(unsigned long)vmalloc(array_size(PAGE_SIZE,
fnic_fc_trace_max_pages));
if (!fnic_fc_ctlr_trace_buf_p) {
pr_err("fnic: Failed to allocate memory for "
"FC Control Trace Buf\n");
err = -ENOMEM;
goto err_fnic_fc_ctlr_trace_buf_init;
}
memset((void *)fnic_fc_ctlr_trace_buf_p, 0,
fnic_fc_trace_max_pages * PAGE_SIZE);
/* Allocate memory for page offset */
fc_trace_entries.page_offset =
vmalloc(array_size(fc_trace_max_entries,
sizeof(unsigned long)));
if (!fc_trace_entries.page_offset) {
pr_err("fnic: Failed to allocate memory for page_offset\n");
if (fnic_fc_ctlr_trace_buf_p) {
pr_err("fnic: Freeing FC Control Trace Buf\n");
vfree((void *)fnic_fc_ctlr_trace_buf_p);
fnic_fc_ctlr_trace_buf_p = 0;
}
err = -ENOMEM;
goto err_fnic_fc_ctlr_trace_buf_init;
}
memset((void *)fc_trace_entries.page_offset, 0,
(fc_trace_max_entries * sizeof(unsigned long)));
fc_trace_entries.rd_idx = fc_trace_entries.wr_idx = 0;
fc_trace_buf_head = fnic_fc_ctlr_trace_buf_p;
/*
* Set up fc_trace_entries.page_offset field with memory location
* for every trace entry
*/
for (i = 0; i < fc_trace_max_entries; i++) {
fc_trace_entries.page_offset[i] = fc_trace_buf_head;
fc_trace_buf_head += FC_TRC_SIZE_BYTES;
}
fnic_fc_trace_debugfs_init();
pr_info("fnic: Successfully Initialized FC_CTLR Trace Buffer\n");
return err;
err_fnic_fc_ctlr_trace_buf_init:
return err;
}
/*
* fnic_fc_trace_free - Free memory of the fnic FC control trace data structures.
*/
void fnic_fc_trace_free(void)
{
fnic_fc_tracing_enabled = 0;
fnic_fc_trace_debugfs_terminate();
if (fc_trace_entries.page_offset) {
vfree((void *)fc_trace_entries.page_offset);
fc_trace_entries.page_offset = NULL;
}
if (fnic_fc_ctlr_trace_buf_p) {
vfree((void *)fnic_fc_ctlr_trace_buf_p);
fnic_fc_ctlr_trace_buf_p = 0;
}
pr_info("fnic: Successfully Freed FC_CTLR Trace Buffer\n");
}
/*
* fnic_fc_trace_set_data:
* Maintain rd & wr idx accordingly and set data
* Passed parameters:
* host_no: host number associated with fnic
* frame_type: send_frame, recv_frame or link event
* frame: pointer to the FC frame
* fc_trc_frame_len: length of the FC frame
* Description:
* This routine gets the next available wr_idx, copies the passed trace
* data to the buffer pointed to by wr_idx and increments wr_idx. It also
* makes sure that we don't overwrite the entry currently being read and
* wraps around when the maximum number of entries is reached.
* Returned Value:
* Always returns 0.
*/
int fnic_fc_trace_set_data(u32 host_no, u8 frame_type,
char *frame, u32 fc_trc_frame_len)
{
unsigned long flags;
struct fc_trace_hdr *fc_buf;
unsigned long eth_fcoe_hdr_len;
char *fc_trace;
if (fnic_fc_tracing_enabled == 0)
return 0;
spin_lock_irqsave(&fnic_fc_trace_lock, flags);
if (fnic_fc_trace_cleared == 1) {
fc_trace_entries.rd_idx = fc_trace_entries.wr_idx = 0;
pr_info("fnic: Resetting the read idx\n");
memset((void *)fnic_fc_ctlr_trace_buf_p, 0,
fnic_fc_trace_max_pages * PAGE_SIZE);
fnic_fc_trace_cleared = 0;
}
fc_buf = (struct fc_trace_hdr *)
fc_trace_entries.page_offset[fc_trace_entries.wr_idx];
fc_trace_entries.wr_idx++;
if (fc_trace_entries.wr_idx >= fc_trace_max_entries)
fc_trace_entries.wr_idx = 0;
if (fc_trace_entries.wr_idx == fc_trace_entries.rd_idx) {
fc_trace_entries.rd_idx++;
if (fc_trace_entries.rd_idx >= fc_trace_max_entries)
fc_trace_entries.rd_idx = 0;
}
ktime_get_real_ts64(&fc_buf->time_stamp);
fc_buf->host_no = host_no;
fc_buf->frame_type = frame_type;
fc_trace = (char *)FC_TRACE_ADDRESS(fc_buf);
/* On the receive path the Ethernet and FCoE headers are not available
* at the trace entry point, so stuff 0xff in their place to keep the
* entry layout generic.
*/
if (frame_type == FNIC_FC_RECV) {
eth_fcoe_hdr_len = sizeof(struct ethhdr) +
sizeof(struct fcoe_hdr);
memset((char *)fc_trace, 0xff, eth_fcoe_hdr_len);
/* Copy the rest of data frame */
memcpy((char *)(fc_trace + eth_fcoe_hdr_len), (void *)frame,
min_t(u8, fc_trc_frame_len,
(u8)(FC_TRC_SIZE_BYTES - FC_TRC_HEADER_SIZE
- eth_fcoe_hdr_len)));
} else {
memcpy((char *)fc_trace, (void *)frame,
min_t(u8, fc_trc_frame_len,
(u8)(FC_TRC_SIZE_BYTES - FC_TRC_HEADER_SIZE)));
}
/* Store the actual received length */
fc_buf->frame_len = fc_trc_frame_len;
spin_unlock_irqrestore(&fnic_fc_trace_lock, flags);
return 0;
}
/*
* fnic_fc_trace_get_data: Copy trace buffer to a memory file
* Passed parameters:
* @fnic_dbgfs_prt: pointer to debugfs trace buffer
* @rdata_flag: 1 => unformatted file, 0 => formatted file
* Description:
* This routine copies the trace data to the memory file either with
* proper formatting or as raw hex without formatting (for further
* processing), depending on rdata_flag.
* Return Value:
* Number of bytes that were dumped into fnic_dbgfs_prt
*/
int fnic_fc_trace_get_data(fnic_dbgfs_t *fnic_dbgfs_prt, u8 rdata_flag)
{
int rd_idx, wr_idx;
unsigned long flags;
int len = 0, j;
struct fc_trace_hdr *tdata;
char *fc_trace;
spin_lock_irqsave(&fnic_fc_trace_lock, flags);
if (fc_trace_entries.wr_idx == fc_trace_entries.rd_idx) {
spin_unlock_irqrestore(&fnic_fc_trace_lock, flags);
pr_info("fnic: Buffer is empty\n");
return 0;
}
rd_idx = fc_trace_entries.rd_idx;
wr_idx = fc_trace_entries.wr_idx;
if (rdata_flag == 0) {
len += scnprintf(fnic_dbgfs_prt->buffer + len,
(fnic_fc_trace_max_pages * PAGE_SIZE * 3) - len,
"Time Stamp (UTC)\t\t"
"Host No: F Type: len: FCoE_FRAME:\n");
}
while (rd_idx != wr_idx) {
tdata = (struct fc_trace_hdr *)
fc_trace_entries.page_offset[rd_idx];
if (!tdata) {
pr_info("fnic: Rd data is NULL\n");
spin_unlock_irqrestore(&fnic_fc_trace_lock, flags);
return 0;
}
if (rdata_flag == 0) {
copy_and_format_trace_data(tdata,
fnic_dbgfs_prt, &len, rdata_flag);
} else {
fc_trace = (char *)tdata;
for (j = 0; j < FC_TRC_SIZE_BYTES; j++) {
len += scnprintf(fnic_dbgfs_prt->buffer + len,
(fnic_fc_trace_max_pages * PAGE_SIZE * 3)
- len, "%02x", fc_trace[j] & 0xff);
} /* for loop */
len += scnprintf(fnic_dbgfs_prt->buffer + len,
(fnic_fc_trace_max_pages * PAGE_SIZE * 3) - len,
"\n");
}
rd_idx++;
if (rd_idx > (fc_trace_max_entries - 1))
rd_idx = 0;
}
spin_unlock_irqrestore(&fnic_fc_trace_lock, flags);
return len;
}
/*
* copy_and_format_trace_data: Copy formatted data to char * buffer
* Passed Parameters:
* @tdata: pointer to trace data
* @fnic_dbgfs_prt: pointer to debugfs trace buffer
* @orig_len: pointer to the current length of the output buffer
* @rdata_flag: 0 => formatted file, 1 => unformatted file
* Description:
* This routine formats and copies the passed trace data into the
* formatted or unformatted file accordingly.
*/
void copy_and_format_trace_data(struct fc_trace_hdr *tdata,
fnic_dbgfs_t *fnic_dbgfs_prt, int *orig_len,
u8 rdata_flag)
{
int j, i = 1, len;
int ethhdr_len = sizeof(struct ethhdr) - 1;
int fcoehdr_len = sizeof(struct fcoe_hdr);
int fchdr_len = sizeof(struct fc_frame_header);
int max_size = fnic_fc_trace_max_pages * PAGE_SIZE * 3;
char *fc_trace;
tdata->frame_type = tdata->frame_type & 0x7F;
len = *orig_len;
len += scnprintf(fnic_dbgfs_prt->buffer + len, max_size - len,
"%ptTs.%09lu ns%8x %c%8x\t",
&tdata->time_stamp.tv_sec, tdata->time_stamp.tv_nsec,
tdata->host_no, tdata->frame_type, tdata->frame_len);
fc_trace = (char *)FC_TRACE_ADDRESS(tdata);
for (j = 0; j < min_t(u8, tdata->frame_len,
(u8)(FC_TRC_SIZE_BYTES - FC_TRC_HEADER_SIZE)); j++) {
if (tdata->frame_type == FNIC_FC_LE) {
len += scnprintf(fnic_dbgfs_prt->buffer + len,
max_size - len, "%c", fc_trace[j]);
} else {
len += scnprintf(fnic_dbgfs_prt->buffer + len,
max_size - len, "%02x", fc_trace[j] & 0xff);
len += scnprintf(fnic_dbgfs_prt->buffer + len,
max_size - len, " ");
if (j == ethhdr_len ||
j == ethhdr_len + fcoehdr_len ||
j == ethhdr_len + fcoehdr_len + fchdr_len ||
(i > 3 && j%fchdr_len == 0)) {
len += scnprintf(fnic_dbgfs_prt->buffer
+ len, max_size - len,
"\n\t\t\t\t\t\t\t\t");
i++;
}
} /* end of else*/
} /* End of for loop*/
len += scnprintf(fnic_dbgfs_prt->buffer + len,
max_size - len, "\n");
*orig_len = len;
}
| linux-master | drivers/scsi/fnic/fnic_trace.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright 2008 Cisco Systems, Inc. All rights reserved.
* Copyright 2007 Nuova Systems, Inc. All rights reserved.
*/
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include "vnic_dev.h"
#include "vnic_intr.h"
void vnic_intr_free(struct vnic_intr *intr)
{
intr->ctrl = NULL;
}
int vnic_intr_alloc(struct vnic_dev *vdev, struct vnic_intr *intr,
unsigned int index)
{
intr->index = index;
intr->vdev = vdev;
intr->ctrl = vnic_dev_get_res(vdev, RES_TYPE_INTR_CTRL, index);
if (!intr->ctrl) {
printk(KERN_ERR "Failed to hook INTR[%d].ctrl resource\n",
index);
return -EINVAL;
}
return 0;
}
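/*
* vnic_intr_init - program the coalescing timer, coalescing type and
* mask-on-assertion settings for an interrupt resource and clear its
* credit count.
*/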
void vnic_intr_init(struct vnic_intr *intr, unsigned int coalescing_timer,
unsigned int coalescing_type, unsigned int mask_on_assertion)
{
iowrite32(coalescing_timer, &intr->ctrl->coalescing_timer);
iowrite32(coalescing_type, &intr->ctrl->coalescing_type);
iowrite32(mask_on_assertion, &intr->ctrl->mask_on_assertion);
iowrite32(0, &intr->ctrl->int_credits);
}
void vnic_intr_clean(struct vnic_intr *intr)
{
iowrite32(0, &intr->ctrl->int_credits);
}
| linux-master | drivers/scsi/fnic/vnic_intr.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright 2008 Cisco Systems, Inc. All rights reserved.
* Copyright 2007 Nuova Systems, Inc. All rights reserved.
*/
#include <linux/errno.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/skbuff.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <linux/workqueue.h>
#include <scsi/fc/fc_fip.h>
#include <scsi/fc/fc_els.h>
#include <scsi/fc/fc_fcoe.h>
#include <scsi/fc_frame.h>
#include <scsi/libfc.h>
#include "fnic_io.h"
#include "fnic.h"
#include "fnic_fip.h"
#include "cq_enet_desc.h"
#include "cq_exch_desc.h"
static u8 fcoe_all_fcfs[ETH_ALEN] = FIP_ALL_FCF_MACS;
struct workqueue_struct *fnic_fip_queue;
struct workqueue_struct *fnic_event_queue;
static void fnic_set_eth_mode(struct fnic *);
static void fnic_fcoe_send_vlan_req(struct fnic *fnic);
static void fnic_fcoe_start_fcf_disc(struct fnic *fnic);
static void fnic_fcoe_process_vlan_resp(struct fnic *fnic, struct sk_buff *);
static int fnic_fcoe_vlan_check(struct fnic *fnic, u16 flag);
static int fnic_fcoe_handle_fip_frame(struct fnic *fnic, struct sk_buff *skb);
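/*
* fnic_handle_link - work handler for vNIC link events.
* Reads the current link status and port speed, updates the reported
* fc_host speed, and depending on the old/new state (and the link-down
* count) notifies the FCoE controller of link up/down or kicks off FCoE
* VLAN discovery when FIP is enabled.
*/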
void fnic_handle_link(struct work_struct *work)
{
struct fnic *fnic = container_of(work, struct fnic, link_work);
unsigned long flags;
int old_link_status;
u32 old_link_down_cnt;
u64 old_port_speed, new_port_speed;
spin_lock_irqsave(&fnic->fnic_lock, flags);
fnic->link_events = 1; /* less work to just set it every time */
if (fnic->stop_rx_link_events) {
spin_unlock_irqrestore(&fnic->fnic_lock, flags);
return;
}
old_link_down_cnt = fnic->link_down_cnt;
old_link_status = fnic->link_status;
old_port_speed = atomic64_read(
&fnic->fnic_stats.misc_stats.current_port_speed);
fnic->link_status = vnic_dev_link_status(fnic->vdev);
fnic->link_down_cnt = vnic_dev_link_down_cnt(fnic->vdev);
new_port_speed = vnic_dev_port_speed(fnic->vdev);
atomic64_set(&fnic->fnic_stats.misc_stats.current_port_speed,
new_port_speed);
if (old_port_speed != new_port_speed)
FNIC_MAIN_DBG(KERN_INFO, fnic->lport->host,
"Current vnic speed set to : %llu\n",
new_port_speed);
switch (vnic_dev_port_speed(fnic->vdev)) {
case DCEM_PORTSPEED_10G:
fc_host_speed(fnic->lport->host) = FC_PORTSPEED_10GBIT;
fnic->lport->link_supported_speeds = FC_PORTSPEED_10GBIT;
break;
case DCEM_PORTSPEED_20G:
fc_host_speed(fnic->lport->host) = FC_PORTSPEED_20GBIT;
fnic->lport->link_supported_speeds = FC_PORTSPEED_20GBIT;
break;
case DCEM_PORTSPEED_25G:
fc_host_speed(fnic->lport->host) = FC_PORTSPEED_25GBIT;
fnic->lport->link_supported_speeds = FC_PORTSPEED_25GBIT;
break;
case DCEM_PORTSPEED_40G:
case DCEM_PORTSPEED_4x10G:
fc_host_speed(fnic->lport->host) = FC_PORTSPEED_40GBIT;
fnic->lport->link_supported_speeds = FC_PORTSPEED_40GBIT;
break;
case DCEM_PORTSPEED_100G:
fc_host_speed(fnic->lport->host) = FC_PORTSPEED_100GBIT;
fnic->lport->link_supported_speeds = FC_PORTSPEED_100GBIT;
break;
default:
fc_host_speed(fnic->lport->host) = FC_PORTSPEED_UNKNOWN;
fnic->lport->link_supported_speeds = FC_PORTSPEED_UNKNOWN;
break;
}
if (old_link_status == fnic->link_status) {
if (!fnic->link_status) {
/* DOWN -> DOWN */
spin_unlock_irqrestore(&fnic->fnic_lock, flags);
fnic_fc_trace_set_data(fnic->lport->host->host_no,
FNIC_FC_LE, "Link Status: DOWN->DOWN",
strlen("Link Status: DOWN->DOWN"));
} else {
if (old_link_down_cnt != fnic->link_down_cnt) {
/* UP -> DOWN -> UP */
fnic->lport->host_stats.link_failure_count++;
spin_unlock_irqrestore(&fnic->fnic_lock, flags);
fnic_fc_trace_set_data(
fnic->lport->host->host_no,
FNIC_FC_LE,
"Link Status:UP_DOWN_UP",
strlen("Link_Status:UP_DOWN_UP")
);
FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
"link down\n");
fcoe_ctlr_link_down(&fnic->ctlr);
if (fnic->config.flags & VFCF_FIP_CAPABLE) {
/* start FCoE VLAN discovery */
fnic_fc_trace_set_data(
fnic->lport->host->host_no,
FNIC_FC_LE,
"Link Status: UP_DOWN_UP_VLAN",
strlen(
"Link Status: UP_DOWN_UP_VLAN")
);
fnic_fcoe_send_vlan_req(fnic);
return;
}
FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
"link up\n");
fcoe_ctlr_link_up(&fnic->ctlr);
} else {
/* UP -> UP */
spin_unlock_irqrestore(&fnic->fnic_lock, flags);
fnic_fc_trace_set_data(
fnic->lport->host->host_no, FNIC_FC_LE,
"Link Status: UP_UP",
strlen("Link Status: UP_UP"));
}
}
} else if (fnic->link_status) {
/* DOWN -> UP */
spin_unlock_irqrestore(&fnic->fnic_lock, flags);
if (fnic->config.flags & VFCF_FIP_CAPABLE) {
/* start FCoE VLAN discovery */
fnic_fc_trace_set_data(
fnic->lport->host->host_no,
FNIC_FC_LE, "Link Status: DOWN_UP_VLAN",
strlen("Link Status: DOWN_UP_VLAN"));
fnic_fcoe_send_vlan_req(fnic);
return;
}
FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, "link up\n");
fnic_fc_trace_set_data(fnic->lport->host->host_no, FNIC_FC_LE,
"Link Status: DOWN_UP", strlen("Link Status: DOWN_UP"));
fcoe_ctlr_link_up(&fnic->ctlr);
} else {
/* UP -> DOWN */
fnic->lport->host_stats.link_failure_count++;
spin_unlock_irqrestore(&fnic->fnic_lock, flags);
FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, "link down\n");
fnic_fc_trace_set_data(
fnic->lport->host->host_no, FNIC_FC_LE,
"Link Status: UP_DOWN",
strlen("Link Status: UP_DOWN"));
if (fnic->config.flags & VFCF_FIP_CAPABLE) {
FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
"deleting fip-timer during link-down\n");
del_timer_sync(&fnic->fip_timer);
}
fcoe_ctlr_link_down(&fnic->ctlr);
}
}
/*
* This function passes incoming fabric frames to libFC
*/
void fnic_handle_frame(struct work_struct *work)
{
struct fnic *fnic = container_of(work, struct fnic, frame_work);
struct fc_lport *lp = fnic->lport;
unsigned long flags;
struct sk_buff *skb;
struct fc_frame *fp;
while ((skb = skb_dequeue(&fnic->frame_queue))) {
spin_lock_irqsave(&fnic->fnic_lock, flags);
if (fnic->stop_rx_link_events) {
spin_unlock_irqrestore(&fnic->fnic_lock, flags);
dev_kfree_skb(skb);
return;
}
fp = (struct fc_frame *)skb;
/*
* If we're in a transitional state, just re-queue and return.
* The queue will be serviced when we get to a stable state.
*/
if (fnic->state != FNIC_IN_FC_MODE &&
fnic->state != FNIC_IN_ETH_MODE) {
skb_queue_head(&fnic->frame_queue, skb);
spin_unlock_irqrestore(&fnic->fnic_lock, flags);
return;
}
spin_unlock_irqrestore(&fnic->fnic_lock, flags);
fc_exch_recv(lp, fp);
}
}
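/*
* fnic_fcoe_evlist_free - free any events still queued on fnic->evlist.
*/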
void fnic_fcoe_evlist_free(struct fnic *fnic)
{
struct fnic_event *fevt = NULL;
struct fnic_event *next = NULL;
unsigned long flags;
spin_lock_irqsave(&fnic->fnic_lock, flags);
if (list_empty(&fnic->evlist)) {
spin_unlock_irqrestore(&fnic->fnic_lock, flags);
return;
}
list_for_each_entry_safe(fevt, next, &fnic->evlist, list) {
list_del(&fevt->list);
kfree(fevt);
}
spin_unlock_irqrestore(&fnic->fnic_lock, flags);
}
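/*
* fnic_handle_event - work handler that drains fnic->evlist and dispatches
* each queued event (start VLAN discovery or start FCF discovery).
*/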
void fnic_handle_event(struct work_struct *work)
{
struct fnic *fnic = container_of(work, struct fnic, event_work);
struct fnic_event *fevt = NULL;
struct fnic_event *next = NULL;
unsigned long flags;
spin_lock_irqsave(&fnic->fnic_lock, flags);
if (list_empty(&fnic->evlist)) {
spin_unlock_irqrestore(&fnic->fnic_lock, flags);
return;
}
list_for_each_entry_safe(fevt, next, &fnic->evlist, list) {
if (fnic->stop_rx_link_events) {
list_del(&fevt->list);
kfree(fevt);
spin_unlock_irqrestore(&fnic->fnic_lock, flags);
return;
}
/*
* If we're in a transitional state, just re-queue and return.
* The queue will be serviced when we get to a stable state.
*/
if (fnic->state != FNIC_IN_FC_MODE &&
fnic->state != FNIC_IN_ETH_MODE) {
spin_unlock_irqrestore(&fnic->fnic_lock, flags);
return;
}
list_del(&fevt->list);
switch (fevt->event) {
case FNIC_EVT_START_VLAN_DISC:
spin_unlock_irqrestore(&fnic->fnic_lock, flags);
fnic_fcoe_send_vlan_req(fnic);
spin_lock_irqsave(&fnic->fnic_lock, flags);
break;
case FNIC_EVT_START_FCF_DISC:
FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
"Start FCF Discovery\n");
fnic_fcoe_start_fcf_disc(fnic);
break;
default:
FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
"Unknown event 0x%x\n", fevt->event);
break;
}
kfree(fevt);
}
spin_unlock_irqrestore(&fnic->fnic_lock, flags);
}
/**
* is_fnic_fip_flogi_reject() - Check if the Received FIP FLOGI frame is rejected
* @fip: The FCoE controller that received the frame
* @skb: The received FIP frame
*
* Returns non-zero if the frame is a FIP-encapsulated FLOGI LS_RJT reply,
* i.e. the switch rejected the FLOGI request; zero otherwise.
*/
static inline int is_fnic_fip_flogi_reject(struct fcoe_ctlr *fip,
struct sk_buff *skb)
{
struct fc_lport *lport = fip->lp;
struct fip_header *fiph;
struct fc_frame_header *fh = NULL;
struct fip_desc *desc;
struct fip_encaps *els;
u16 op;
u8 els_op;
u8 sub;
size_t rlen;
size_t dlen = 0;
if (skb_linearize(skb))
return 0;
if (skb->len < sizeof(*fiph))
return 0;
fiph = (struct fip_header *)skb->data;
op = ntohs(fiph->fip_op);
sub = fiph->fip_subcode;
if (op != FIP_OP_LS)
return 0;
if (sub != FIP_SC_REP)
return 0;
rlen = ntohs(fiph->fip_dl_len) * 4;
if (rlen + sizeof(*fiph) > skb->len)
return 0;
desc = (struct fip_desc *)(fiph + 1);
dlen = desc->fip_dlen * FIP_BPW;
if (desc->fip_dtype == FIP_DT_FLOGI) {
if (dlen < sizeof(*els) + sizeof(*fh) + 1)
return 0;
els = (struct fip_encaps *)desc;
fh = (struct fc_frame_header *)(els + 1);
if (!fh)
return 0;
/*
* ELS command code, reason and explanation should be = Reject,
* unsupported command and insufficient resource
*/
els_op = *(u8 *)(fh + 1);
if (els_op == ELS_LS_RJT) {
shost_printk(KERN_INFO, lport->host,
"Flogi Request Rejected by Switch\n");
return 1;
}
shost_printk(KERN_INFO, lport->host,
"Flogi Request Accepted by Switch\n");
}
return 0;
}
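/*
* fnic_fcoe_send_vlan_req - build and send a FIP VLAN discovery request to
* the all-FCFs MAC address and arm the FIP timer so the request can be
* retried if no response arrives.
*/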
static void fnic_fcoe_send_vlan_req(struct fnic *fnic)
{
struct fcoe_ctlr *fip = &fnic->ctlr;
struct fnic_stats *fnic_stats = &fnic->fnic_stats;
struct sk_buff *skb;
char *eth_fr;
struct fip_vlan *vlan;
u64 vlan_tov;
fnic_fcoe_reset_vlans(fnic);
fnic->set_vlan(fnic, 0);
if (printk_ratelimit())
FNIC_FCS_DBG(KERN_INFO, fnic->lport->host,
"Sending VLAN request...\n");
skb = dev_alloc_skb(sizeof(struct fip_vlan));
if (!skb)
return;
eth_fr = (char *)skb->data;
vlan = (struct fip_vlan *)eth_fr;
memset(vlan, 0, sizeof(*vlan));
memcpy(vlan->eth.h_source, fip->ctl_src_addr, ETH_ALEN);
memcpy(vlan->eth.h_dest, fcoe_all_fcfs, ETH_ALEN);
vlan->eth.h_proto = htons(ETH_P_FIP);
vlan->fip.fip_ver = FIP_VER_ENCAPS(FIP_VER);
vlan->fip.fip_op = htons(FIP_OP_VLAN);
vlan->fip.fip_subcode = FIP_SC_VL_REQ;
vlan->fip.fip_dl_len = htons(sizeof(vlan->desc) / FIP_BPW);
vlan->desc.mac.fd_desc.fip_dtype = FIP_DT_MAC;
vlan->desc.mac.fd_desc.fip_dlen = sizeof(vlan->desc.mac) / FIP_BPW;
memcpy(&vlan->desc.mac.fd_mac, fip->ctl_src_addr, ETH_ALEN);
vlan->desc.wwnn.fd_desc.fip_dtype = FIP_DT_NAME;
vlan->desc.wwnn.fd_desc.fip_dlen = sizeof(vlan->desc.wwnn) / FIP_BPW;
put_unaligned_be64(fip->lp->wwnn, &vlan->desc.wwnn.fd_wwn);
atomic64_inc(&fnic_stats->vlan_stats.vlan_disc_reqs);
skb_put(skb, sizeof(*vlan));
skb->protocol = htons(ETH_P_FIP);
skb_reset_mac_header(skb);
skb_reset_network_header(skb);
fip->send(fip, skb);
/* set a timer so that we can retry if there is no response */
vlan_tov = jiffies + msecs_to_jiffies(FCOE_CTLR_FIPVLAN_TOV);
mod_timer(&fnic->fip_timer, round_jiffies(vlan_tov));
}
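/*
* fnic_fcoe_process_vlan_resp - parse a FIP VLAN notification, queue every
* advertised VLAN ID on fnic->vlans, then select the first VLAN and start
* FCF solicitation on it.
*/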
static void fnic_fcoe_process_vlan_resp(struct fnic *fnic, struct sk_buff *skb)
{
struct fcoe_ctlr *fip = &fnic->ctlr;
struct fip_header *fiph;
struct fip_desc *desc;
struct fnic_stats *fnic_stats = &fnic->fnic_stats;
u16 vid;
size_t rlen;
size_t dlen;
struct fcoe_vlan *vlan;
u64 sol_time;
unsigned long flags;
FNIC_FCS_DBG(KERN_INFO, fnic->lport->host,
"Received VLAN response...\n");
fiph = (struct fip_header *) skb->data;
FNIC_FCS_DBG(KERN_INFO, fnic->lport->host,
"Received VLAN response... OP 0x%x SUB_OP 0x%x\n",
ntohs(fiph->fip_op), fiph->fip_subcode);
rlen = ntohs(fiph->fip_dl_len) * 4;
fnic_fcoe_reset_vlans(fnic);
spin_lock_irqsave(&fnic->vlans_lock, flags);
desc = (struct fip_desc *)(fiph + 1);
while (rlen > 0) {
dlen = desc->fip_dlen * FIP_BPW;
switch (desc->fip_dtype) {
case FIP_DT_VLAN:
vid = ntohs(((struct fip_vlan_desc *)desc)->fd_vlan);
shost_printk(KERN_INFO, fnic->lport->host,
"process_vlan_resp: FIP VLAN %d\n", vid);
vlan = kzalloc(sizeof(*vlan), GFP_ATOMIC);
if (!vlan) {
/* retry from timer */
spin_unlock_irqrestore(&fnic->vlans_lock,
flags);
goto out;
}
vlan->vid = vid & 0x0fff;
vlan->state = FIP_VLAN_AVAIL;
list_add_tail(&vlan->list, &fnic->vlans);
break;
}
desc = (struct fip_desc *)((char *)desc + dlen);
rlen -= dlen;
}
/* any VLAN descriptors present ? */
if (list_empty(&fnic->vlans)) {
/* retry from timer */
atomic64_inc(&fnic_stats->vlan_stats.resp_withno_vlanID);
FNIC_FCS_DBG(KERN_INFO, fnic->lport->host,
"No VLAN descriptors in FIP VLAN response\n");
spin_unlock_irqrestore(&fnic->vlans_lock, flags);
goto out;
}
vlan = list_first_entry(&fnic->vlans, struct fcoe_vlan, list);
fnic->set_vlan(fnic, vlan->vid);
vlan->state = FIP_VLAN_SENT; /* sent now */
vlan->sol_count++;
spin_unlock_irqrestore(&fnic->vlans_lock, flags);
/* start the solicitation */
fcoe_ctlr_link_up(fip);
sol_time = jiffies + msecs_to_jiffies(FCOE_CTLR_START_DELAY);
mod_timer(&fnic->fip_timer, round_jiffies(sol_time));
out:
return;
}
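/*
* fnic_fcoe_start_fcf_disc - program the first discovered VLAN, bring the
* FCoE controller link up to begin FCF solicitation and re-arm the FIP timer.
*/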
static void fnic_fcoe_start_fcf_disc(struct fnic *fnic)
{
unsigned long flags;
struct fcoe_vlan *vlan;
u64 sol_time;
spin_lock_irqsave(&fnic->vlans_lock, flags);
vlan = list_first_entry(&fnic->vlans, struct fcoe_vlan, list);
fnic->set_vlan(fnic, vlan->vid);
vlan->state = FIP_VLAN_SENT; /* sent now */
vlan->sol_count = 1;
spin_unlock_irqrestore(&fnic->vlans_lock, flags);
/* start the solicitation */
fcoe_ctlr_link_up(&fnic->ctlr);
sol_time = jiffies + msecs_to_jiffies(FCOE_CTLR_START_DELAY);
mod_timer(&fnic->fip_timer, round_jiffies(sol_time));
}
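/*
* fnic_fcoe_vlan_check - mark the currently selected VLAN as used once an
* FCF advertisement is seen on it. Returns -EINVAL if no usable VLAN exists.
*/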
static int fnic_fcoe_vlan_check(struct fnic *fnic, u16 flag)
{
unsigned long flags;
struct fcoe_vlan *fvlan;
spin_lock_irqsave(&fnic->vlans_lock, flags);
if (list_empty(&fnic->vlans)) {
spin_unlock_irqrestore(&fnic->vlans_lock, flags);
return -EINVAL;
}
fvlan = list_first_entry(&fnic->vlans, struct fcoe_vlan, list);
if (fvlan->state == FIP_VLAN_USED) {
spin_unlock_irqrestore(&fnic->vlans_lock, flags);
return 0;
}
if (fvlan->state == FIP_VLAN_SENT) {
fvlan->state = FIP_VLAN_USED;
spin_unlock_irqrestore(&fnic->vlans_lock, flags);
return 0;
}
spin_unlock_irqrestore(&fnic->vlans_lock, flags);
return -EINVAL;
}
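/*
* fnic_event_enq - allocate an event, queue it on fnic->evlist and schedule
* the event work handler.
*/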
static void fnic_event_enq(struct fnic *fnic, enum fnic_evt ev)
{
struct fnic_event *fevt;
unsigned long flags;
fevt = kmalloc(sizeof(*fevt), GFP_ATOMIC);
if (!fevt)
return;
fevt->fnic = fnic;
fevt->event = ev;
spin_lock_irqsave(&fnic->fnic_lock, flags);
list_add_tail(&fevt->list, &fnic->evlist);
spin_unlock_irqrestore(&fnic->fnic_lock, flags);
schedule_work(&fnic->event_work);
}
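/*
* fnic_fcoe_handle_fip_frame - pre-process a received FIP frame.
* Returns 0 when the frame was consumed here (FIP VLAN notification),
* a negative value for an invalid skb, and 1 when the frame should be
* handed on to libfcoe.
*/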
static int fnic_fcoe_handle_fip_frame(struct fnic *fnic, struct sk_buff *skb)
{
struct fip_header *fiph;
int ret = 1;
u16 op;
u8 sub;
if (!skb || !(skb->data))
return -1;
if (skb_linearize(skb))
goto drop;
fiph = (struct fip_header *)skb->data;
op = ntohs(fiph->fip_op);
sub = fiph->fip_subcode;
if (FIP_VER_DECAPS(fiph->fip_ver) != FIP_VER)
goto drop;
if (ntohs(fiph->fip_dl_len) * FIP_BPW + sizeof(*fiph) > skb->len)
goto drop;
if (op == FIP_OP_DISC && sub == FIP_SC_ADV) {
if (fnic_fcoe_vlan_check(fnic, ntohs(fiph->fip_flags)))
goto drop;
/* pass it on to fcoe */
ret = 1;
} else if (op == FIP_OP_VLAN && sub == FIP_SC_VL_NOTE) {
/* set the vlan as used */
fnic_fcoe_process_vlan_resp(fnic, skb);
ret = 0;
} else if (op == FIP_OP_CTRL && sub == FIP_SC_CLR_VLINK) {
/* received CVL request, restart vlan disc */
fnic_event_enq(fnic, FNIC_EVT_START_VLAN_DISC);
/* pass it on to fcoe */
ret = 1;
}
drop:
return ret;
}
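/*
* fnic_handle_fip_frame - work handler that drains fnic->fip_frame_queue,
* pre-processes each FIP frame, restarts VLAN discovery on FLOGI rejects
* and hands the remaining frames to the FCoE controller.
*/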
void fnic_handle_fip_frame(struct work_struct *work)
{
struct fnic *fnic = container_of(work, struct fnic, fip_frame_work);
struct fnic_stats *fnic_stats = &fnic->fnic_stats;
unsigned long flags;
struct sk_buff *skb;
struct ethhdr *eh;
while ((skb = skb_dequeue(&fnic->fip_frame_queue))) {
spin_lock_irqsave(&fnic->fnic_lock, flags);
if (fnic->stop_rx_link_events) {
spin_unlock_irqrestore(&fnic->fnic_lock, flags);
dev_kfree_skb(skb);
return;
}
/*
* If we're in a transitional state, just re-queue and return.
* The queue will be serviced when we get to a stable state.
*/
if (fnic->state != FNIC_IN_FC_MODE &&
fnic->state != FNIC_IN_ETH_MODE) {
skb_queue_head(&fnic->fip_frame_queue, skb);
spin_unlock_irqrestore(&fnic->fnic_lock, flags);
return;
}
spin_unlock_irqrestore(&fnic->fnic_lock, flags);
eh = (struct ethhdr *)skb->data;
if (eh->h_proto == htons(ETH_P_FIP)) {
skb_pull(skb, sizeof(*eh));
if (fnic_fcoe_handle_fip_frame(fnic, skb) <= 0) {
dev_kfree_skb(skb);
continue;
}
/*
* If there's FLOGI rejects - clear all
* fcf's & restart from scratch
*/
if (is_fnic_fip_flogi_reject(&fnic->ctlr, skb)) {
atomic64_inc(
&fnic_stats->vlan_stats.flogi_rejects);
shost_printk(KERN_INFO, fnic->lport->host,
"Trigger a Link down - VLAN Disc\n");
fcoe_ctlr_link_down(&fnic->ctlr);
/* start FCoE VLAN discovery */
fnic_fcoe_send_vlan_req(fnic);
dev_kfree_skb(skb);
continue;
}
fcoe_ctlr_recv(&fnic->ctlr, skb);
continue;
}
}
}
/**
* fnic_import_rq_eth_pkt() - handle received FCoE or FIP frame.
* @fnic: fnic instance.
* @skb: Ethernet Frame.
*/
static inline int fnic_import_rq_eth_pkt(struct fnic *fnic, struct sk_buff *skb)
{
struct fc_frame *fp;
struct ethhdr *eh;
struct fcoe_hdr *fcoe_hdr;
struct fcoe_crc_eof *ft;
/*
* Undo VLAN encapsulation if present.
*/
eh = (struct ethhdr *)skb->data;
if (eh->h_proto == htons(ETH_P_8021Q)) {
memmove((u8 *)eh + VLAN_HLEN, eh, ETH_ALEN * 2);
eh = skb_pull(skb, VLAN_HLEN);
skb_reset_mac_header(skb);
}
if (eh->h_proto == htons(ETH_P_FIP)) {
if (!(fnic->config.flags & VFCF_FIP_CAPABLE)) {
printk(KERN_ERR "Dropped FIP frame, as firmware "
"uses non-FIP mode, Enable FIP "
"using UCSM\n");
goto drop;
}
if ((fnic_fc_trace_set_data(fnic->lport->host->host_no,
FNIC_FC_RECV|0x80, (char *)skb->data, skb->len)) != 0) {
printk(KERN_ERR "fnic ctlr frame trace error!!!");
}
skb_queue_tail(&fnic->fip_frame_queue, skb);
queue_work(fnic_fip_queue, &fnic->fip_frame_work);
return 1; /* let caller know packet was used */
}
if (eh->h_proto != htons(ETH_P_FCOE))
goto drop;
skb_set_network_header(skb, sizeof(*eh));
skb_pull(skb, sizeof(*eh));
fcoe_hdr = (struct fcoe_hdr *)skb->data;
if (FC_FCOE_DECAPS_VER(fcoe_hdr) != FC_FCOE_VER)
goto drop;
fp = (struct fc_frame *)skb;
fc_frame_init(fp);
fr_sof(fp) = fcoe_hdr->fcoe_sof;
skb_pull(skb, sizeof(struct fcoe_hdr));
skb_reset_transport_header(skb);
ft = (struct fcoe_crc_eof *)(skb->data + skb->len - sizeof(*ft));
fr_eof(fp) = ft->fcoe_eof;
skb_trim(skb, skb->len - sizeof(*ft));
return 0;
drop:
dev_kfree_skb_irq(skb);
return -1;
}
/**
* fnic_update_mac_locked() - set data MAC address and filters.
* @fnic: fnic instance.
* @new: newly-assigned FCoE MAC address.
*
* Called with the fnic lock held.
*/
void fnic_update_mac_locked(struct fnic *fnic, u8 *new)
{
u8 *ctl = fnic->ctlr.ctl_src_addr;
u8 *data = fnic->data_src_addr;
if (is_zero_ether_addr(new))
new = ctl;
if (ether_addr_equal(data, new))
return;
FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, "update_mac %pM\n", new);
if (!is_zero_ether_addr(data) && !ether_addr_equal(data, ctl))
vnic_dev_del_addr(fnic->vdev, data);
memcpy(data, new, ETH_ALEN);
if (!ether_addr_equal(new, ctl))
vnic_dev_add_addr(fnic->vdev, new);
}
/**
* fnic_update_mac() - set data MAC address and filters.
* @lport: local port.
* @new: newly-assigned FCoE MAC address.
*/
void fnic_update_mac(struct fc_lport *lport, u8 *new)
{
struct fnic *fnic = lport_priv(lport);
spin_lock_irq(&fnic->fnic_lock);
fnic_update_mac_locked(fnic, new);
spin_unlock_irq(&fnic->fnic_lock);
}
/**
* fnic_set_port_id() - set the port_ID after successful FLOGI.
* @lport: local port.
* @port_id: assigned FC_ID.
* @fp: received frame containing the FLOGI accept or NULL.
*
* This is called from libfc when a new FC_ID has been assigned.
* This causes us to reset the firmware to FC_MODE and setup the new MAC
* address and FC_ID.
*
* It is also called with FC_ID 0 when we're logged off.
*
* If the FC_ID is due to point-to-point, fp may be NULL.
*/
void fnic_set_port_id(struct fc_lport *lport, u32 port_id, struct fc_frame *fp)
{
struct fnic *fnic = lport_priv(lport);
u8 *mac;
int ret;
FNIC_FCS_DBG(KERN_DEBUG, lport->host, "set port_id %x fp %p\n",
port_id, fp);
/*
* If we're clearing the FC_ID, change to use the ctl_src_addr.
* Set ethernet mode to send FLOGI.
*/
if (!port_id) {
fnic_update_mac(lport, fnic->ctlr.ctl_src_addr);
fnic_set_eth_mode(fnic);
return;
}
if (fp) {
mac = fr_cb(fp)->granted_mac;
if (is_zero_ether_addr(mac)) {
/* non-FIP - FLOGI already accepted - ignore return */
fcoe_ctlr_recv_flogi(&fnic->ctlr, lport, fp);
}
fnic_update_mac(lport, mac);
}
/* Change state to reflect transition to FC mode */
spin_lock_irq(&fnic->fnic_lock);
if (fnic->state == FNIC_IN_ETH_MODE || fnic->state == FNIC_IN_FC_MODE)
fnic->state = FNIC_IN_ETH_TRANS_FC_MODE;
else {
FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
"Unexpected fnic state %s while"
" processing flogi resp\n",
fnic_state_to_str(fnic->state));
spin_unlock_irq(&fnic->fnic_lock);
return;
}
spin_unlock_irq(&fnic->fnic_lock);
/*
* Send FLOGI registration to firmware to set up FC mode.
* The new address will be set up when registration completes.
*/
ret = fnic_flogi_reg_handler(fnic, port_id);
if (ret < 0) {
spin_lock_irq(&fnic->fnic_lock);
if (fnic->state == FNIC_IN_ETH_TRANS_FC_MODE)
fnic->state = FNIC_IN_ETH_MODE;
spin_unlock_irq(&fnic->fnic_lock);
}
}
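/*
* fnic_rq_cmpl_frame_recv - per-buffer receive completion handler.
* Decodes the completion descriptor (FCP or Ethernet type), validates CRC
* and encapsulation status, and queues good FC frames for the frame work
* handler; FIP frames are diverted by fnic_import_rq_eth_pkt().
*/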
static void fnic_rq_cmpl_frame_recv(struct vnic_rq *rq, struct cq_desc
*cq_desc, struct vnic_rq_buf *buf,
int skipped __attribute__((unused)),
void *opaque)
{
struct fnic *fnic = vnic_dev_priv(rq->vdev);
struct sk_buff *skb;
struct fc_frame *fp;
struct fnic_stats *fnic_stats = &fnic->fnic_stats;
u8 type, color, eop, sop, ingress_port, vlan_stripped;
u8 fcoe = 0, fcoe_sof, fcoe_eof;
u8 fcoe_fc_crc_ok = 1, fcoe_enc_error = 0;
u8 tcp_udp_csum_ok, udp, tcp, ipv4_csum_ok;
u8 ipv6, ipv4, ipv4_fragment, rss_type, csum_not_calc;
u8 fcs_ok = 1, packet_error = 0;
u16 q_number, completed_index, bytes_written = 0, vlan, checksum;
u32 rss_hash;
u16 exchange_id, tmpl;
u8 sof = 0;
u8 eof = 0;
u32 fcp_bytes_written = 0;
unsigned long flags;
dma_unmap_single(&fnic->pdev->dev, buf->dma_addr, buf->len,
DMA_FROM_DEVICE);
skb = buf->os_buf;
fp = (struct fc_frame *)skb;
buf->os_buf = NULL;
cq_desc_dec(cq_desc, &type, &color, &q_number, &completed_index);
if (type == CQ_DESC_TYPE_RQ_FCP) {
cq_fcp_rq_desc_dec((struct cq_fcp_rq_desc *)cq_desc,
&type, &color, &q_number, &completed_index,
&eop, &sop, &fcoe_fc_crc_ok, &exchange_id,
&tmpl, &fcp_bytes_written, &sof, &eof,
&ingress_port, &packet_error,
&fcoe_enc_error, &fcs_ok, &vlan_stripped,
&vlan);
skb_trim(skb, fcp_bytes_written);
fr_sof(fp) = sof;
fr_eof(fp) = eof;
} else if (type == CQ_DESC_TYPE_RQ_ENET) {
cq_enet_rq_desc_dec((struct cq_enet_rq_desc *)cq_desc,
&type, &color, &q_number, &completed_index,
&ingress_port, &fcoe, &eop, &sop,
&rss_type, &csum_not_calc, &rss_hash,
&bytes_written, &packet_error,
&vlan_stripped, &vlan, &checksum,
&fcoe_sof, &fcoe_fc_crc_ok,
&fcoe_enc_error, &fcoe_eof,
&tcp_udp_csum_ok, &udp, &tcp,
&ipv4_csum_ok, &ipv6, &ipv4,
&ipv4_fragment, &fcs_ok);
skb_trim(skb, bytes_written);
if (!fcs_ok) {
atomic64_inc(&fnic_stats->misc_stats.frame_errors);
FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
"fcs error. dropping packet.\n");
goto drop;
}
if (fnic_import_rq_eth_pkt(fnic, skb))
return;
} else {
/* wrong CQ type*/
shost_printk(KERN_ERR, fnic->lport->host,
"fnic rq_cmpl wrong cq type x%x\n", type);
goto drop;
}
if (!fcs_ok || packet_error || !fcoe_fc_crc_ok || fcoe_enc_error) {
atomic64_inc(&fnic_stats->misc_stats.frame_errors);
FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
"fnic rq_cmpl fcoe x%x fcsok x%x"
" pkterr x%x fcoe_fc_crc_ok x%x, fcoe_enc_err"
" x%x\n",
fcoe, fcs_ok, packet_error,
fcoe_fc_crc_ok, fcoe_enc_error);
goto drop;
}
spin_lock_irqsave(&fnic->fnic_lock, flags);
if (fnic->stop_rx_link_events) {
spin_unlock_irqrestore(&fnic->fnic_lock, flags);
goto drop;
}
fr_dev(fp) = fnic->lport;
spin_unlock_irqrestore(&fnic->fnic_lock, flags);
if ((fnic_fc_trace_set_data(fnic->lport->host->host_no, FNIC_FC_RECV,
(char *)skb->data, skb->len)) != 0) {
printk(KERN_ERR "fnic ctlr frame trace error!!!");
}
skb_queue_tail(&fnic->frame_queue, skb);
queue_work(fnic_event_queue, &fnic->frame_work);
return;
drop:
dev_kfree_skb_irq(skb);
}
static int fnic_rq_cmpl_handler_cont(struct vnic_dev *vdev,
struct cq_desc *cq_desc, u8 type,
u16 q_number, u16 completed_index,
void *opaque)
{
struct fnic *fnic = vnic_dev_priv(vdev);
vnic_rq_service(&fnic->rq[q_number], cq_desc, completed_index,
VNIC_RQ_RETURN_DESC, fnic_rq_cmpl_frame_recv,
NULL);
return 0;
}
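/*
* fnic_rq_cmpl_handler - service all receive completion queues, processing
* up to rq_work_to_do entries per queue, and replenish the receive rings
* afterwards.
*/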
int fnic_rq_cmpl_handler(struct fnic *fnic, int rq_work_to_do)
{
unsigned int tot_rq_work_done = 0, cur_work_done;
unsigned int i;
int err;
for (i = 0; i < fnic->rq_count; i++) {
cur_work_done = vnic_cq_service(&fnic->cq[i], rq_work_to_do,
fnic_rq_cmpl_handler_cont,
NULL);
if (cur_work_done) {
err = vnic_rq_fill(&fnic->rq[i], fnic_alloc_rq_frame);
if (err)
shost_printk(KERN_ERR, fnic->lport->host,
"fnic_alloc_rq_frame can't alloc"
" frame\n");
}
tot_rq_work_done += cur_work_done;
}
return tot_rq_work_done;
}
/*
* This function is called once at init time to allocate and fill RQ
* buffers. Subsequently, it is called in the interrupt context after RQ
* buffer processing to replenish the buffers in the RQ
*/
int fnic_alloc_rq_frame(struct vnic_rq *rq)
{
struct fnic *fnic = vnic_dev_priv(rq->vdev);
struct sk_buff *skb;
u16 len;
dma_addr_t pa;
int r;
len = FC_FRAME_HEADROOM + FC_MAX_FRAME + FC_FRAME_TAILROOM;
skb = dev_alloc_skb(len);
if (!skb) {
FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
"Unable to allocate RQ sk_buff\n");
return -ENOMEM;
}
skb_reset_mac_header(skb);
skb_reset_transport_header(skb);
skb_reset_network_header(skb);
skb_put(skb, len);
pa = dma_map_single(&fnic->pdev->dev, skb->data, len, DMA_FROM_DEVICE);
if (dma_mapping_error(&fnic->pdev->dev, pa)) {
r = -ENOMEM;
printk(KERN_ERR "PCI mapping failed with error %d\n", r);
goto free_skb;
}
fnic_queue_rq_desc(rq, skb, pa, len);
return 0;
free_skb:
kfree_skb(skb);
return r;
}
void fnic_free_rq_buf(struct vnic_rq *rq, struct vnic_rq_buf *buf)
{
struct fc_frame *fp = buf->os_buf;
struct fnic *fnic = vnic_dev_priv(rq->vdev);
dma_unmap_single(&fnic->pdev->dev, buf->dma_addr, buf->len,
DMA_FROM_DEVICE);
dev_kfree_skb(fp_skb(fp));
buf->os_buf = NULL;
}
/**
* fnic_eth_send() - Send Ethernet frame.
* @fip: fcoe_ctlr instance.
* @skb: Ethernet Frame, FIP, without VLAN encapsulation.
*/
void fnic_eth_send(struct fcoe_ctlr *fip, struct sk_buff *skb)
{
struct fnic *fnic = fnic_from_ctlr(fip);
struct vnic_wq *wq = &fnic->wq[0];
dma_addr_t pa;
struct ethhdr *eth_hdr;
struct vlan_ethhdr *vlan_hdr;
unsigned long flags;
if (!fnic->vlan_hw_insert) {
eth_hdr = (struct ethhdr *)skb_mac_header(skb);
vlan_hdr = skb_push(skb, sizeof(*vlan_hdr) - sizeof(*eth_hdr));
memcpy(vlan_hdr, eth_hdr, 2 * ETH_ALEN);
vlan_hdr->h_vlan_proto = htons(ETH_P_8021Q);
vlan_hdr->h_vlan_encapsulated_proto = eth_hdr->h_proto;
vlan_hdr->h_vlan_TCI = htons(fnic->vlan_id);
if ((fnic_fc_trace_set_data(fnic->lport->host->host_no,
FNIC_FC_SEND|0x80, (char *)eth_hdr, skb->len)) != 0) {
printk(KERN_ERR "fnic ctlr frame trace error\n");
}
} else {
if ((fnic_fc_trace_set_data(fnic->lport->host->host_no,
FNIC_FC_SEND|0x80, (char *)skb->data, skb->len)) != 0) {
printk(KERN_ERR "fnic ctlr frame trace error\n");
}
}
pa = dma_map_single(&fnic->pdev->dev, skb->data, skb->len,
DMA_TO_DEVICE);
if (dma_mapping_error(&fnic->pdev->dev, pa)) {
printk(KERN_ERR "DMA mapping failed\n");
goto free_skb;
}
spin_lock_irqsave(&fnic->wq_lock[0], flags);
if (!vnic_wq_desc_avail(wq))
goto irq_restore;
fnic_queue_wq_eth_desc(wq, skb, pa, skb->len,
0 /* hw inserts cos value */,
fnic->vlan_id, 1);
spin_unlock_irqrestore(&fnic->wq_lock[0], flags);
return;
irq_restore:
spin_unlock_irqrestore(&fnic->wq_lock[0], flags);
dma_unmap_single(&fnic->pdev->dev, pa, skb->len, DMA_TO_DEVICE);
free_skb:
kfree_skb(skb);
}
/*
* Send FC frame.
*/
static int fnic_send_frame(struct fnic *fnic, struct fc_frame *fp)
{
struct vnic_wq *wq = &fnic->wq[0];
struct sk_buff *skb;
dma_addr_t pa;
struct ethhdr *eth_hdr;
struct vlan_ethhdr *vlan_hdr;
struct fcoe_hdr *fcoe_hdr;
struct fc_frame_header *fh;
u32 tot_len, eth_hdr_len;
int ret = 0;
unsigned long flags;
fh = fc_frame_header_get(fp);
skb = fp_skb(fp);
if (unlikely(fh->fh_r_ctl == FC_RCTL_ELS_REQ) &&
fcoe_ctlr_els_send(&fnic->ctlr, fnic->lport, skb))
return 0;
if (!fnic->vlan_hw_insert) {
eth_hdr_len = sizeof(*vlan_hdr) + sizeof(*fcoe_hdr);
vlan_hdr = skb_push(skb, eth_hdr_len);
eth_hdr = (struct ethhdr *)vlan_hdr;
vlan_hdr->h_vlan_proto = htons(ETH_P_8021Q);
vlan_hdr->h_vlan_encapsulated_proto = htons(ETH_P_FCOE);
vlan_hdr->h_vlan_TCI = htons(fnic->vlan_id);
fcoe_hdr = (struct fcoe_hdr *)(vlan_hdr + 1);
} else {
eth_hdr_len = sizeof(*eth_hdr) + sizeof(*fcoe_hdr);
eth_hdr = skb_push(skb, eth_hdr_len);
eth_hdr->h_proto = htons(ETH_P_FCOE);
fcoe_hdr = (struct fcoe_hdr *)(eth_hdr + 1);
}
if (fnic->ctlr.map_dest)
fc_fcoe_set_mac(eth_hdr->h_dest, fh->fh_d_id);
else
memcpy(eth_hdr->h_dest, fnic->ctlr.dest_addr, ETH_ALEN);
memcpy(eth_hdr->h_source, fnic->data_src_addr, ETH_ALEN);
tot_len = skb->len;
BUG_ON(tot_len % 4);
memset(fcoe_hdr, 0, sizeof(*fcoe_hdr));
fcoe_hdr->fcoe_sof = fr_sof(fp);
if (FC_FCOE_VER)
FC_FCOE_ENCAPS_VER(fcoe_hdr, FC_FCOE_VER);
pa = dma_map_single(&fnic->pdev->dev, eth_hdr, tot_len, DMA_TO_DEVICE);
if (dma_mapping_error(&fnic->pdev->dev, pa)) {
ret = -ENOMEM;
printk(KERN_ERR "DMA map failed with error %d\n", ret);
goto free_skb_on_err;
}
if ((fnic_fc_trace_set_data(fnic->lport->host->host_no, FNIC_FC_SEND,
(char *)eth_hdr, tot_len)) != 0) {
printk(KERN_ERR "fnic ctlr frame trace error\n");
}
spin_lock_irqsave(&fnic->wq_lock[0], flags);
if (!vnic_wq_desc_avail(wq)) {
dma_unmap_single(&fnic->pdev->dev, pa, tot_len, DMA_TO_DEVICE);
ret = -1;
goto irq_restore;
}
fnic_queue_wq_desc(wq, skb, pa, tot_len, fr_eof(fp),
0 /* hw inserts cos value */,
fnic->vlan_id, 1, 1, 1);
irq_restore:
spin_unlock_irqrestore(&fnic->wq_lock[0], flags);
free_skb_on_err:
if (ret)
dev_kfree_skb_any(fp_skb(fp));
return ret;
}
/*
* fnic_send
* Routine to send a raw frame
*/
int fnic_send(struct fc_lport *lp, struct fc_frame *fp)
{
struct fnic *fnic = lport_priv(lp);
unsigned long flags;
if (fnic->in_remove) {
dev_kfree_skb(fp_skb(fp));
return -1;
}
/*
* Queue frame if in a transitional state.
* This occurs while registering the Port_ID / MAC address after FLOGI.
*/
spin_lock_irqsave(&fnic->fnic_lock, flags);
if (fnic->state != FNIC_IN_FC_MODE && fnic->state != FNIC_IN_ETH_MODE) {
skb_queue_tail(&fnic->tx_queue, fp_skb(fp));
spin_unlock_irqrestore(&fnic->fnic_lock, flags);
return 0;
}
spin_unlock_irqrestore(&fnic->fnic_lock, flags);
return fnic_send_frame(fnic, fp);
}
/**
* fnic_flush_tx() - send queued frames.
* @fnic: fnic device
*
* Send frames that were waiting to go out in FC or Ethernet mode.
* Whenever changing modes we purge queued frames, so these frames should
* be queued for the stable mode that we're in, either FC or Ethernet.
*
* Called without fnic_lock held.
*/
void fnic_flush_tx(struct fnic *fnic)
{
struct sk_buff *skb;
struct fc_frame *fp;
while ((skb = skb_dequeue(&fnic->tx_queue))) {
fp = (struct fc_frame *)skb;
fnic_send_frame(fnic, fp);
}
}
/**
* fnic_set_eth_mode() - put fnic into ethernet mode.
* @fnic: fnic device
*
* Called without fnic lock held.
*/
static void fnic_set_eth_mode(struct fnic *fnic)
{
unsigned long flags;
enum fnic_state old_state;
int ret;
spin_lock_irqsave(&fnic->fnic_lock, flags);
again:
old_state = fnic->state;
switch (old_state) {
case FNIC_IN_FC_MODE:
case FNIC_IN_ETH_TRANS_FC_MODE:
default:
fnic->state = FNIC_IN_FC_TRANS_ETH_MODE;
spin_unlock_irqrestore(&fnic->fnic_lock, flags);
ret = fnic_fw_reset_handler(fnic);
spin_lock_irqsave(&fnic->fnic_lock, flags);
if (fnic->state != FNIC_IN_FC_TRANS_ETH_MODE)
goto again;
if (ret)
fnic->state = old_state;
break;
case FNIC_IN_FC_TRANS_ETH_MODE:
case FNIC_IN_ETH_MODE:
break;
}
spin_unlock_irqrestore(&fnic->fnic_lock, flags);
}
static void fnic_wq_complete_frame_send(struct vnic_wq *wq,
struct cq_desc *cq_desc,
struct vnic_wq_buf *buf, void *opaque)
{
struct sk_buff *skb = buf->os_buf;
struct fc_frame *fp = (struct fc_frame *)skb;
struct fnic *fnic = vnic_dev_priv(wq->vdev);
dma_unmap_single(&fnic->pdev->dev, buf->dma_addr, buf->len,
DMA_TO_DEVICE);
dev_kfree_skb_irq(fp_skb(fp));
buf->os_buf = NULL;
}
static int fnic_wq_cmpl_handler_cont(struct vnic_dev *vdev,
struct cq_desc *cq_desc, u8 type,
u16 q_number, u16 completed_index,
void *opaque)
{
struct fnic *fnic = vnic_dev_priv(vdev);
unsigned long flags;
spin_lock_irqsave(&fnic->wq_lock[q_number], flags);
vnic_wq_service(&fnic->wq[q_number], cq_desc, completed_index,
fnic_wq_complete_frame_send, NULL);
spin_unlock_irqrestore(&fnic->wq_lock[q_number], flags);
return 0;
}
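/*
* fnic_wq_cmpl_handler - service the raw work queue completion queues and
* free the buffers of frames the hardware has finished sending.
*/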
int fnic_wq_cmpl_handler(struct fnic *fnic, int work_to_do)
{
unsigned int wq_work_done = 0;
unsigned int i;
for (i = 0; i < fnic->raw_wq_count; i++) {
wq_work_done += vnic_cq_service(&fnic->cq[fnic->rq_count+i],
work_to_do,
fnic_wq_cmpl_handler_cont,
NULL);
}
return wq_work_done;
}
void fnic_free_wq_buf(struct vnic_wq *wq, struct vnic_wq_buf *buf)
{
struct fc_frame *fp = buf->os_buf;
struct fnic *fnic = vnic_dev_priv(wq->vdev);
dma_unmap_single(&fnic->pdev->dev, buf->dma_addr, buf->len,
DMA_TO_DEVICE);
dev_kfree_skb(fp_skb(fp));
buf->os_buf = NULL;
}
void fnic_fcoe_reset_vlans(struct fnic *fnic)
{
unsigned long flags;
struct fcoe_vlan *vlan;
struct fcoe_vlan *next;
/*
* Indicate a link down to fcoe so that all FCFs are freed.
* This might not be required since we already did it before
* sending the VLAN discovery request.
*/
spin_lock_irqsave(&fnic->vlans_lock, flags);
if (!list_empty(&fnic->vlans)) {
list_for_each_entry_safe(vlan, next, &fnic->vlans, list) {
list_del(&vlan->list);
kfree(vlan);
}
}
spin_unlock_irqrestore(&fnic->vlans_lock, flags);
}
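/*
* fnic_handle_fip_timer - FIP timer expiry handler for FCoE VLAN discovery.
* Retries solicitation on the current VLAN, moves to the next VLAN after
* FCOE_CTLR_MAX_SOL attempts and restarts VLAN discovery when the list is
* exhausted.
*/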
void fnic_handle_fip_timer(struct fnic *fnic)
{
unsigned long flags;
struct fcoe_vlan *vlan;
struct fnic_stats *fnic_stats = &fnic->fnic_stats;
u64 sol_time;
spin_lock_irqsave(&fnic->fnic_lock, flags);
if (fnic->stop_rx_link_events) {
spin_unlock_irqrestore(&fnic->fnic_lock, flags);
return;
}
spin_unlock_irqrestore(&fnic->fnic_lock, flags);
if (fnic->ctlr.mode == FIP_MODE_NON_FIP)
return;
spin_lock_irqsave(&fnic->vlans_lock, flags);
if (list_empty(&fnic->vlans)) {
spin_unlock_irqrestore(&fnic->vlans_lock, flags);
/* no vlans available, try again */
if (unlikely(fnic_log_level & FNIC_FCS_LOGGING))
if (printk_ratelimit())
shost_printk(KERN_DEBUG, fnic->lport->host,
"Start VLAN Discovery\n");
fnic_event_enq(fnic, FNIC_EVT_START_VLAN_DISC);
return;
}
vlan = list_first_entry(&fnic->vlans, struct fcoe_vlan, list);
FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
"fip_timer: vlan %d state %d sol_count %d\n",
vlan->vid, vlan->state, vlan->sol_count);
switch (vlan->state) {
case FIP_VLAN_USED:
FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
"FIP VLAN is selected for FC transaction\n");
spin_unlock_irqrestore(&fnic->vlans_lock, flags);
break;
case FIP_VLAN_FAILED:
spin_unlock_irqrestore(&fnic->vlans_lock, flags);
/* if all vlans are in failed state, restart vlan disc */
if (unlikely(fnic_log_level & FNIC_FCS_LOGGING))
if (printk_ratelimit())
shost_printk(KERN_DEBUG, fnic->lport->host,
"Start VLAN Discovery\n");
fnic_event_enq(fnic, FNIC_EVT_START_VLAN_DISC);
break;
case FIP_VLAN_SENT:
if (vlan->sol_count >= FCOE_CTLR_MAX_SOL) {
/*
* no response on this vlan, remove from the list.
* Try the next vlan
*/
FNIC_FCS_DBG(KERN_INFO, fnic->lport->host,
"Dequeue this VLAN ID %d from list\n",
vlan->vid);
list_del(&vlan->list);
kfree(vlan);
vlan = NULL;
if (list_empty(&fnic->vlans)) {
/* we exhausted all vlans, restart vlan disc */
spin_unlock_irqrestore(&fnic->vlans_lock,
flags);
FNIC_FCS_DBG(KERN_INFO, fnic->lport->host,
"fip_timer: vlan list empty, "
"trigger vlan disc\n");
fnic_event_enq(fnic, FNIC_EVT_START_VLAN_DISC);
return;
}
/* check the next vlan */
vlan = list_first_entry(&fnic->vlans, struct fcoe_vlan,
list);
fnic->set_vlan(fnic, vlan->vid);
vlan->state = FIP_VLAN_SENT; /* sent now */
}
spin_unlock_irqrestore(&fnic->vlans_lock, flags);
atomic64_inc(&fnic_stats->vlan_stats.sol_expiry_count);
vlan->sol_count++;
sol_time = jiffies + msecs_to_jiffies
(FCOE_CTLR_START_DELAY);
mod_timer(&fnic->fip_timer, round_jiffies(sol_time));
break;
}
}
| linux-master | drivers/scsi/fnic/fnic_fcs.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright 2008 Cisco Systems, Inc. All rights reserved.
* Copyright 2007 Nuova Systems, Inc. All rights reserved.
*/
#include <linux/mempool.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/workqueue.h>
#include <linux/pci.h>
#include <linux/scatterlist.h>
#include <linux/skbuff.h>
#include <linux/spinlock.h>
#include <linux/etherdevice.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <linux/delay.h>
#include <linux/gfp.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_tcq.h>
#include <scsi/fc/fc_els.h>
#include <scsi/fc/fc_fcoe.h>
#include <scsi/libfc.h>
#include <scsi/fc_frame.h>
#include "fnic_io.h"
#include "fnic.h"
const char *fnic_state_str[] = {
[FNIC_IN_FC_MODE] = "FNIC_IN_FC_MODE",
[FNIC_IN_FC_TRANS_ETH_MODE] = "FNIC_IN_FC_TRANS_ETH_MODE",
[FNIC_IN_ETH_MODE] = "FNIC_IN_ETH_MODE",
[FNIC_IN_ETH_TRANS_FC_MODE] = "FNIC_IN_ETH_TRANS_FC_MODE",
};
static const char *fnic_ioreq_state_str[] = {
[FNIC_IOREQ_NOT_INITED] = "FNIC_IOREQ_NOT_INITED",
[FNIC_IOREQ_CMD_PENDING] = "FNIC_IOREQ_CMD_PENDING",
[FNIC_IOREQ_ABTS_PENDING] = "FNIC_IOREQ_ABTS_PENDING",
[FNIC_IOREQ_ABTS_COMPLETE] = "FNIC_IOREQ_ABTS_COMPLETE",
[FNIC_IOREQ_CMD_COMPLETE] = "FNIC_IOREQ_CMD_COMPLETE",
};
static const char *fcpio_status_str[] = {
[FCPIO_SUCCESS] = "FCPIO_SUCCESS", /*0x0*/
[FCPIO_INVALID_HEADER] = "FCPIO_INVALID_HEADER",
[FCPIO_OUT_OF_RESOURCE] = "FCPIO_OUT_OF_RESOURCE",
[FCPIO_INVALID_PARAM] = "FCPIO_INVALID_PARAM",
[FCPIO_REQ_NOT_SUPPORTED] = "FCPIO_REQ_NOT_SUPPORTED",
[FCPIO_IO_NOT_FOUND] = "FCPIO_IO_NOT_FOUND",
[FCPIO_ABORTED] = "FCPIO_ABORTED", /*0x41*/
[FCPIO_TIMEOUT] = "FCPIO_TIMEOUT",
[FCPIO_SGL_INVALID] = "FCPIO_SGL_INVALID",
[FCPIO_MSS_INVALID] = "FCPIO_MSS_INVALID",
[FCPIO_DATA_CNT_MISMATCH] = "FCPIO_DATA_CNT_MISMATCH",
[FCPIO_FW_ERR] = "FCPIO_FW_ERR",
[FCPIO_ITMF_REJECTED] = "FCPIO_ITMF_REJECTED",
[FCPIO_ITMF_FAILED] = "FCPIO_ITMF_FAILED",
[FCPIO_ITMF_INCORRECT_LUN] = "FCPIO_ITMF_INCORRECT_LUN",
[FCPIO_CMND_REJECTED] = "FCPIO_CMND_REJECTED",
[FCPIO_NO_PATH_AVAIL] = "FCPIO_NO_PATH_AVAIL",
[FCPIO_PATH_FAILED] = "FCPIO_PATH_FAILED",
[FCPIO_LUNMAP_CHNG_PEND] = "FCPIO_LUNMAP_CHNG_PEND",
};
const char *fnic_state_to_str(unsigned int state)
{
if (state >= ARRAY_SIZE(fnic_state_str) || !fnic_state_str[state])
return "unknown";
return fnic_state_str[state];
}
static const char *fnic_ioreq_state_to_str(unsigned int state)
{
if (state >= ARRAY_SIZE(fnic_ioreq_state_str) ||
!fnic_ioreq_state_str[state])
return "unknown";
return fnic_ioreq_state_str[state];
}
static const char *fnic_fcpio_status_to_str(unsigned int status)
{
if (status >= ARRAY_SIZE(fcpio_status_str) || !fcpio_status_str[status])
return "unknown";
return fcpio_status_str[status];
}
static void fnic_cleanup_io(struct fnic *fnic);
static inline spinlock_t *fnic_io_lock_hash(struct fnic *fnic,
struct scsi_cmnd *sc)
{
u32 hash = scsi_cmd_to_rq(sc)->tag & (FNIC_IO_LOCKS - 1);
return &fnic->io_req_lock[hash];
}
static inline spinlock_t *fnic_io_lock_tag(struct fnic *fnic,
int tag)
{
return &fnic->io_req_lock[tag & (FNIC_IO_LOCKS - 1)];
}
/*
* Unmap the data buffer and sense buffer for an io_req,
* also unmap and free the device-private scatter/gather list.
*/
static void fnic_release_ioreq_buf(struct fnic *fnic,
struct fnic_io_req *io_req,
struct scsi_cmnd *sc)
{
if (io_req->sgl_list_pa)
dma_unmap_single(&fnic->pdev->dev, io_req->sgl_list_pa,
sizeof(io_req->sgl_list[0]) * io_req->sgl_cnt,
DMA_TO_DEVICE);
scsi_dma_unmap(sc);
if (io_req->sgl_cnt)
mempool_free(io_req->sgl_list_alloc,
fnic->io_sgl_pool[io_req->sgl_type]);
if (io_req->sense_buf_pa)
dma_unmap_single(&fnic->pdev->dev, io_req->sense_buf_pa,
SCSI_SENSE_BUFFERSIZE, DMA_FROM_DEVICE);
}
/* Free up Copy Wq descriptors. Called with copy_wq lock held */
static int free_wq_copy_descs(struct fnic *fnic, struct vnic_wq_copy *wq)
{
/* if no Ack received from firmware, then nothing to clean */
if (!fnic->fw_ack_recd[0])
return 1;
/*
* Update desc_available count based on number of freed descriptors
* Account for wraparound
*/
if (wq->to_clean_index <= fnic->fw_ack_index[0])
wq->ring.desc_avail += (fnic->fw_ack_index[0]
- wq->to_clean_index + 1);
else
wq->ring.desc_avail += (wq->ring.desc_count
- wq->to_clean_index
+ fnic->fw_ack_index[0] + 1);
/*
* Just bump the clean index to ack_index + 1, accounting for wraparound.
* This essentially frees up all descriptors between to_clean_index and
* fw_ack_index, both inclusive.
*/
wq->to_clean_index =
(fnic->fw_ack_index[0] + 1) % wq->ring.desc_count;
/* we have processed the acks received so far */
fnic->fw_ack_recd[0] = 0;
return 0;
}
/*
* __fnic_set_state_flags
* Sets/Clears bits in fnic's state_flags
*/
void
__fnic_set_state_flags(struct fnic *fnic, unsigned long st_flags,
unsigned long clearbits)
{
unsigned long flags = 0;
unsigned long host_lock_flags = 0;
spin_lock_irqsave(&fnic->fnic_lock, flags);
spin_lock_irqsave(fnic->lport->host->host_lock, host_lock_flags);
if (clearbits)
fnic->state_flags &= ~st_flags;
else
fnic->state_flags |= st_flags;
spin_unlock_irqrestore(fnic->lport->host->host_lock, host_lock_flags);
spin_unlock_irqrestore(&fnic->fnic_lock, flags);
}
/*
* fnic_fw_reset_handler
* Routine to send reset msg to fw
*/
int fnic_fw_reset_handler(struct fnic *fnic)
{
struct vnic_wq_copy *wq = &fnic->wq_copy[0];
int ret = 0;
unsigned long flags;
/* indicate fwreset to io path */
fnic_set_state_flags(fnic, FNIC_FLAGS_FWRESET);
skb_queue_purge(&fnic->frame_queue);
skb_queue_purge(&fnic->tx_queue);
/* wait for io cmpl */
while (atomic_read(&fnic->in_flight))
schedule_timeout_uninterruptible(msecs_to_jiffies(1));
spin_lock_irqsave(&fnic->wq_copy_lock[0], flags);
if (vnic_wq_copy_desc_avail(wq) <= fnic->wq_copy_desc_low[0])
free_wq_copy_descs(fnic, wq);
if (!vnic_wq_copy_desc_avail(wq))
ret = -EAGAIN;
else {
fnic_queue_wq_copy_desc_fw_reset(wq, SCSI_NO_TAG);
atomic64_inc(&fnic->fnic_stats.fw_stats.active_fw_reqs);
if (atomic64_read(&fnic->fnic_stats.fw_stats.active_fw_reqs) >
atomic64_read(&fnic->fnic_stats.fw_stats.max_fw_reqs))
atomic64_set(&fnic->fnic_stats.fw_stats.max_fw_reqs,
atomic64_read(
&fnic->fnic_stats.fw_stats.active_fw_reqs));
}
spin_unlock_irqrestore(&fnic->wq_copy_lock[0], flags);
if (!ret) {
atomic64_inc(&fnic->fnic_stats.reset_stats.fw_resets);
FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
"Issued fw reset\n");
} else {
fnic_clear_state_flags(fnic, FNIC_FLAGS_FWRESET);
FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
"Failed to issue fw reset\n");
}
return ret;
}
/*
* fnic_flogi_reg_handler
* Routine to send flogi register msg to fw
*/
int fnic_flogi_reg_handler(struct fnic *fnic, u32 fc_id)
{
struct vnic_wq_copy *wq = &fnic->wq_copy[0];
enum fcpio_flogi_reg_format_type format;
struct fc_lport *lp = fnic->lport;
u8 gw_mac[ETH_ALEN];
int ret = 0;
unsigned long flags;
spin_lock_irqsave(&fnic->wq_copy_lock[0], flags);
if (vnic_wq_copy_desc_avail(wq) <= fnic->wq_copy_desc_low[0])
free_wq_copy_descs(fnic, wq);
if (!vnic_wq_copy_desc_avail(wq)) {
ret = -EAGAIN;
goto flogi_reg_ioreq_end;
}
if (fnic->ctlr.map_dest) {
eth_broadcast_addr(gw_mac);
format = FCPIO_FLOGI_REG_DEF_DEST;
} else {
memcpy(gw_mac, fnic->ctlr.dest_addr, ETH_ALEN);
format = FCPIO_FLOGI_REG_GW_DEST;
}
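/*
 * FIP-capable adapters that are not in map_dest mode register via the
 * FIP descriptor; otherwise use the plain FLOGI registration with either
 * broadcast (map_dest) or gateway addressing chosen above.
 */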
if ((fnic->config.flags & VFCF_FIP_CAPABLE) && !fnic->ctlr.map_dest) {
fnic_queue_wq_copy_desc_fip_reg(wq, SCSI_NO_TAG,
fc_id, gw_mac,
fnic->data_src_addr,
lp->r_a_tov, lp->e_d_tov);
FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
"FLOGI FIP reg issued fcid %x src %pM dest %pM\n",
fc_id, fnic->data_src_addr, gw_mac);
} else {
fnic_queue_wq_copy_desc_flogi_reg(wq, SCSI_NO_TAG,
format, fc_id, gw_mac);
FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
"FLOGI reg issued fcid %x map %d dest %pM\n",
fc_id, fnic->ctlr.map_dest, gw_mac);
}
atomic64_inc(&fnic->fnic_stats.fw_stats.active_fw_reqs);
if (atomic64_read(&fnic->fnic_stats.fw_stats.active_fw_reqs) >
atomic64_read(&fnic->fnic_stats.fw_stats.max_fw_reqs))
atomic64_set(&fnic->fnic_stats.fw_stats.max_fw_reqs,
atomic64_read(&fnic->fnic_stats.fw_stats.active_fw_reqs));
flogi_reg_ioreq_end:
spin_unlock_irqrestore(&fnic->wq_copy_lock[0], flags);
return ret;
}
/*
* fnic_queue_wq_copy_desc
* Routine to enqueue a wq copy desc
*/
static inline int fnic_queue_wq_copy_desc(struct fnic *fnic,
struct vnic_wq_copy *wq,
struct fnic_io_req *io_req,
struct scsi_cmnd *sc,
int sg_count)
{
struct scatterlist *sg;
struct fc_rport *rport = starget_to_rport(scsi_target(sc->device));
struct fc_rport_libfc_priv *rp = rport->dd_data;
struct host_sg_desc *desc;
struct misc_stats *misc_stats = &fnic->fnic_stats.misc_stats;
unsigned int i;
unsigned long intr_flags;
int flags;
u8 exch_flags;
struct scsi_lun fc_lun;
if (sg_count) {
/* For each SGE, create a device desc entry */
desc = io_req->sgl_list;
for_each_sg(scsi_sglist(sc), sg, sg_count, i) {
desc->addr = cpu_to_le64(sg_dma_address(sg));
desc->len = cpu_to_le32(sg_dma_len(sg));
desc->_resvd = 0;
desc++;
}
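/*
 * The SGL array itself is handed to the firmware, so map the
 * descriptor list for device access as well.
 */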
io_req->sgl_list_pa = dma_map_single(&fnic->pdev->dev,
io_req->sgl_list,
sizeof(io_req->sgl_list[0]) * sg_count,
DMA_TO_DEVICE);
if (dma_mapping_error(&fnic->pdev->dev, io_req->sgl_list_pa)) {
printk(KERN_ERR "DMA mapping failed\n");
return SCSI_MLQUEUE_HOST_BUSY;
}
}
io_req->sense_buf_pa = dma_map_single(&fnic->pdev->dev,
sc->sense_buffer,
SCSI_SENSE_BUFFERSIZE,
DMA_FROM_DEVICE);
if (dma_mapping_error(&fnic->pdev->dev, io_req->sense_buf_pa)) {
dma_unmap_single(&fnic->pdev->dev, io_req->sgl_list_pa,
sizeof(io_req->sgl_list[0]) * sg_count,
DMA_TO_DEVICE);
printk(KERN_ERR "DMA mapping failed\n");
return SCSI_MLQUEUE_HOST_BUSY;
}
int_to_scsilun(sc->device->lun, &fc_lun);
/* Enqueue the descriptor in the Copy WQ */
spin_lock_irqsave(&fnic->wq_copy_lock[0], intr_flags);
if (vnic_wq_copy_desc_avail(wq) <= fnic->wq_copy_desc_low[0])
free_wq_copy_descs(fnic, wq);
if (unlikely(!vnic_wq_copy_desc_avail(wq))) {
spin_unlock_irqrestore(&fnic->wq_copy_lock[0], intr_flags);
FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host,
"fnic_queue_wq_copy_desc failure - no descriptors\n");
atomic64_inc(&misc_stats->io_cpwq_alloc_failures);
return SCSI_MLQUEUE_HOST_BUSY;
}
flags = 0;
if (sc->sc_data_direction == DMA_FROM_DEVICE)
flags = FCPIO_ICMND_RDDATA;
else if (sc->sc_data_direction == DMA_TO_DEVICE)
flags = FCPIO_ICMND_WRDATA;
exch_flags = 0;
if ((fnic->config.flags & VFCF_FCP_SEQ_LVL_ERR) &&
(rp->flags & FC_RP_FLAGS_RETRY))
exch_flags |= FCPIO_ICMND_SRFLAG_RETRY;
fnic_queue_wq_copy_desc_icmnd_16(wq, scsi_cmd_to_rq(sc)->tag,
0, exch_flags, io_req->sgl_cnt,
SCSI_SENSE_BUFFERSIZE,
io_req->sgl_list_pa,
io_req->sense_buf_pa,
0, /* scsi cmd ref, always 0 */
FCPIO_ICMND_PTA_SIMPLE,
/* scsi pri and tag */
flags, /* command flags */
sc->cmnd, sc->cmd_len,
scsi_bufflen(sc),
fc_lun.scsi_lun, io_req->port_id,
rport->maxframe_size, rp->r_a_tov,
rp->e_d_tov);
atomic64_inc(&fnic->fnic_stats.fw_stats.active_fw_reqs);
if (atomic64_read(&fnic->fnic_stats.fw_stats.active_fw_reqs) >
atomic64_read(&fnic->fnic_stats.fw_stats.max_fw_reqs))
atomic64_set(&fnic->fnic_stats.fw_stats.max_fw_reqs,
atomic64_read(&fnic->fnic_stats.fw_stats.active_fw_reqs));
spin_unlock_irqrestore(&fnic->wq_copy_lock[0], intr_flags);
return 0;
}
/*
* fnic_queuecommand
* Routine to send a scsi cdb
* Called with host_lock held and interrupts disabled.
*/
static int fnic_queuecommand_lck(struct scsi_cmnd *sc)
{
void (*done)(struct scsi_cmnd *) = scsi_done;
const int tag = scsi_cmd_to_rq(sc)->tag;
struct fc_lport *lp = shost_priv(sc->device->host);
struct fc_rport *rport;
struct fnic_io_req *io_req = NULL;
struct fnic *fnic = lport_priv(lp);
struct fnic_stats *fnic_stats = &fnic->fnic_stats;
struct vnic_wq_copy *wq;
int ret;
u64 cmd_trace;
int sg_count = 0;
unsigned long flags = 0;
unsigned long ptr;
spinlock_t *io_lock = NULL;
int io_lock_acquired = 0;
struct fc_rport_libfc_priv *rp;
if (unlikely(fnic_chk_state_flags_locked(fnic, FNIC_FLAGS_IO_BLOCKED)))
return SCSI_MLQUEUE_HOST_BUSY;
if (unlikely(fnic_chk_state_flags_locked(fnic, FNIC_FLAGS_FWRESET)))
return SCSI_MLQUEUE_HOST_BUSY;
rport = starget_to_rport(scsi_target(sc->device));
if (!rport) {
FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
"returning DID_NO_CONNECT for IO as rport is NULL\n");
sc->result = DID_NO_CONNECT << 16;
done(sc);
return 0;
}
ret = fc_remote_port_chkready(rport);
if (ret) {
FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
"rport is not ready\n");
atomic64_inc(&fnic_stats->misc_stats.rport_not_ready);
sc->result = ret;
done(sc);
return 0;
}
rp = rport->dd_data;
if (!rp || rp->rp_state == RPORT_ST_DELETE) {
FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
"rport 0x%x removed, returning DID_NO_CONNECT\n",
rport->port_id);
atomic64_inc(&fnic_stats->misc_stats.rport_not_ready);
sc->result = DID_NO_CONNECT<<16;
done(sc);
return 0;
}
if (rp->rp_state != RPORT_ST_READY) {
FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
"rport 0x%x in state 0x%x, returning DID_IMM_RETRY\n",
rport->port_id, rp->rp_state);
sc->result = DID_IMM_RETRY << 16;
done(sc);
return 0;
}
if (lp->state != LPORT_ST_READY || !(lp->link_up))
return SCSI_MLQUEUE_HOST_BUSY;
atomic_inc(&fnic->in_flight);
/*
 * Release the host lock and use driver-specific locks from here on.
 * Don't re-enable interrupts: they may have been disabled even before
 * the caller disabled them, so their prior state is unknown.
 */
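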
spin_unlock(lp->host->host_lock);
fnic_priv(sc)->state = FNIC_IOREQ_NOT_INITED;
fnic_priv(sc)->flags = FNIC_NO_FLAGS;
/* Get a new io_req for this SCSI IO */
io_req = mempool_alloc(fnic->io_req_pool, GFP_ATOMIC);
if (!io_req) {
atomic64_inc(&fnic_stats->io_stats.alloc_failures);
ret = SCSI_MLQUEUE_HOST_BUSY;
goto out;
}
memset(io_req, 0, sizeof(*io_req));
/* Map the data buffer */
sg_count = scsi_dma_map(sc);
if (sg_count < 0) {
FNIC_TRACE(fnic_queuecommand, sc->device->host->host_no,
tag, sc, 0, sc->cmnd[0], sg_count, fnic_priv(sc)->state);
mempool_free(io_req, fnic->io_req_pool);
ret = SCSI_MLQUEUE_HOST_BUSY;
goto out;
}
/* Determine the type of scatter/gather list we need */
io_req->sgl_cnt = sg_count;
io_req->sgl_type = FNIC_SGL_CACHE_DFLT;
if (sg_count > FNIC_DFLT_SG_DESC_CNT)
io_req->sgl_type = FNIC_SGL_CACHE_MAX;
if (sg_count) {
io_req->sgl_list =
mempool_alloc(fnic->io_sgl_pool[io_req->sgl_type],
GFP_ATOMIC);
if (!io_req->sgl_list) {
atomic64_inc(&fnic_stats->io_stats.alloc_failures);
ret = SCSI_MLQUEUE_HOST_BUSY;
scsi_dma_unmap(sc);
mempool_free(io_req, fnic->io_req_pool);
goto out;
}
/* Cache sgl list allocated address before alignment */
io_req->sgl_list_alloc = io_req->sgl_list;
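/*
 * The SGL handed to hardware must be FNIC_SG_DESC_ALIGN aligned;
 * round the working pointer up and keep the raw allocation in
 * sgl_list_alloc for the eventual mempool_free().
 */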
ptr = (unsigned long) io_req->sgl_list;
if (ptr % FNIC_SG_DESC_ALIGN) {
io_req->sgl_list = (struct host_sg_desc *)
(((unsigned long) ptr
+ FNIC_SG_DESC_ALIGN - 1)
& ~(FNIC_SG_DESC_ALIGN - 1));
}
}
/*
 * Acquire the io_lock before marking the IO as initialized.
 */
io_lock = fnic_io_lock_hash(fnic, sc);
spin_lock_irqsave(io_lock, flags);
/* initialize rest of io_req */
io_lock_acquired = 1;
io_req->port_id = rport->port_id;
io_req->start_time = jiffies;
fnic_priv(sc)->state = FNIC_IOREQ_CMD_PENDING;
fnic_priv(sc)->io_req = io_req;
fnic_priv(sc)->flags |= FNIC_IO_INITIALIZED;
/* create copy wq desc and enqueue it */
wq = &fnic->wq_copy[0];
ret = fnic_queue_wq_copy_desc(fnic, wq, io_req, sc, sg_count);
if (ret) {
/*
* In case another thread cancelled the request,
* refetch the pointer under the lock.
*/
FNIC_TRACE(fnic_queuecommand, sc->device->host->host_no,
tag, sc, 0, 0, 0, fnic_flags_and_state(sc));
io_req = fnic_priv(sc)->io_req;
fnic_priv(sc)->io_req = NULL;
fnic_priv(sc)->state = FNIC_IOREQ_CMD_COMPLETE;
spin_unlock_irqrestore(io_lock, flags);
if (io_req) {
fnic_release_ioreq_buf(fnic, io_req, sc);
mempool_free(io_req, fnic->io_req_pool);
}
atomic_dec(&fnic->in_flight);
/* acquire host lock before returning to SCSI */
spin_lock(lp->host->host_lock);
return ret;
} else {
atomic64_inc(&fnic_stats->io_stats.active_ios);
atomic64_inc(&fnic_stats->io_stats.num_ios);
if (atomic64_read(&fnic_stats->io_stats.active_ios) >
atomic64_read(&fnic_stats->io_stats.max_active_ios))
atomic64_set(&fnic_stats->io_stats.max_active_ios,
atomic64_read(&fnic_stats->io_stats.active_ios));
/* REVISIT: Use per IO lock in the final code */
fnic_priv(sc)->flags |= FNIC_IO_ISSUED;
}
out:
cmd_trace = ((u64)sc->cmnd[0] << 56 | (u64)sc->cmnd[7] << 40 |
(u64)sc->cmnd[8] << 32 | (u64)sc->cmnd[2] << 24 |
(u64)sc->cmnd[3] << 16 | (u64)sc->cmnd[4] << 8 |
sc->cmnd[5]);
FNIC_TRACE(fnic_queuecommand, sc->device->host->host_no,
tag, sc, io_req, sg_count, cmd_trace,
fnic_flags_and_state(sc));
/* release the io_lock only if it was actually acquired above */
if (io_lock_acquired)
spin_unlock_irqrestore(io_lock, flags);
atomic_dec(&fnic->in_flight);
/* acquire host lock before returning to SCSI */
spin_lock(lp->host->host_lock);
return ret;
}
DEF_SCSI_QCMD(fnic_queuecommand)
/*
* fnic_fcpio_fw_reset_cmpl_handler
* Routine to handle fw reset completion
*/
static int fnic_fcpio_fw_reset_cmpl_handler(struct fnic *fnic,
struct fcpio_fw_req *desc)
{
u8 type;
u8 hdr_status;
struct fcpio_tag tag;
int ret = 0;
unsigned long flags;
struct reset_stats *reset_stats = &fnic->fnic_stats.reset_stats;
fcpio_header_dec(&desc->hdr, &type, &hdr_status, &tag);
atomic64_inc(&reset_stats->fw_reset_completions);
/* Clean up all outstanding io requests */
fnic_cleanup_io(fnic);
atomic64_set(&fnic->fnic_stats.fw_stats.active_fw_reqs, 0);
atomic64_set(&fnic->fnic_stats.io_stats.active_ios, 0);
atomic64_set(&fnic->io_cmpl_skip, 0);
spin_lock_irqsave(&fnic->fnic_lock, flags);
/* fnic should be in FC_TRANS_ETH_MODE */
if (fnic->state == FNIC_IN_FC_TRANS_ETH_MODE) {
/* Check status of reset completion */
if (!hdr_status) {
FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
"reset cmpl success\n");
/* Ready to send flogi out */
fnic->state = FNIC_IN_ETH_MODE;
} else {
FNIC_SCSI_DBG(KERN_DEBUG,
fnic->lport->host,
"fnic fw_reset : failed %s\n",
fnic_fcpio_status_to_str(hdr_status));
/*
 * Unable to change to eth mode, so the flogi cannot be sent out.
 * Change the state to fc mode so that subsequent FLOGI requests
 * from libFC cause further attempts to reset the firmware. Free
 * the cached flogi.
 */
fnic->state = FNIC_IN_FC_MODE;
atomic64_inc(&reset_stats->fw_reset_failures);
ret = -1;
}
} else {
FNIC_SCSI_DBG(KERN_DEBUG,
fnic->lport->host,
"Unexpected state %s while processing"
" reset cmpl\n", fnic_state_to_str(fnic->state));
atomic64_inc(&reset_stats->fw_reset_failures);
ret = -1;
}
/* Thread removing device blocks till firmware reset is complete */
if (fnic->remove_wait)
complete(fnic->remove_wait);
/*
 * If the fnic is being removed or the fw reset failed,
 * free the flogi frame. Else, send it out.
 */
if (fnic->remove_wait || ret) {
spin_unlock_irqrestore(&fnic->fnic_lock, flags);
skb_queue_purge(&fnic->tx_queue);
goto reset_cmpl_handler_end;
}
spin_unlock_irqrestore(&fnic->fnic_lock, flags);
fnic_flush_tx(fnic);
reset_cmpl_handler_end:
fnic_clear_state_flags(fnic, FNIC_FLAGS_FWRESET);
return ret;
}
/*
* fnic_fcpio_flogi_reg_cmpl_handler
* Routine to handle flogi register completion
*/
static int fnic_fcpio_flogi_reg_cmpl_handler(struct fnic *fnic,
struct fcpio_fw_req *desc)
{
u8 type;
u8 hdr_status;
struct fcpio_tag tag;
int ret = 0;
unsigned long flags;
fcpio_header_dec(&desc->hdr, &type, &hdr_status, &tag);
/* Update fnic state based on status of flogi reg completion */
spin_lock_irqsave(&fnic->fnic_lock, flags);
if (fnic->state == FNIC_IN_ETH_TRANS_FC_MODE) {
/* Check flogi registration completion status */
if (!hdr_status) {
FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
"flog reg succeeded\n");
fnic->state = FNIC_IN_FC_MODE;
} else {
FNIC_SCSI_DBG(KERN_DEBUG,
fnic->lport->host,
"fnic flogi reg :failed %s\n",
fnic_fcpio_status_to_str(hdr_status));
fnic->state = FNIC_IN_ETH_MODE;
ret = -1;
}
} else {
FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
"Unexpected fnic state %s while"
" processing flogi reg completion\n",
fnic_state_to_str(fnic->state));
ret = -1;
}
if (!ret) {
if (fnic->stop_rx_link_events) {
spin_unlock_irqrestore(&fnic->fnic_lock, flags);
goto reg_cmpl_handler_end;
}
spin_unlock_irqrestore(&fnic->fnic_lock, flags);
fnic_flush_tx(fnic);
queue_work(fnic_event_queue, &fnic->frame_work);
} else {
spin_unlock_irqrestore(&fnic->fnic_lock, flags);
}
reg_cmpl_handler_end:
return ret;
}
static inline int is_ack_index_in_range(struct vnic_wq_copy *wq,
u16 request_out)
{
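/*
 * Descriptors in [to_clean_index, to_use_index) are posted to the
 * firmware but not yet acked; a valid ack index must fall inside
 * that window, accounting for ring wraparound.
 */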
if (wq->to_clean_index <= wq->to_use_index) {
/* out of range, stale request_out index */
if (request_out < wq->to_clean_index ||
request_out >= wq->to_use_index)
return 0;
} else {
/* out of range, stale request_out index */
if (request_out < wq->to_clean_index &&
request_out >= wq->to_use_index)
return 0;
}
/* request_out index is in range */
return 1;
}
/*
 * Mark that an ack was received and store the ack index. If multiple
 * acks arrive before the Tx thread cleans up, the latest value is used,
 * which is the correct behavior. This state really belongs in the copy
 * WQ rather than in the fnic.
 */
static inline void fnic_fcpio_ack_handler(struct fnic *fnic,
unsigned int cq_index,
struct fcpio_fw_req *desc)
{
struct vnic_wq_copy *wq;
u16 request_out = desc->u.ack.request_out;
unsigned long flags;
u64 *ox_id_tag = (u64 *)(void *)desc;
/* mark the ack state */
wq = &fnic->wq_copy[cq_index - fnic->raw_wq_count - fnic->rq_count];
spin_lock_irqsave(&fnic->wq_copy_lock[0], flags);
fnic->fnic_stats.misc_stats.last_ack_time = jiffies;
if (is_ack_index_in_range(wq, request_out)) {
fnic->fw_ack_index[0] = request_out;
fnic->fw_ack_recd[0] = 1;
} else
atomic64_inc(
&fnic->fnic_stats.misc_stats.ack_index_out_of_range);
spin_unlock_irqrestore(&fnic->wq_copy_lock[0], flags);
FNIC_TRACE(fnic_fcpio_ack_handler,
fnic->lport->host->host_no, 0, 0, ox_id_tag[2], ox_id_tag[3],
ox_id_tag[4], ox_id_tag[5]);
}
/*
* fnic_fcpio_icmnd_cmpl_handler
* Routine to handle icmnd completions
*/
static void fnic_fcpio_icmnd_cmpl_handler(struct fnic *fnic,
struct fcpio_fw_req *desc)
{
u8 type;
u8 hdr_status;
struct fcpio_tag tag;
u32 id;
u64 xfer_len = 0;
struct fcpio_icmnd_cmpl *icmnd_cmpl;
struct fnic_io_req *io_req;
struct scsi_cmnd *sc;
struct fnic_stats *fnic_stats = &fnic->fnic_stats;
unsigned long flags;
spinlock_t *io_lock;
u64 cmd_trace;
unsigned long start_time;
unsigned long io_duration_time;
/* Decode the cmpl description to get the io_req id */
fcpio_header_dec(&desc->hdr, &type, &hdr_status, &tag);
fcpio_tag_id_dec(&tag, &id);
icmnd_cmpl = &desc->u.icmnd_cmpl;
if (id >= fnic->fnic_max_tag_id) {
shost_printk(KERN_ERR, fnic->lport->host,
"Tag out of range tag %x hdr status = %s\n",
id, fnic_fcpio_status_to_str(hdr_status));
return;
}
sc = scsi_host_find_tag(fnic->lport->host, id);
WARN_ON_ONCE(!sc);
if (!sc) {
atomic64_inc(&fnic_stats->io_stats.sc_null);
shost_printk(KERN_ERR, fnic->lport->host,
"icmnd_cmpl sc is null - "
"hdr status = %s tag = 0x%x desc = 0x%p\n",
fnic_fcpio_status_to_str(hdr_status), id, desc);
FNIC_TRACE(fnic_fcpio_icmnd_cmpl_handler,
fnic->lport->host->host_no, id,
((u64)icmnd_cmpl->_resvd0[1] << 16 |
(u64)icmnd_cmpl->_resvd0[0]),
((u64)hdr_status << 16 |
(u64)icmnd_cmpl->scsi_status << 8 |
(u64)icmnd_cmpl->flags), desc,
(u64)icmnd_cmpl->residual, 0);
return;
}
io_lock = fnic_io_lock_hash(fnic, sc);
spin_lock_irqsave(io_lock, flags);
io_req = fnic_priv(sc)->io_req;
WARN_ON_ONCE(!io_req);
if (!io_req) {
atomic64_inc(&fnic_stats->io_stats.ioreq_null);
fnic_priv(sc)->flags |= FNIC_IO_REQ_NULL;
spin_unlock_irqrestore(io_lock, flags);
shost_printk(KERN_ERR, fnic->lport->host,
"icmnd_cmpl io_req is null - "
"hdr status = %s tag = 0x%x sc 0x%p\n",
fnic_fcpio_status_to_str(hdr_status), id, sc);
return;
}
start_time = io_req->start_time;
/* firmware completed the io */
io_req->io_completed = 1;
/*
 * If SCSI-ML has already issued an abort on this command, mark
 * the IO as completed; the abort path will clean it up.
 */
if (fnic_priv(sc)->state == FNIC_IOREQ_ABTS_PENDING) {
/*
* set the FNIC_IO_DONE so that this doesn't get
* flagged as 'out of order' if it was not aborted
*/
fnic_priv(sc)->flags |= FNIC_IO_DONE;
fnic_priv(sc)->flags |= FNIC_IO_ABTS_PENDING;
if (hdr_status == FCPIO_ABORTED)
fnic_priv(sc)->flags |= FNIC_IO_ABORTED;
spin_unlock_irqrestore(io_lock, flags);
FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host,
"icmnd_cmpl abts pending "
"hdr status = %s tag = 0x%x sc = 0x%p "
"scsi_status = %x residual = %d\n",
fnic_fcpio_status_to_str(hdr_status),
id, sc,
icmnd_cmpl->scsi_status,
icmnd_cmpl->residual);
return;
}
/* Mark the IO as complete */
fnic_priv(sc)->state = FNIC_IOREQ_CMD_COMPLETE;
icmnd_cmpl = &desc->u.icmnd_cmpl;
switch (hdr_status) {
case FCPIO_SUCCESS:
sc->result = (DID_OK << 16) | icmnd_cmpl->scsi_status;
xfer_len = scsi_bufflen(sc);
if (icmnd_cmpl->flags & FCPIO_ICMND_CMPL_RESID_UNDER) {
xfer_len -= icmnd_cmpl->residual;
scsi_set_resid(sc, icmnd_cmpl->residual);
}
if (icmnd_cmpl->scsi_status == SAM_STAT_CHECK_CONDITION)
atomic64_inc(&fnic_stats->misc_stats.check_condition);
if (icmnd_cmpl->scsi_status == SAM_STAT_TASK_SET_FULL)
atomic64_inc(&fnic_stats->misc_stats.queue_fulls);
break;
case FCPIO_TIMEOUT: /* request was timed out */
atomic64_inc(&fnic_stats->misc_stats.fcpio_timeout);
sc->result = (DID_TIME_OUT << 16) | icmnd_cmpl->scsi_status;
break;
case FCPIO_ABORTED: /* request was aborted */
atomic64_inc(&fnic_stats->misc_stats.fcpio_aborted);
sc->result = (DID_ERROR << 16) | icmnd_cmpl->scsi_status;
break;
case FCPIO_DATA_CNT_MISMATCH: /* recv/sent more/less data than exp. */
atomic64_inc(&fnic_stats->misc_stats.data_count_mismatch);
scsi_set_resid(sc, icmnd_cmpl->residual);
sc->result = (DID_ERROR << 16) | icmnd_cmpl->scsi_status;
break;
case FCPIO_OUT_OF_RESOURCE: /* out of resources to complete request */
atomic64_inc(&fnic_stats->fw_stats.fw_out_of_resources);
sc->result = (DID_REQUEUE << 16) | icmnd_cmpl->scsi_status;
break;
case FCPIO_IO_NOT_FOUND: /* requested I/O was not found */
atomic64_inc(&fnic_stats->io_stats.io_not_found);
sc->result = (DID_ERROR << 16) | icmnd_cmpl->scsi_status;
break;
case FCPIO_SGL_INVALID: /* request was aborted due to sgl error */
atomic64_inc(&fnic_stats->misc_stats.sgl_invalid);
sc->result = (DID_ERROR << 16) | icmnd_cmpl->scsi_status;
break;
case FCPIO_FW_ERR: /* request was terminated due to fw error */
atomic64_inc(&fnic_stats->fw_stats.io_fw_errs);
sc->result = (DID_ERROR << 16) | icmnd_cmpl->scsi_status;
break;
case FCPIO_MSS_INVALID: /* request was aborted due to mss error */
atomic64_inc(&fnic_stats->misc_stats.mss_invalid);
sc->result = (DID_ERROR << 16) | icmnd_cmpl->scsi_status;
break;
case FCPIO_INVALID_HEADER: /* header contains invalid data */
case FCPIO_INVALID_PARAM: /* some parameter in request invalid */
case FCPIO_REQ_NOT_SUPPORTED:/* request type is not supported */
default:
sc->result = (DID_ERROR << 16) | icmnd_cmpl->scsi_status;
break;
}
/* Break link with the SCSI command */
fnic_priv(sc)->io_req = NULL;
fnic_priv(sc)->flags |= FNIC_IO_DONE;
if (hdr_status != FCPIO_SUCCESS) {
atomic64_inc(&fnic_stats->io_stats.io_failures);
shost_printk(KERN_ERR, fnic->lport->host, "hdr status = %s\n",
fnic_fcpio_status_to_str(hdr_status));
}
fnic_release_ioreq_buf(fnic, io_req, sc);
cmd_trace = ((u64)hdr_status << 56) |
(u64)icmnd_cmpl->scsi_status << 48 |
(u64)icmnd_cmpl->flags << 40 | (u64)sc->cmnd[0] << 32 |
(u64)sc->cmnd[2] << 24 | (u64)sc->cmnd[3] << 16 |
(u64)sc->cmnd[4] << 8 | sc->cmnd[5];
FNIC_TRACE(fnic_fcpio_icmnd_cmpl_handler,
sc->device->host->host_no, id, sc,
((u64)icmnd_cmpl->_resvd0[1] << 56 |
(u64)icmnd_cmpl->_resvd0[0] << 48 |
jiffies_to_msecs(jiffies - start_time)),
desc, cmd_trace, fnic_flags_and_state(sc));
if (sc->sc_data_direction == DMA_FROM_DEVICE) {
fnic->lport->host_stats.fcp_input_requests++;
fnic->fcp_input_bytes += xfer_len;
} else if (sc->sc_data_direction == DMA_TO_DEVICE) {
fnic->lport->host_stats.fcp_output_requests++;
fnic->fcp_output_bytes += xfer_len;
} else
fnic->lport->host_stats.fcp_control_requests++;
/* Call SCSI completion function to complete the IO */
scsi_done(sc);
spin_unlock_irqrestore(io_lock, flags);
mempool_free(io_req, fnic->io_req_pool);
atomic64_dec(&fnic_stats->io_stats.active_ios);
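/*
 * If io_cmpl_skip is non-zero, consume one skip credit instead of
 * counting this as a regular IO completion in the stats.
 */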
if (atomic64_read(&fnic->io_cmpl_skip))
atomic64_dec(&fnic->io_cmpl_skip);
else
atomic64_inc(&fnic_stats->io_stats.io_completions);
io_duration_time = jiffies_to_msecs(jiffies) -
jiffies_to_msecs(start_time);
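/* bucket the IO round-trip time (in msec) into the latency histogram */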
if (io_duration_time <= 10)
atomic64_inc(&fnic_stats->io_stats.io_btw_0_to_10_msec);
else if (io_duration_time <= 100)
atomic64_inc(&fnic_stats->io_stats.io_btw_10_to_100_msec);
else if (io_duration_time <= 500)
atomic64_inc(&fnic_stats->io_stats.io_btw_100_to_500_msec);
else if (io_duration_time <= 5000)
atomic64_inc(&fnic_stats->io_stats.io_btw_500_to_5000_msec);
else if (io_duration_time <= 10000)
atomic64_inc(&fnic_stats->io_stats.io_btw_5000_to_10000_msec);
else if (io_duration_time <= 30000)
atomic64_inc(&fnic_stats->io_stats.io_btw_10000_to_30000_msec);
else {
atomic64_inc(&fnic_stats->io_stats.io_greater_than_30000_msec);
if (io_duration_time > atomic64_read(&fnic_stats->io_stats.current_max_io_time))
atomic64_set(&fnic_stats->io_stats.current_max_io_time, io_duration_time);
}
}
/* fnic_fcpio_itmf_cmpl_handler
* Routine to handle itmf completions
*/
static void fnic_fcpio_itmf_cmpl_handler(struct fnic *fnic,
struct fcpio_fw_req *desc)
{
u8 type;
u8 hdr_status;
struct fcpio_tag tag;
u32 id;
struct scsi_cmnd *sc;
struct fnic_io_req *io_req;
struct fnic_stats *fnic_stats = &fnic->fnic_stats;
struct abort_stats *abts_stats = &fnic->fnic_stats.abts_stats;
struct terminate_stats *term_stats = &fnic->fnic_stats.term_stats;
struct misc_stats *misc_stats = &fnic->fnic_stats.misc_stats;
unsigned long flags;
spinlock_t *io_lock;
unsigned long start_time;
fcpio_header_dec(&desc->hdr, &type, &hdr_status, &tag);
fcpio_tag_id_dec(&tag, &id);
if ((id & FNIC_TAG_MASK) >= fnic->fnic_max_tag_id) {
shost_printk(KERN_ERR, fnic->lport->host,
"Tag out of range tag %x hdr status = %s\n",
id, fnic_fcpio_status_to_str(hdr_status));
return;
}
sc = scsi_host_find_tag(fnic->lport->host, id & FNIC_TAG_MASK);
WARN_ON_ONCE(!sc);
if (!sc) {
atomic64_inc(&fnic_stats->io_stats.sc_null);
shost_printk(KERN_ERR, fnic->lport->host,
"itmf_cmpl sc is null - hdr status = %s tag = 0x%x\n",
fnic_fcpio_status_to_str(hdr_status), id);
return;
}
io_lock = fnic_io_lock_hash(fnic, sc);
spin_lock_irqsave(io_lock, flags);
io_req = fnic_priv(sc)->io_req;
WARN_ON_ONCE(!io_req);
if (!io_req) {
atomic64_inc(&fnic_stats->io_stats.ioreq_null);
spin_unlock_irqrestore(io_lock, flags);
fnic_priv(sc)->flags |= FNIC_IO_ABT_TERM_REQ_NULL;
shost_printk(KERN_ERR, fnic->lport->host,
"itmf_cmpl io_req is null - "
"hdr status = %s tag = 0x%x sc 0x%p\n",
fnic_fcpio_status_to_str(hdr_status), id, sc);
return;
}
start_time = io_req->start_time;
if ((id & FNIC_TAG_ABORT) && (id & FNIC_TAG_DEV_RST)) {
/* Abort and terminate completion of device reset req */
/* REVISIT : Add asserts about various flags */
FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
"dev reset abts cmpl recd. id %x status %s\n",
id, fnic_fcpio_status_to_str(hdr_status));
fnic_priv(sc)->state = FNIC_IOREQ_ABTS_COMPLETE;
fnic_priv(sc)->abts_status = hdr_status;
fnic_priv(sc)->flags |= FNIC_DEV_RST_DONE;
if (io_req->abts_done)
complete(io_req->abts_done);
spin_unlock_irqrestore(io_lock, flags);
} else if (id & FNIC_TAG_ABORT) {
/* Completion of abort cmd */
switch (hdr_status) {
case FCPIO_SUCCESS:
break;
case FCPIO_TIMEOUT:
if (fnic_priv(sc)->flags & FNIC_IO_ABTS_ISSUED)
atomic64_inc(&abts_stats->abort_fw_timeouts);
else
atomic64_inc(
&term_stats->terminate_fw_timeouts);
break;
case FCPIO_ITMF_REJECTED:
FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host,
"abort reject recd. id %d\n",
(int)(id & FNIC_TAG_MASK));
break;
case FCPIO_IO_NOT_FOUND:
if (fnic_priv(sc)->flags & FNIC_IO_ABTS_ISSUED)
atomic64_inc(&abts_stats->abort_io_not_found);
else
atomic64_inc(
&term_stats->terminate_io_not_found);
break;
default:
if (fnic_priv(sc)->flags & FNIC_IO_ABTS_ISSUED)
atomic64_inc(&abts_stats->abort_failures);
else
atomic64_inc(
&term_stats->terminate_failures);
break;
}
if (fnic_priv(sc)->state != FNIC_IOREQ_ABTS_PENDING) {
/* This is a late completion. Ignore it */
spin_unlock_irqrestore(io_lock, flags);
return;
}
fnic_priv(sc)->flags |= FNIC_IO_ABT_TERM_DONE;
fnic_priv(sc)->abts_status = hdr_status;
/* If the status is IO not found consider it as success */
if (hdr_status == FCPIO_IO_NOT_FOUND)
fnic_priv(sc)->abts_status = FCPIO_SUCCESS;
if (!(fnic_priv(sc)->flags & (FNIC_IO_ABORTED | FNIC_IO_DONE)))
atomic64_inc(&misc_stats->no_icmnd_itmf_cmpls);
FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
"abts cmpl recd. id %d status %s\n",
(int)(id & FNIC_TAG_MASK),
fnic_fcpio_status_to_str(hdr_status));
/*
 * If the scsi_eh thread is blocked waiting for the abort to
 * complete, signal it; the IO will then be cleaned up in that
 * thread. Otherwise, clean it up in this context.
 */
if (io_req->abts_done) {
complete(io_req->abts_done);
spin_unlock_irqrestore(io_lock, flags);
} else {
FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
"abts cmpl, completing IO\n");
fnic_priv(sc)->io_req = NULL;
sc->result = (DID_ERROR << 16);
spin_unlock_irqrestore(io_lock, flags);
fnic_release_ioreq_buf(fnic, io_req, sc);
mempool_free(io_req, fnic->io_req_pool);
FNIC_TRACE(fnic_fcpio_itmf_cmpl_handler,
sc->device->host->host_no, id,
sc,
jiffies_to_msecs(jiffies - start_time),
desc,
(((u64)hdr_status << 40) |
(u64)sc->cmnd[0] << 32 |
(u64)sc->cmnd[2] << 24 |
(u64)sc->cmnd[3] << 16 |
(u64)sc->cmnd[4] << 8 | sc->cmnd[5]),
fnic_flags_and_state(sc));
scsi_done(sc);
atomic64_dec(&fnic_stats->io_stats.active_ios);
if (atomic64_read(&fnic->io_cmpl_skip))
atomic64_dec(&fnic->io_cmpl_skip);
else
atomic64_inc(&fnic_stats->io_stats.io_completions);
}
} else if (id & FNIC_TAG_DEV_RST) {
/* Completion of device reset */
fnic_priv(sc)->lr_status = hdr_status;
if (fnic_priv(sc)->state == FNIC_IOREQ_ABTS_PENDING) {
spin_unlock_irqrestore(io_lock, flags);
fnic_priv(sc)->flags |= FNIC_DEV_RST_ABTS_PENDING;
FNIC_TRACE(fnic_fcpio_itmf_cmpl_handler,
sc->device->host->host_no, id, sc,
jiffies_to_msecs(jiffies - start_time),
desc, 0, fnic_flags_and_state(sc));
FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
"Terminate pending "
"dev reset cmpl recd. id %d status %s\n",
(int)(id & FNIC_TAG_MASK),
fnic_fcpio_status_to_str(hdr_status));
return;
}
if (fnic_priv(sc)->flags & FNIC_DEV_RST_TIMED_OUT) {
/* Need to wait for terminate completion */
spin_unlock_irqrestore(io_lock, flags);
FNIC_TRACE(fnic_fcpio_itmf_cmpl_handler,
sc->device->host->host_no, id, sc,
jiffies_to_msecs(jiffies - start_time),
desc, 0, fnic_flags_and_state(sc));
FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
"dev reset cmpl recd after time out. "
"id %d status %s\n",
(int)(id & FNIC_TAG_MASK),
fnic_fcpio_status_to_str(hdr_status));
return;
}
fnic_priv(sc)->state = FNIC_IOREQ_CMD_COMPLETE;
fnic_priv(sc)->flags |= FNIC_DEV_RST_DONE;
FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
"dev reset cmpl recd. id %d status %s\n",
(int)(id & FNIC_TAG_MASK),
fnic_fcpio_status_to_str(hdr_status));
if (io_req->dr_done)
complete(io_req->dr_done);
spin_unlock_irqrestore(io_lock, flags);
} else {
shost_printk(KERN_ERR, fnic->lport->host,
"Unexpected itmf io state %s tag %x\n",
fnic_ioreq_state_to_str(fnic_priv(sc)->state), id);
spin_unlock_irqrestore(io_lock, flags);
}
}
/*
* fnic_fcpio_cmpl_handler
* Routine to service the cq for wq_copy
*/
static int fnic_fcpio_cmpl_handler(struct vnic_dev *vdev,
unsigned int cq_index,
struct fcpio_fw_req *desc)
{
struct fnic *fnic = vnic_dev_priv(vdev);
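/*
 * Any real completion (as opposed to a descriptor ACK) retires one
 * outstanding firmware request, so drop the active count before
 * dispatching to the specific handler.
 */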
switch (desc->hdr.type) {
case FCPIO_ICMND_CMPL: /* fw completed a command */
case FCPIO_ITMF_CMPL: /* fw completed itmf (abort cmd, lun reset)*/
case FCPIO_FLOGI_REG_CMPL: /* fw completed flogi_reg */
case FCPIO_FLOGI_FIP_REG_CMPL: /* fw completed flogi_fip_reg */
case FCPIO_RESET_CMPL: /* fw completed reset */
atomic64_dec(&fnic->fnic_stats.fw_stats.active_fw_reqs);
break;
default:
break;
}
switch (desc->hdr.type) {
case FCPIO_ACK: /* fw copied copy wq desc to its queue */
fnic_fcpio_ack_handler(fnic, cq_index, desc);
break;
case FCPIO_ICMND_CMPL: /* fw completed a command */
fnic_fcpio_icmnd_cmpl_handler(fnic, desc);
break;
case FCPIO_ITMF_CMPL: /* fw completed itmf (abort cmd, lun reset)*/
fnic_fcpio_itmf_cmpl_handler(fnic, desc);
break;
case FCPIO_FLOGI_REG_CMPL: /* fw completed flogi_reg */
case FCPIO_FLOGI_FIP_REG_CMPL: /* fw completed flogi_fip_reg */
fnic_fcpio_flogi_reg_cmpl_handler(fnic, desc);
break;
case FCPIO_RESET_CMPL: /* fw completed reset */
fnic_fcpio_fw_reset_cmpl_handler(fnic, desc);
break;
default:
FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
"firmware completion type %d\n",
desc->hdr.type);
break;
}
return 0;
}
/*
* fnic_wq_copy_cmpl_handler
* Routine to process wq copy
*/
int fnic_wq_copy_cmpl_handler(struct fnic *fnic, int copy_work_to_do)
{
unsigned int wq_work_done = 0;
unsigned int i, cq_index;
unsigned int cur_work_done;
struct misc_stats *misc_stats = &fnic->fnic_stats.misc_stats;
u64 start_jiffies = 0;
u64 end_jiffies = 0;
u64 delta_jiffies = 0;
u64 delta_ms = 0;
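/*
 * Service each copy WQ completion queue and record the longest
 * single pass (in jiffies and msec) for ISR latency statistics.
 */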
for (i = 0; i < fnic->wq_copy_count; i++) {
cq_index = i + fnic->raw_wq_count + fnic->rq_count;
start_jiffies = jiffies;
cur_work_done = vnic_cq_copy_service(&fnic->cq[cq_index],
fnic_fcpio_cmpl_handler,
copy_work_to_do);
end_jiffies = jiffies;
wq_work_done += cur_work_done;
delta_jiffies = end_jiffies - start_jiffies;
if (delta_jiffies >
(u64) atomic64_read(&misc_stats->max_isr_jiffies)) {
atomic64_set(&misc_stats->max_isr_jiffies,
delta_jiffies);
delta_ms = jiffies_to_msecs(delta_jiffies);
atomic64_set(&misc_stats->max_isr_time_ms, delta_ms);
atomic64_set(&misc_stats->corr_work_done,
cur_work_done);
}
}
return wq_work_done;
}
static bool fnic_cleanup_io_iter(struct scsi_cmnd *sc, void *data)
{
const int tag = scsi_cmd_to_rq(sc)->tag;
struct fnic *fnic = data;
struct fnic_io_req *io_req;
unsigned long flags = 0;
spinlock_t *io_lock;
unsigned long start_time = 0;
struct fnic_stats *fnic_stats = &fnic->fnic_stats;
io_lock = fnic_io_lock_tag(fnic, tag);
spin_lock_irqsave(io_lock, flags);
io_req = fnic_priv(sc)->io_req;
if ((fnic_priv(sc)->flags & FNIC_DEVICE_RESET) &&
!(fnic_priv(sc)->flags & FNIC_DEV_RST_DONE)) {
/*
* We will be here only when FW completes reset
* without sending completions for outstanding ios.
*/
fnic_priv(sc)->flags |= FNIC_DEV_RST_DONE;
if (io_req && io_req->dr_done)
complete(io_req->dr_done);
else if (io_req && io_req->abts_done)
complete(io_req->abts_done);
spin_unlock_irqrestore(io_lock, flags);
return true;
} else if (fnic_priv(sc)->flags & FNIC_DEVICE_RESET) {
spin_unlock_irqrestore(io_lock, flags);
return true;
}
if (!io_req) {
spin_unlock_irqrestore(io_lock, flags);
goto cleanup_scsi_cmd;
}
fnic_priv(sc)->io_req = NULL;
spin_unlock_irqrestore(io_lock, flags);
/*
* If there is a scsi_cmnd associated with this io_req, then
* free the corresponding state
*/
start_time = io_req->start_time;
fnic_release_ioreq_buf(fnic, io_req, sc);
mempool_free(io_req, fnic->io_req_pool);
cleanup_scsi_cmd:
sc->result = DID_TRANSPORT_DISRUPTED << 16;
FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
"fnic_cleanup_io: tag:0x%x : sc:0x%p duration = %lu DID_TRANSPORT_DISRUPTED\n",
tag, sc, jiffies - start_time);
if (atomic64_read(&fnic->io_cmpl_skip))
atomic64_dec(&fnic->io_cmpl_skip);
else
atomic64_inc(&fnic_stats->io_stats.io_completions);
/* Complete the command to SCSI */
if (!(fnic_priv(sc)->flags & FNIC_IO_ISSUED))
shost_printk(KERN_ERR, fnic->lport->host,
"Calling done for IO not issued to fw: tag:0x%x sc:0x%p\n",
tag, sc);
FNIC_TRACE(fnic_cleanup_io,
sc->device->host->host_no, tag, sc,
jiffies_to_msecs(jiffies - start_time),
0, ((u64)sc->cmnd[0] << 32 |
(u64)sc->cmnd[2] << 24 |
(u64)sc->cmnd[3] << 16 |
(u64)sc->cmnd[4] << 8 | sc->cmnd[5]),
fnic_flags_and_state(sc));
scsi_done(sc);
return true;
}
static void fnic_cleanup_io(struct fnic *fnic)
{
scsi_host_busy_iter(fnic->lport->host,
fnic_cleanup_io_iter, fnic);
}
void fnic_wq_copy_cleanup_handler(struct vnic_wq_copy *wq,
struct fcpio_host_req *desc)
{
u32 id;
struct fnic *fnic = vnic_dev_priv(wq->vdev);
struct fnic_io_req *io_req;
struct scsi_cmnd *sc;
unsigned long flags;
spinlock_t *io_lock;
unsigned long start_time = 0;
/* get the tag reference */
fcpio_tag_id_dec(&desc->hdr.tag, &id);
id &= FNIC_TAG_MASK;
if (id >= fnic->fnic_max_tag_id)
return;
sc = scsi_host_find_tag(fnic->lport->host, id);
if (!sc)
return;
io_lock = fnic_io_lock_hash(fnic, sc);
spin_lock_irqsave(io_lock, flags);
/* Get the IO context which this desc refers to */
io_req = fnic_priv(sc)->io_req;
/* fnic interrupts are turned off by now */
if (!io_req) {
spin_unlock_irqrestore(io_lock, flags);
goto wq_copy_cleanup_scsi_cmd;
}
fnic_priv(sc)->io_req = NULL;
spin_unlock_irqrestore(io_lock, flags);
start_time = io_req->start_time;
fnic_release_ioreq_buf(fnic, io_req, sc);
mempool_free(io_req, fnic->io_req_pool);
wq_copy_cleanup_scsi_cmd:
sc->result = DID_NO_CONNECT << 16;
FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, "wq_copy_cleanup_handler:"
" DID_NO_CONNECT\n");
FNIC_TRACE(fnic_wq_copy_cleanup_handler,
sc->device->host->host_no, id, sc,
jiffies_to_msecs(jiffies - start_time),
0, ((u64)sc->cmnd[0] << 32 |
(u64)sc->cmnd[2] << 24 | (u64)sc->cmnd[3] << 16 |
(u64)sc->cmnd[4] << 8 | sc->cmnd[5]),
fnic_flags_and_state(sc));
scsi_done(sc);
}
static inline int fnic_queue_abort_io_req(struct fnic *fnic, int tag,
u32 task_req, u8 *fc_lun,
struct fnic_io_req *io_req)
{
struct vnic_wq_copy *wq = &fnic->wq_copy[0];
struct Scsi_Host *host = fnic->lport->host;
struct misc_stats *misc_stats = &fnic->fnic_stats.misc_stats;
unsigned long flags;
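/*
 * Bump in_flight while holding host_lock so the abort is counted
 * before any path that blocks IO and waits for in_flight to drain
 * can proceed.
 */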
spin_lock_irqsave(host->host_lock, flags);
if (unlikely(fnic_chk_state_flags_locked(fnic,
FNIC_FLAGS_IO_BLOCKED))) {
spin_unlock_irqrestore(host->host_lock, flags);
return 1;
} else
atomic_inc(&fnic->in_flight);
spin_unlock_irqrestore(host->host_lock, flags);
spin_lock_irqsave(&fnic->wq_copy_lock[0], flags);
if (vnic_wq_copy_desc_avail(wq) <= fnic->wq_copy_desc_low[0])
free_wq_copy_descs(fnic, wq);
if (!vnic_wq_copy_desc_avail(wq)) {
spin_unlock_irqrestore(&fnic->wq_copy_lock[0], flags);
atomic_dec(&fnic->in_flight);
FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
"fnic_queue_abort_io_req: failure: no descriptors\n");
atomic64_inc(&misc_stats->abts_cpwq_alloc_failures);
return 1;
}
fnic_queue_wq_copy_desc_itmf(wq, tag | FNIC_TAG_ABORT,
0, task_req, tag, fc_lun, io_req->port_id,
fnic->config.ra_tov, fnic->config.ed_tov);
atomic64_inc(&fnic->fnic_stats.fw_stats.active_fw_reqs);
if (atomic64_read(&fnic->fnic_stats.fw_stats.active_fw_reqs) >
atomic64_read(&fnic->fnic_stats.fw_stats.max_fw_reqs))
atomic64_set(&fnic->fnic_stats.fw_stats.max_fw_reqs,
atomic64_read(&fnic->fnic_stats.fw_stats.active_fw_reqs));
spin_unlock_irqrestore(&fnic->wq_copy_lock[0], flags);
atomic_dec(&fnic->in_flight);
return 0;
}
struct fnic_rport_abort_io_iter_data {
struct fnic *fnic;
u32 port_id;
int term_cnt;
};
static bool fnic_rport_abort_io_iter(struct scsi_cmnd *sc, void *data)
{
struct fnic_rport_abort_io_iter_data *iter_data = data;
struct fnic *fnic = iter_data->fnic;
int abt_tag = scsi_cmd_to_rq(sc)->tag;
struct fnic_io_req *io_req;
spinlock_t *io_lock;
unsigned long flags;
struct reset_stats *reset_stats = &fnic->fnic_stats.reset_stats;
struct terminate_stats *term_stats = &fnic->fnic_stats.term_stats;
struct scsi_lun fc_lun;
enum fnic_ioreq_state old_ioreq_state;
io_lock = fnic_io_lock_tag(fnic, abt_tag);
spin_lock_irqsave(io_lock, flags);
io_req = fnic_priv(sc)->io_req;
if (!io_req || io_req->port_id != iter_data->port_id) {
spin_unlock_irqrestore(io_lock, flags);
return true;
}
if ((fnic_priv(sc)->flags & FNIC_DEVICE_RESET) &&
!(fnic_priv(sc)->flags & FNIC_DEV_RST_ISSUED)) {
FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
"fnic_rport_exch_reset dev rst not pending sc 0x%p\n",
sc);
spin_unlock_irqrestore(io_lock, flags);
return true;
}
/*
* Found IO that is still pending with firmware and
* belongs to rport that went away
*/
if (fnic_priv(sc)->state == FNIC_IOREQ_ABTS_PENDING) {
spin_unlock_irqrestore(io_lock, flags);
return true;
}
if (io_req->abts_done) {
shost_printk(KERN_ERR, fnic->lport->host,
"fnic_rport_exch_reset: io_req->abts_done is set "
"state is %s\n",
fnic_ioreq_state_to_str(fnic_priv(sc)->state));
}
if (!(fnic_priv(sc)->flags & FNIC_IO_ISSUED)) {
shost_printk(KERN_ERR, fnic->lport->host,
"rport_exch_reset "
"IO not yet issued %p tag 0x%x flags "
"%x state %d\n",
sc, abt_tag, fnic_priv(sc)->flags, fnic_priv(sc)->state);
}
old_ioreq_state = fnic_priv(sc)->state;
fnic_priv(sc)->state = FNIC_IOREQ_ABTS_PENDING;
fnic_priv(sc)->abts_status = FCPIO_INVALID_CODE;
if (fnic_priv(sc)->flags & FNIC_DEVICE_RESET) {
atomic64_inc(&reset_stats->device_reset_terminates);
abt_tag |= FNIC_TAG_DEV_RST;
}
FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
"fnic_rport_exch_reset dev rst sc 0x%p\n", sc);
BUG_ON(io_req->abts_done);
FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
"fnic_rport_reset_exch: Issuing abts\n");
spin_unlock_irqrestore(io_lock, flags);
/* Now queue the abort command to firmware */
int_to_scsilun(sc->device->lun, &fc_lun);
if (fnic_queue_abort_io_req(fnic, abt_tag,
FCPIO_ITMF_ABT_TASK_TERM,
fc_lun.scsi_lun, io_req)) {
/*
* Revert the cmd state back to old state, if
* it hasn't changed in between. This cmd will get
* aborted later by scsi_eh, or cleaned up during
* lun reset
*/
spin_lock_irqsave(io_lock, flags);
if (fnic_priv(sc)->state == FNIC_IOREQ_ABTS_PENDING)
fnic_priv(sc)->state = old_ioreq_state;
spin_unlock_irqrestore(io_lock, flags);
} else {
spin_lock_irqsave(io_lock, flags);
if (fnic_priv(sc)->flags & FNIC_DEVICE_RESET)
fnic_priv(sc)->flags |= FNIC_DEV_RST_TERM_ISSUED;
else
fnic_priv(sc)->flags |= FNIC_IO_INTERNAL_TERM_ISSUED;
spin_unlock_irqrestore(io_lock, flags);
atomic64_inc(&term_stats->terminates);
iter_data->term_cnt++;
}
return true;
}
static void fnic_rport_exch_reset(struct fnic *fnic, u32 port_id)
{
struct terminate_stats *term_stats = &fnic->fnic_stats.term_stats;
struct fnic_rport_abort_io_iter_data iter_data = {
.fnic = fnic,
.port_id = port_id,
.term_cnt = 0,
};
FNIC_SCSI_DBG(KERN_DEBUG,
fnic->lport->host,
"fnic_rport_exch_reset called portid 0x%06x\n",
port_id);
if (fnic->in_remove)
return;
scsi_host_busy_iter(fnic->lport->host, fnic_rport_abort_io_iter,
&iter_data);
if (iter_data.term_cnt > atomic64_read(&term_stats->max_terminates))
atomic64_set(&term_stats->max_terminates, iter_data.term_cnt);
}
void fnic_terminate_rport_io(struct fc_rport *rport)
{
struct fc_rport_libfc_priv *rdata;
struct fc_lport *lport;
struct fnic *fnic;
if (!rport) {
printk(KERN_ERR "fnic_terminate_rport_io: rport is NULL\n");
return;
}
rdata = rport->dd_data;
if (!rdata) {
printk(KERN_ERR "fnic_terminate_rport_io: rdata is NULL\n");
return;
}
lport = rdata->local_port;
if (!lport) {
printk(KERN_ERR "fnic_terminate_rport_io: lport is NULL\n");
return;
}
fnic = lport_priv(lport);
FNIC_SCSI_DBG(KERN_DEBUG,
fnic->lport->host, "fnic_terminate_rport_io called"
" wwpn 0x%llx, wwnn0x%llx, rport 0x%p, portid 0x%06x\n",
rport->port_name, rport->node_name, rport,
rport->port_id);
if (fnic->in_remove)
return;
fnic_rport_exch_reset(fnic, rport->port_id);
}
/*
* This function is exported to SCSI for sending abort cmnds.
* A SCSI IO is represented by a io_req in the driver.
* The ioreq is linked to the SCSI Cmd, thus a link with the ULP's IO.
*/
int fnic_abort_cmd(struct scsi_cmnd *sc)
{
struct request *const rq = scsi_cmd_to_rq(sc);
struct fc_lport *lp;
struct fnic *fnic;
struct fnic_io_req *io_req = NULL;
struct fc_rport *rport;
spinlock_t *io_lock;
unsigned long flags;
unsigned long start_time = 0;
int ret = SUCCESS;
u32 task_req = 0;
struct scsi_lun fc_lun;
struct fnic_stats *fnic_stats;
struct abort_stats *abts_stats;
struct terminate_stats *term_stats;
enum fnic_ioreq_state old_ioreq_state;
const int tag = rq->tag;
unsigned long abt_issued_time;
DECLARE_COMPLETION_ONSTACK(tm_done);
/* Wait for rport to unblock */
fc_block_scsi_eh(sc);
/* Get local-port, check ready and link up */
lp = shost_priv(sc->device->host);
fnic = lport_priv(lp);
fnic_stats = &fnic->fnic_stats;
abts_stats = &fnic->fnic_stats.abts_stats;
term_stats = &fnic->fnic_stats.term_stats;
rport = starget_to_rport(scsi_target(sc->device));
FNIC_SCSI_DBG(KERN_DEBUG,
fnic->lport->host,
"Abort Cmd called FCID 0x%x, LUN 0x%llx TAG %x flags %x\n",
rport->port_id, sc->device->lun, tag, fnic_priv(sc)->flags);
fnic_priv(sc)->flags = FNIC_NO_FLAGS;
if (lp->state != LPORT_ST_READY || !(lp->link_up)) {
ret = FAILED;
goto fnic_abort_cmd_end;
}
/*
 * Avoid a race between SCSI issuing the abort and the device
 * completing the command.
 *
 * If the command has already been completed by the fw cmpl code,
 * just return SUCCESS from here, which means the abort succeeded.
 * In the SCSI ML, since the command's timeout has already fired,
 * the completion won't actually complete the command and it will
 * be treated as an aborted command.
 *
 * .io_req will not be cleared except while holding io_req_lock.
 */
io_lock = fnic_io_lock_hash(fnic, sc);
spin_lock_irqsave(io_lock, flags);
io_req = fnic_priv(sc)->io_req;
if (!io_req) {
spin_unlock_irqrestore(io_lock, flags);
goto fnic_abort_cmd_end;
}
io_req->abts_done = &tm_done;
if (fnic_priv(sc)->state == FNIC_IOREQ_ABTS_PENDING) {
spin_unlock_irqrestore(io_lock, flags);
goto wait_pending;
}
abt_issued_time = jiffies_to_msecs(jiffies) - jiffies_to_msecs(io_req->start_time);
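/* classify how long the command had been outstanding before the abort */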
if (abt_issued_time <= 6000)
atomic64_inc(&abts_stats->abort_issued_btw_0_to_6_sec);
else if (abt_issued_time > 6000 && abt_issued_time <= 20000)
atomic64_inc(&abts_stats->abort_issued_btw_6_to_20_sec);
else if (abt_issued_time > 20000 && abt_issued_time <= 30000)
atomic64_inc(&abts_stats->abort_issued_btw_20_to_30_sec);
else if (abt_issued_time > 30000 && abt_issued_time <= 40000)
atomic64_inc(&abts_stats->abort_issued_btw_30_to_40_sec);
else if (abt_issued_time > 40000 && abt_issued_time <= 50000)
atomic64_inc(&abts_stats->abort_issued_btw_40_to_50_sec);
else if (abt_issued_time > 50000 && abt_issued_time <= 60000)
atomic64_inc(&abts_stats->abort_issued_btw_50_to_60_sec);
else
atomic64_inc(&abts_stats->abort_issued_greater_than_60_sec);
FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host,
"CBD Opcode: %02x Abort issued time: %lu msec\n", sc->cmnd[0], abt_issued_time);
/*
 * Command is still pending, need to abort it.
 * If the firmware completes the command after this point,
 * the completion won't be passed up to the mid-layer, since
 * the abort has already started.
 */
old_ioreq_state = fnic_priv(sc)->state;
fnic_priv(sc)->state = FNIC_IOREQ_ABTS_PENDING;
fnic_priv(sc)->abts_status = FCPIO_INVALID_CODE;
spin_unlock_irqrestore(io_lock, flags);
/*
* Check readiness of the remote port. If the path to remote
* port is up, then send abts to the remote port to terminate
* the IO. Else, just locally terminate the IO in the firmware
*/
if (fc_remote_port_chkready(rport) == 0)
task_req = FCPIO_ITMF_ABT_TASK;
else {
atomic64_inc(&fnic_stats->misc_stats.rport_not_ready);
task_req = FCPIO_ITMF_ABT_TASK_TERM;
}
/* Now queue the abort command to firmware */
int_to_scsilun(sc->device->lun, &fc_lun);
if (fnic_queue_abort_io_req(fnic, tag, task_req, fc_lun.scsi_lun,
io_req)) {
spin_lock_irqsave(io_lock, flags);
if (fnic_priv(sc)->state == FNIC_IOREQ_ABTS_PENDING)
fnic_priv(sc)->state = old_ioreq_state;
io_req = fnic_priv(sc)->io_req;
if (io_req)
io_req->abts_done = NULL;
spin_unlock_irqrestore(io_lock, flags);
ret = FAILED;
goto fnic_abort_cmd_end;
}
if (task_req == FCPIO_ITMF_ABT_TASK) {
fnic_priv(sc)->flags |= FNIC_IO_ABTS_ISSUED;
atomic64_inc(&fnic_stats->abts_stats.aborts);
} else {
fnic_priv(sc)->flags |= FNIC_IO_TERM_ISSUED;
atomic64_inc(&fnic_stats->term_stats.terminates);
}
/*
* We queued an abort IO, wait for its completion.
* Once the firmware completes the abort command, it will
* wake up this thread.
*/
wait_pending:
wait_for_completion_timeout(&tm_done,
msecs_to_jiffies
(2 * fnic->config.ra_tov +
fnic->config.ed_tov));
/* Check the abort status */
spin_lock_irqsave(io_lock, flags);
io_req = fnic_priv(sc)->io_req;
if (!io_req) {
atomic64_inc(&fnic_stats->io_stats.ioreq_null);
spin_unlock_irqrestore(io_lock, flags);
fnic_priv(sc)->flags |= FNIC_IO_ABT_TERM_REQ_NULL;
ret = FAILED;
goto fnic_abort_cmd_end;
}
io_req->abts_done = NULL;
/* fw did not complete abort, timed out */
if (fnic_priv(sc)->abts_status == FCPIO_INVALID_CODE) {
spin_unlock_irqrestore(io_lock, flags);
if (task_req == FCPIO_ITMF_ABT_TASK) {
atomic64_inc(&abts_stats->abort_drv_timeouts);
} else {
atomic64_inc(&term_stats->terminate_drv_timeouts);
}
fnic_priv(sc)->flags |= FNIC_IO_ABT_TERM_TIMED_OUT;
ret = FAILED;
goto fnic_abort_cmd_end;
}
/* IO out of order */
if (!(fnic_priv(sc)->flags & (FNIC_IO_ABORTED | FNIC_IO_DONE))) {
spin_unlock_irqrestore(io_lock, flags);
FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
"Issuing Host reset due to out of order IO\n");
ret = FAILED;
goto fnic_abort_cmd_end;
}
fnic_priv(sc)->state = FNIC_IOREQ_ABTS_COMPLETE;
start_time = io_req->start_time;
/*
 * Firmware completed the abort; check the status and free the
 * io_req if successful. If the abort failed, device reset will
 * clean up the I/O.
 */
if (fnic_priv(sc)->abts_status == FCPIO_SUCCESS) {
fnic_priv(sc)->io_req = NULL;
} else {
ret = FAILED;
spin_unlock_irqrestore(io_lock, flags);
goto fnic_abort_cmd_end;
}
spin_unlock_irqrestore(io_lock, flags);
fnic_release_ioreq_buf(fnic, io_req, sc);
mempool_free(io_req, fnic->io_req_pool);
/* Call SCSI completion function to complete the IO */
sc->result = DID_ABORT << 16;
scsi_done(sc);
atomic64_dec(&fnic_stats->io_stats.active_ios);
if (atomic64_read(&fnic->io_cmpl_skip))
atomic64_dec(&fnic->io_cmpl_skip);
else
atomic64_inc(&fnic_stats->io_stats.io_completions);
fnic_abort_cmd_end:
FNIC_TRACE(fnic_abort_cmd, sc->device->host->host_no, tag, sc,
jiffies_to_msecs(jiffies - start_time),
0, ((u64)sc->cmnd[0] << 32 |
(u64)sc->cmnd[2] << 24 | (u64)sc->cmnd[3] << 16 |
(u64)sc->cmnd[4] << 8 | sc->cmnd[5]),
fnic_flags_and_state(sc));
FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
"Returning from abort cmd type %x %s\n", task_req,
(ret == SUCCESS) ?
"SUCCESS" : "FAILED");
return ret;
}
static inline int fnic_queue_dr_io_req(struct fnic *fnic,
struct scsi_cmnd *sc,
struct fnic_io_req *io_req)
{
struct vnic_wq_copy *wq = &fnic->wq_copy[0];
struct Scsi_Host *host = fnic->lport->host;
struct misc_stats *misc_stats = &fnic->fnic_stats.misc_stats;
struct scsi_lun fc_lun;
int ret = 0;
unsigned long intr_flags;
spin_lock_irqsave(host->host_lock, intr_flags);
if (unlikely(fnic_chk_state_flags_locked(fnic,
FNIC_FLAGS_IO_BLOCKED))) {
spin_unlock_irqrestore(host->host_lock, intr_flags);
return FAILED;
} else
atomic_inc(&fnic->in_flight);
spin_unlock_irqrestore(host->host_lock, intr_flags);
spin_lock_irqsave(&fnic->wq_copy_lock[0], intr_flags);
if (vnic_wq_copy_desc_avail(wq) <= fnic->wq_copy_desc_low[0])
free_wq_copy_descs(fnic, wq);
if (!vnic_wq_copy_desc_avail(wq)) {
FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
"queue_dr_io_req failure - no descriptors\n");
atomic64_inc(&misc_stats->devrst_cpwq_alloc_failures);
ret = -EAGAIN;
goto lr_io_req_end;
}
/* fill in the lun info */
int_to_scsilun(sc->device->lun, &fc_lun);
fnic_queue_wq_copy_desc_itmf(wq, scsi_cmd_to_rq(sc)->tag | FNIC_TAG_DEV_RST,
0, FCPIO_ITMF_LUN_RESET, SCSI_NO_TAG,
fc_lun.scsi_lun, io_req->port_id,
fnic->config.ra_tov, fnic->config.ed_tov);
atomic64_inc(&fnic->fnic_stats.fw_stats.active_fw_reqs);
if (atomic64_read(&fnic->fnic_stats.fw_stats.active_fw_reqs) >
atomic64_read(&fnic->fnic_stats.fw_stats.max_fw_reqs))
atomic64_set(&fnic->fnic_stats.fw_stats.max_fw_reqs,
atomic64_read(&fnic->fnic_stats.fw_stats.active_fw_reqs));
lr_io_req_end:
spin_unlock_irqrestore(&fnic->wq_copy_lock[0], intr_flags);
atomic_dec(&fnic->in_flight);
return ret;
}
struct fnic_pending_aborts_iter_data {
struct fnic *fnic;
struct scsi_cmnd *lr_sc;
struct scsi_device *lun_dev;
int ret;
};
static bool fnic_pending_aborts_iter(struct scsi_cmnd *sc, void *data)
{
struct fnic_pending_aborts_iter_data *iter_data = data;
struct fnic *fnic = iter_data->fnic;
struct scsi_device *lun_dev = iter_data->lun_dev;
int abt_tag = scsi_cmd_to_rq(sc)->tag;
struct fnic_io_req *io_req;
spinlock_t *io_lock;
unsigned long flags;
struct scsi_lun fc_lun;
DECLARE_COMPLETION_ONSTACK(tm_done);
enum fnic_ioreq_state old_ioreq_state;
if (sc == iter_data->lr_sc || sc->device != lun_dev)
return true;
io_lock = fnic_io_lock_tag(fnic, abt_tag);
spin_lock_irqsave(io_lock, flags);
io_req = fnic_priv(sc)->io_req;
if (!io_req) {
spin_unlock_irqrestore(io_lock, flags);
return true;
}
/*
* Found IO that is still pending with firmware and
* belongs to the LUN that we are resetting
*/
FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
"Found IO in %s on lun\n",
fnic_ioreq_state_to_str(fnic_priv(sc)->state));
if (fnic_priv(sc)->state == FNIC_IOREQ_ABTS_PENDING) {
spin_unlock_irqrestore(io_lock, flags);
return true;
}
if ((fnic_priv(sc)->flags & FNIC_DEVICE_RESET) &&
(!(fnic_priv(sc)->flags & FNIC_DEV_RST_ISSUED))) {
FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host,
"%s dev rst not pending sc 0x%p\n", __func__,
sc);
spin_unlock_irqrestore(io_lock, flags);
return true;
}
if (io_req->abts_done)
shost_printk(KERN_ERR, fnic->lport->host,
"%s: io_req->abts_done is set state is %s\n",
__func__, fnic_ioreq_state_to_str(fnic_priv(sc)->state));
old_ioreq_state = fnic_priv(sc)->state;
/*
 * Any pending IO issued prior to the reset is expected to be in
 * the abts-pending state; if it is not, set FNIC_IOREQ_ABTS_PENDING
 * to mark the IO as abort-pending. When the IO completes, it will
 * be handed over and handled in this function.
 */
fnic_priv(sc)->state = FNIC_IOREQ_ABTS_PENDING;
BUG_ON(io_req->abts_done);
if (fnic_priv(sc)->flags & FNIC_DEVICE_RESET) {
abt_tag |= FNIC_TAG_DEV_RST;
FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host,
"%s: dev rst sc 0x%p\n", __func__, sc);
}
fnic_priv(sc)->abts_status = FCPIO_INVALID_CODE;
io_req->abts_done = &tm_done;
spin_unlock_irqrestore(io_lock, flags);
/* Now queue the abort command to firmware */
int_to_scsilun(sc->device->lun, &fc_lun);
if (fnic_queue_abort_io_req(fnic, abt_tag,
FCPIO_ITMF_ABT_TASK_TERM,
fc_lun.scsi_lun, io_req)) {
spin_lock_irqsave(io_lock, flags);
io_req = fnic_priv(sc)->io_req;
if (io_req)
io_req->abts_done = NULL;
if (fnic_priv(sc)->state == FNIC_IOREQ_ABTS_PENDING)
fnic_priv(sc)->state = old_ioreq_state;
spin_unlock_irqrestore(io_lock, flags);
iter_data->ret = FAILED;
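/* returning false stops the scsi_host_busy_iter() walk early */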
return false;
} else {
spin_lock_irqsave(io_lock, flags);
if (fnic_priv(sc)->flags & FNIC_DEVICE_RESET)
fnic_priv(sc)->flags |= FNIC_DEV_RST_TERM_ISSUED;
spin_unlock_irqrestore(io_lock, flags);
}
fnic_priv(sc)->flags |= FNIC_IO_INTERNAL_TERM_ISSUED;
wait_for_completion_timeout(&tm_done, msecs_to_jiffies
(fnic->config.ed_tov));
/* Recheck cmd state to check if it is now aborted */
spin_lock_irqsave(io_lock, flags);
io_req = fnic_priv(sc)->io_req;
if (!io_req) {
spin_unlock_irqrestore(io_lock, flags);
fnic_priv(sc)->flags |= FNIC_IO_ABT_TERM_REQ_NULL;
return true;
}
io_req->abts_done = NULL;
/* if abort is still pending with fw, fail */
if (fnic_priv(sc)->abts_status == FCPIO_INVALID_CODE) {
spin_unlock_irqrestore(io_lock, flags);
fnic_priv(sc)->flags |= FNIC_IO_ABT_TERM_DONE;
iter_data->ret = FAILED;
return false;
}
fnic_priv(sc)->state = FNIC_IOREQ_ABTS_COMPLETE;
/* original sc used for lr is handled by dev reset code */
if (sc != iter_data->lr_sc)
fnic_priv(sc)->io_req = NULL;
spin_unlock_irqrestore(io_lock, flags);
/* original sc used for lr is handled by dev reset code */
if (sc != iter_data->lr_sc) {
fnic_release_ioreq_buf(fnic, io_req, sc);
mempool_free(io_req, fnic->io_req_pool);
}
/*
 * Any IO returned during reset needs scsi_done() called on it
 * to hand the scsi_cmnd back to the upper layer.
 */
/* Set result to let upper SCSI layer retry */
sc->result = DID_RESET << 16;
scsi_done(sc);
return true;
}
/*
* Clean up any pending aborts on the lun
* For each outstanding IO on this lun, whose abort is not completed by fw,
* issue a local abort. Wait for abort to complete. Return 0 if all commands
* successfully aborted, 1 otherwise
*/
static int fnic_clean_pending_aborts(struct fnic *fnic,
struct scsi_cmnd *lr_sc,
bool new_sc)
{
int ret = 0;
struct fnic_pending_aborts_iter_data iter_data = {
.fnic = fnic,
.lun_dev = lr_sc->device,
.ret = SUCCESS,
};
if (new_sc)
iter_data.lr_sc = lr_sc;
scsi_host_busy_iter(fnic->lport->host,
fnic_pending_aborts_iter, &iter_data);
if (iter_data.ret == FAILED) {
ret = iter_data.ret;
goto clean_pending_aborts_end;
}
schedule_timeout_uninterruptible(msecs_to_jiffies(2 * fnic->config.ed_tov));
/* walk again to check, if IOs are still pending in fw */
if (fnic_is_abts_pending(fnic, lr_sc))
ret = 1;
clean_pending_aborts_end:
FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host,
"%s: exit status: %d\n", __func__, ret);
return ret;
}
/*
* SCSI Eh thread issues a Lun Reset when one or more commands on a LUN
* fail to get aborted. It calls driver's eh_device_reset with a SCSI command
* on the LUN.
*/
int fnic_device_reset(struct scsi_cmnd *sc)
{
struct request *rq = scsi_cmd_to_rq(sc);
struct fc_lport *lp;
struct fnic *fnic;
struct fnic_io_req *io_req = NULL;
struct fc_rport *rport;
int status;
int ret = FAILED;
spinlock_t *io_lock;
unsigned long flags;
unsigned long start_time = 0;
struct scsi_lun fc_lun;
struct fnic_stats *fnic_stats;
struct reset_stats *reset_stats;
int tag = rq->tag;
DECLARE_COMPLETION_ONSTACK(tm_done);
bool new_sc = false;
/* Wait for rport to unblock */
fc_block_scsi_eh(sc);
/* Get local-port, check ready and link up */
lp = shost_priv(sc->device->host);
fnic = lport_priv(lp);
fnic_stats = &fnic->fnic_stats;
reset_stats = &fnic->fnic_stats.reset_stats;
atomic64_inc(&reset_stats->device_resets);
rport = starget_to_rport(scsi_target(sc->device));
FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
"Device reset called FCID 0x%x, LUN 0x%llx sc 0x%p\n",
rport->port_id, sc->device->lun, sc);
if (lp->state != LPORT_ST_READY || !(lp->link_up))
goto fnic_device_reset_end;
/* Check if remote port up */
if (fc_remote_port_chkready(rport)) {
atomic64_inc(&fnic_stats->misc_stats.rport_not_ready);
goto fnic_device_reset_end;
}
fnic_priv(sc)->flags = FNIC_DEVICE_RESET;
if (unlikely(tag < 0)) {
/*
* For a device reset issued through sg3utils, we let
* only one LUN_RESET go through at a time and use a special
* tag equal to max_tag_id so that we don't have to allocate
* or free it. It won't interact with tags
* allocated by the mid layer.
*/
mutex_lock(&fnic->sgreset_mutex);
tag = fnic->fnic_max_tag_id;
new_sc = 1;
}
io_lock = fnic_io_lock_hash(fnic, sc);
spin_lock_irqsave(io_lock, flags);
io_req = fnic_priv(sc)->io_req;
/*
* If there is a io_req attached to this command, then use it,
* else allocate a new one.
*/
if (!io_req) {
io_req = mempool_alloc(fnic->io_req_pool, GFP_ATOMIC);
if (!io_req) {
spin_unlock_irqrestore(io_lock, flags);
goto fnic_device_reset_end;
}
memset(io_req, 0, sizeof(*io_req));
io_req->port_id = rport->port_id;
fnic_priv(sc)->io_req = io_req;
}
io_req->dr_done = &tm_done;
fnic_priv(sc)->state = FNIC_IOREQ_CMD_PENDING;
fnic_priv(sc)->lr_status = FCPIO_INVALID_CODE;
spin_unlock_irqrestore(io_lock, flags);
FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, "TAG %x\n", tag);
/*
* issue the device reset, if enqueue failed, clean up the ioreq
* and break assoc with scsi cmd
*/
if (fnic_queue_dr_io_req(fnic, sc, io_req)) {
spin_lock_irqsave(io_lock, flags);
io_req = fnic_priv(sc)->io_req;
if (io_req)
io_req->dr_done = NULL;
goto fnic_device_reset_clean;
}
spin_lock_irqsave(io_lock, flags);
fnic_priv(sc)->flags |= FNIC_DEV_RST_ISSUED;
spin_unlock_irqrestore(io_lock, flags);
/*
* Wait on the local completion for LUN reset. The io_req may be
* freed while we wait since we hold no lock.
*/
wait_for_completion_timeout(&tm_done,
msecs_to_jiffies(FNIC_LUN_RESET_TIMEOUT));
spin_lock_irqsave(io_lock, flags);
io_req = fnic_priv(sc)->io_req;
if (!io_req) {
spin_unlock_irqrestore(io_lock, flags);
FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
"io_req is null tag 0x%x sc 0x%p\n", tag, sc);
goto fnic_device_reset_end;
}
io_req->dr_done = NULL;
status = fnic_priv(sc)->lr_status;
/*
* If the LUN reset has not completed, bail out with FAILED. The io_req
* gets cleaned up by higher levels of EH.
*/
if (status == FCPIO_INVALID_CODE) {
atomic64_inc(&reset_stats->device_reset_timeouts);
FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
"Device reset timed out\n");
fnic_priv(sc)->flags |= FNIC_DEV_RST_TIMED_OUT;
spin_unlock_irqrestore(io_lock, flags);
int_to_scsilun(sc->device->lun, &fc_lun);
/*
* Issue abort and terminate on device reset request.
* If q'ing of terminate fails, retry it after a delay.
*/
while (1) {
spin_lock_irqsave(io_lock, flags);
if (fnic_priv(sc)->flags & FNIC_DEV_RST_TERM_ISSUED) {
spin_unlock_irqrestore(io_lock, flags);
break;
}
spin_unlock_irqrestore(io_lock, flags);
if (fnic_queue_abort_io_req(fnic,
tag | FNIC_TAG_DEV_RST,
FCPIO_ITMF_ABT_TASK_TERM,
fc_lun.scsi_lun, io_req)) {
wait_for_completion_timeout(&tm_done,
msecs_to_jiffies(FNIC_ABT_TERM_DELAY_TIMEOUT));
} else {
spin_lock_irqsave(io_lock, flags);
fnic_priv(sc)->flags |= FNIC_DEV_RST_TERM_ISSUED;
fnic_priv(sc)->state = FNIC_IOREQ_ABTS_PENDING;
io_req->abts_done = &tm_done;
spin_unlock_irqrestore(io_lock, flags);
FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
"Abort and terminate issued on Device reset "
"tag 0x%x sc 0x%p\n", tag, sc);
break;
}
}
while (1) {
spin_lock_irqsave(io_lock, flags);
if (!(fnic_priv(sc)->flags & FNIC_DEV_RST_DONE)) {
spin_unlock_irqrestore(io_lock, flags);
wait_for_completion_timeout(&tm_done,
msecs_to_jiffies(FNIC_LUN_RESET_TIMEOUT));
break;
} else {
io_req = fnic_priv(sc)->io_req;
io_req->abts_done = NULL;
goto fnic_device_reset_clean;
}
}
} else {
spin_unlock_irqrestore(io_lock, flags);
}
/* Completed, but not successful, clean up the io_req, return fail */
if (status != FCPIO_SUCCESS) {
spin_lock_irqsave(io_lock, flags);
FNIC_SCSI_DBG(KERN_DEBUG,
fnic->lport->host,
"Device reset completed - failed\n");
io_req = fnic_priv(sc)->io_req;
goto fnic_device_reset_clean;
}
/*
* Clean up any aborts on this lun that have still not
* completed. If any of these fail, then LUN reset fails.
* clean_pending_aborts cleans all cmds on this lun except
* the lun reset cmd. If all cmds get cleaned, the lun reset
* succeeds
*/
if (fnic_clean_pending_aborts(fnic, sc, new_sc)) {
spin_lock_irqsave(io_lock, flags);
io_req = fnic_priv(sc)->io_req;
FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
"Device reset failed"
" since could not abort all IOs\n");
goto fnic_device_reset_clean;
}
/* Clean lun reset command */
spin_lock_irqsave(io_lock, flags);
io_req = fnic_priv(sc)->io_req;
if (io_req)
/* Completed, and successful */
ret = SUCCESS;
fnic_device_reset_clean:
if (io_req)
fnic_priv(sc)->io_req = NULL;
spin_unlock_irqrestore(io_lock, flags);
if (io_req) {
start_time = io_req->start_time;
fnic_release_ioreq_buf(fnic, io_req, sc);
mempool_free(io_req, fnic->io_req_pool);
}
fnic_device_reset_end:
FNIC_TRACE(fnic_device_reset, sc->device->host->host_no, rq->tag, sc,
jiffies_to_msecs(jiffies - start_time),
0, ((u64)sc->cmnd[0] << 32 |
(u64)sc->cmnd[2] << 24 | (u64)sc->cmnd[3] << 16 |
(u64)sc->cmnd[4] << 8 | sc->cmnd[5]),
fnic_flags_and_state(sc));
if (new_sc)
mutex_unlock(&fnic->sgreset_mutex);
FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
"Returning from device reset %s\n",
(ret == SUCCESS) ?
"SUCCESS" : "FAILED");
if (ret == FAILED)
atomic64_inc(&reset_stats->device_reset_failures);
return ret;
}
/* Clean up all IOs, clean up libFC local port */
int fnic_reset(struct Scsi_Host *shost)
{
struct fc_lport *lp;
struct fnic *fnic;
int ret = 0;
struct reset_stats *reset_stats;
lp = shost_priv(shost);
fnic = lport_priv(lp);
reset_stats = &fnic->fnic_stats.reset_stats;
FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
"fnic_reset called\n");
atomic64_inc(&reset_stats->fnic_resets);
/*
* Reset local port, this will clean up libFC exchanges,
* reset remote port sessions, and if link is up, begin flogi
*/
ret = fc_lport_reset(lp);
FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
"Returning from fnic reset %s\n",
(ret == 0) ?
"SUCCESS" : "FAILED");
if (ret == 0)
atomic64_inc(&reset_stats->fnic_reset_completions);
else
atomic64_inc(&reset_stats->fnic_reset_failures);
return ret;
}
/*
* SCSI Error handling calls driver's eh_host_reset if all prior
* error handling levels return FAILED. If host reset completes
* successfully, and if link is up, then Fabric login begins.
*
* Host Reset is the highest level of error recovery. If this fails, then
* host is offlined by SCSI.
*
*/
int fnic_host_reset(struct scsi_cmnd *sc)
{
int ret;
unsigned long wait_host_tmo;
struct Scsi_Host *shost = sc->device->host;
struct fc_lport *lp = shost_priv(shost);
struct fnic *fnic = lport_priv(lp);
unsigned long flags;
spin_lock_irqsave(&fnic->fnic_lock, flags);
if (!fnic->internal_reset_inprogress) {
fnic->internal_reset_inprogress = true;
} else {
spin_unlock_irqrestore(&fnic->fnic_lock, flags);
FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
"host reset in progress skipping another host reset\n");
return SUCCESS;
}
spin_unlock_irqrestore(&fnic->fnic_lock, flags);
/*
* If fnic_reset is successful, wait for fabric login to complete.
* scsi-ml tries to send a TUR to every device if host reset is
* successful, so before returning to scsi, the fabric should be up.
*/
ret = (fnic_reset(shost) == 0) ? SUCCESS : FAILED;
if (ret == SUCCESS) {
wait_host_tmo = jiffies + FNIC_HOST_RESET_SETTLE_TIME * HZ;
ret = FAILED;
while (time_before(jiffies, wait_host_tmo)) {
if ((lp->state == LPORT_ST_READY) &&
(lp->link_up)) {
ret = SUCCESS;
break;
}
ssleep(1);
}
}
spin_lock_irqsave(&fnic->fnic_lock, flags);
fnic->internal_reset_inprogress = false;
spin_unlock_irqrestore(&fnic->fnic_lock, flags);
return ret;
}
/*
* This function is called from libFC when the host is removed.
*/
void fnic_scsi_abort_io(struct fc_lport *lp)
{
int err = 0;
unsigned long flags;
enum fnic_state old_state;
struct fnic *fnic = lport_priv(lp);
DECLARE_COMPLETION_ONSTACK(remove_wait);
/* Issue firmware reset for fnic, wait for reset to complete */
retry_fw_reset:
spin_lock_irqsave(&fnic->fnic_lock, flags);
if (unlikely(fnic->state == FNIC_IN_FC_TRANS_ETH_MODE) &&
fnic->link_events) {
/* fw reset is in progress, poll for its completion */
spin_unlock_irqrestore(&fnic->fnic_lock, flags);
schedule_timeout(msecs_to_jiffies(100));
goto retry_fw_reset;
}
fnic->remove_wait = &remove_wait;
old_state = fnic->state;
fnic->state = FNIC_IN_FC_TRANS_ETH_MODE;
fnic_update_mac_locked(fnic, fnic->ctlr.ctl_src_addr);
spin_unlock_irqrestore(&fnic->fnic_lock, flags);
err = fnic_fw_reset_handler(fnic);
if (err) {
spin_lock_irqsave(&fnic->fnic_lock, flags);
if (fnic->state == FNIC_IN_FC_TRANS_ETH_MODE)
fnic->state = old_state;
fnic->remove_wait = NULL;
spin_unlock_irqrestore(&fnic->fnic_lock, flags);
return;
}
/* Wait for firmware reset to complete */
wait_for_completion_timeout(&remove_wait,
msecs_to_jiffies(FNIC_RMDEVICE_TIMEOUT));
spin_lock_irqsave(&fnic->fnic_lock, flags);
fnic->remove_wait = NULL;
FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
"fnic_scsi_abort_io %s\n",
(fnic->state == FNIC_IN_ETH_MODE) ?
"SUCCESS" : "FAILED");
spin_unlock_irqrestore(&fnic->fnic_lock, flags);
}
/*
* This function is called from libFC to clean up driver IO state on link down.
*/
void fnic_scsi_cleanup(struct fc_lport *lp)
{
unsigned long flags;
enum fnic_state old_state;
struct fnic *fnic = lport_priv(lp);
/* issue fw reset */
retry_fw_reset:
spin_lock_irqsave(&fnic->fnic_lock, flags);
if (unlikely(fnic->state == FNIC_IN_FC_TRANS_ETH_MODE)) {
/* fw reset is in progress, poll for its completion */
spin_unlock_irqrestore(&fnic->fnic_lock, flags);
schedule_timeout(msecs_to_jiffies(100));
goto retry_fw_reset;
}
old_state = fnic->state;
fnic->state = FNIC_IN_FC_TRANS_ETH_MODE;
fnic_update_mac_locked(fnic, fnic->ctlr.ctl_src_addr);
spin_unlock_irqrestore(&fnic->fnic_lock, flags);
if (fnic_fw_reset_handler(fnic)) {
spin_lock_irqsave(&fnic->fnic_lock, flags);
if (fnic->state == FNIC_IN_FC_TRANS_ETH_MODE)
fnic->state = old_state;
spin_unlock_irqrestore(&fnic->fnic_lock, flags);
}
}
void fnic_empty_scsi_cleanup(struct fc_lport *lp)
{
}
void fnic_exch_mgr_reset(struct fc_lport *lp, u32 sid, u32 did)
{
struct fnic *fnic = lport_priv(lp);
/* Non-zero sid, nothing to do */
if (sid)
goto call_fc_exch_mgr_reset;
if (did) {
fnic_rport_exch_reset(fnic, did);
goto call_fc_exch_mgr_reset;
}
/*
* sid = 0, did = 0
* link down or device being removed
*/
if (!fnic->in_remove)
fnic_scsi_cleanup(lp);
else
fnic_scsi_abort_io(lp);
/* call libFC exch mgr reset to reset its exchanges */
call_fc_exch_mgr_reset:
fc_exch_mgr_reset(lp, sid, did);
}
static bool fnic_abts_pending_iter(struct scsi_cmnd *sc, void *data)
{
struct fnic_pending_aborts_iter_data *iter_data = data;
struct fnic *fnic = iter_data->fnic;
int cmd_state;
struct fnic_io_req *io_req;
spinlock_t *io_lock;
unsigned long flags;
/*
* ignore this lun reset cmd or cmds that do not belong to
* this lun
*/
if (iter_data->lr_sc && sc == iter_data->lr_sc)
return true;
if (iter_data->lun_dev && sc->device != iter_data->lun_dev)
return true;
io_lock = fnic_io_lock_hash(fnic, sc);
spin_lock_irqsave(io_lock, flags);
io_req = fnic_priv(sc)->io_req;
if (!io_req) {
spin_unlock_irqrestore(io_lock, flags);
return true;
}
/*
* Found IO that is still pending with firmware and
* belongs to the LUN that we are resetting
*/
FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host,
"Found IO in %s on lun\n",
fnic_ioreq_state_to_str(fnic_priv(sc)->state));
cmd_state = fnic_priv(sc)->state;
spin_unlock_irqrestore(io_lock, flags);
if (cmd_state == FNIC_IOREQ_ABTS_PENDING)
iter_data->ret = 1;
return iter_data->ret ? false : true;
}
/*
* fnic_is_abts_pending() is a helper function that walks the tag map to
* check whether any IOs are still pending. It returns 1 (true) if at least
* one is found, otherwise 0 (false).
* If @lr_sc is non-NULL, only IOs on that particular LUN are checked;
* otherwise all IOs are checked.
*/
int fnic_is_abts_pending(struct fnic *fnic, struct scsi_cmnd *lr_sc)
{
struct fnic_pending_aborts_iter_data iter_data = {
.fnic = fnic,
.lun_dev = NULL,
.ret = 0,
};
if (lr_sc) {
iter_data.lun_dev = lr_sc->device;
iter_data.lr_sc = lr_sc;
}
/* walk again to check if IOs are still pending in fw */
scsi_host_busy_iter(fnic->lport->host,
fnic_abts_pending_iter, &iter_data);
return iter_data.ret;
}
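/*
 * Usage sketch (illustrative, mirroring the call in
 * fnic_clean_pending_aborts() above): after giving the firmware
 * 2 * ed_tov to finish outstanding terminates, the helper decides whether
 * the LUN reset can be declared successful:
 *
 *	schedule_timeout(msecs_to_jiffies(2 * fnic->config.ed_tov));
 *	if (fnic_is_abts_pending(fnic, lr_sc))
 *		ret = 1;	// some aborts are still pending in fw
 *
 * Passing a NULL @lr_sc widens the check from a single LUN to every
 * outstanding command on the host.
 */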
| linux-master | drivers/scsi/fnic/fnic_scsi.c |
// SPDX-License-Identifier: GPL-2.0-only
// Copyright 2012 Cisco Systems, Inc. All rights reserved.
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/debugfs.h>
#include <linux/vmalloc.h>
#include "fnic.h"
static struct dentry *fnic_trace_debugfs_root;
static struct dentry *fnic_trace_debugfs_file;
static struct dentry *fnic_trace_enable;
static struct dentry *fnic_stats_debugfs_root;
static struct dentry *fnic_fc_trace_debugfs_file;
static struct dentry *fnic_fc_rdata_trace_debugfs_file;
static struct dentry *fnic_fc_trace_enable;
static struct dentry *fnic_fc_trace_clear;
struct fc_trace_flag_type {
u8 fc_row_file;
u8 fc_normal_file;
u8 fnic_trace;
u8 fc_trace;
u8 fc_clear;
};
static struct fc_trace_flag_type *fc_trc_flag;
/*
* fnic_debugfs_init - Initialize debugfs for fnic debug logging
*
* Description:
* When Debugfs is configured this routine sets up the fnic debugfs
* file system. If not already created, this routine will create the
* fnic directory and statistics directory for trace buffer and
* stats logging.
*/
int fnic_debugfs_init(void)
{
fnic_trace_debugfs_root = debugfs_create_dir("fnic", NULL);
fnic_stats_debugfs_root = debugfs_create_dir("statistics",
fnic_trace_debugfs_root);
/* Allocate memory to structure */
fc_trc_flag = vmalloc(sizeof(struct fc_trace_flag_type));
if (fc_trc_flag) {
fc_trc_flag->fc_row_file = 0;
fc_trc_flag->fc_normal_file = 1;
fc_trc_flag->fnic_trace = 2;
fc_trc_flag->fc_trace = 3;
fc_trc_flag->fc_clear = 4;
}
return 0;
}
/*
* fnic_debugfs_terminate - Tear down debugfs infrastructure
*
* Description:
* When Debugfs is configured this routine removes debugfs file system
* elements that are specific to fnic.
*/
void fnic_debugfs_terminate(void)
{
debugfs_remove(fnic_stats_debugfs_root);
fnic_stats_debugfs_root = NULL;
debugfs_remove(fnic_trace_debugfs_root);
fnic_trace_debugfs_root = NULL;
vfree(fc_trc_flag);
}
/*
* fnic_trace_ctrl_read -
* Read the trace_enable, fc_trace_enable
* or fc_trace_clear debugfs file
* @filp: The file pointer to read from.
* @ubuf: The buffer to copy the data to.
* @cnt: The number of bytes to read.
* @ppos: The position in the file to start reading from.
*
* Description:
* This routine reads the value of fnic_tracing_enabled,
* fnic_fc_tracing_enabled or fnic_fc_trace_cleared
* and stores it into the local @buf.
* It will start reading file at @ppos and
* copy up to @cnt of data to @ubuf from @buf.
*
* Returns:
* This function returns the amount of data that was read.
*/
static ssize_t fnic_trace_ctrl_read(struct file *filp,
char __user *ubuf,
size_t cnt, loff_t *ppos)
{
char buf[64];
int len;
u8 *trace_type;
len = 0;
trace_type = (u8 *)filp->private_data;
if (*trace_type == fc_trc_flag->fnic_trace)
len = sprintf(buf, "%d\n", fnic_tracing_enabled);
else if (*trace_type == fc_trc_flag->fc_trace)
len = sprintf(buf, "%d\n", fnic_fc_tracing_enabled);
else if (*trace_type == fc_trc_flag->fc_clear)
len = sprintf(buf, "%d\n", fnic_fc_trace_cleared);
else
pr_err("fnic: Cannot read to any debugfs file\n");
return simple_read_from_buffer(ubuf, cnt, ppos, buf, len);
}
/*
* fnic_trace_ctrl_write -
* Write to trace_enable, fc_trace_enable or
* fc_trace_clear debugfs file
* @filp: The file pointer to write from.
* @ubuf: The buffer to copy the data from.
* @cnt: The number of bytes to write.
* @ppos: The position in the file to start writing to.
*
* Description:
* This routine writes data from user buffer @ubuf to buffer @buf and
* sets fnic_tracing_enabled, fnic_fc_tracing_enabled or
* fnic_fc_trace_cleared as per the user input.
*
* Returns:
* This function returns the amount of data that was written.
*/
static ssize_t fnic_trace_ctrl_write(struct file *filp,
const char __user *ubuf,
size_t cnt, loff_t *ppos)
{
char buf[64];
unsigned long val;
int ret;
u8 *trace_type;
trace_type = (u8 *)filp->private_data;
if (cnt >= sizeof(buf))
return -EINVAL;
if (copy_from_user(&buf, ubuf, cnt))
return -EFAULT;
buf[cnt] = 0;
ret = kstrtoul(buf, 10, &val);
if (ret < 0)
return ret;
if (*trace_type == fc_trc_flag->fnic_trace)
fnic_tracing_enabled = val;
else if (*trace_type == fc_trc_flag->fc_trace)
fnic_fc_tracing_enabled = val;
else if (*trace_type == fc_trc_flag->fc_clear)
fnic_fc_trace_cleared = val;
else
pr_err("fnic: cannot write to any debugfs file\n");
(*ppos)++;
return cnt;
}
static const struct file_operations fnic_trace_ctrl_fops = {
.owner = THIS_MODULE,
.open = simple_open,
.read = fnic_trace_ctrl_read,
.write = fnic_trace_ctrl_write,
};
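/*
 * Note (added for clarity): simple_open() copies inode->i_private into
 * file->private_data, so the u8 flag passed as the "data" argument to
 * debugfs_create_file() for each control file selects which of the three
 * globals the read/write handlers above touch:
 *
 *	fnic_trace  -> fnic_tracing_enabled
 *	fc_trace    -> fnic_fc_tracing_enabled
 *	fc_clear    -> fnic_fc_trace_cleared
 */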
/*
* fnic_trace_debugfs_open - Open the fnic trace log
* @inode: The inode pointer
* @file: The file pointer to attach the log output
*
* Description:
* This routine is the entry point for the debugfs open file operation.
* It allocates the necessary buffer for the log, fills the buffer from
* the in-memory log and then returns a pointer to that log in
* the private_data field in @file.
*
* Returns:
* This function returns zero if successful. On error it will return
* a negative error value.
*/
static int fnic_trace_debugfs_open(struct inode *inode,
struct file *file)
{
fnic_dbgfs_t *fnic_dbg_prt;
u8 *rdata_ptr;
rdata_ptr = (u8 *)inode->i_private;
fnic_dbg_prt = kzalloc(sizeof(fnic_dbgfs_t), GFP_KERNEL);
if (!fnic_dbg_prt)
return -ENOMEM;
if (*rdata_ptr == fc_trc_flag->fnic_trace) {
fnic_dbg_prt->buffer = vzalloc(array3_size(3, trace_max_pages,
PAGE_SIZE));
if (!fnic_dbg_prt->buffer) {
kfree(fnic_dbg_prt);
return -ENOMEM;
}
fnic_dbg_prt->buffer_len = fnic_get_trace_data(fnic_dbg_prt);
} else {
fnic_dbg_prt->buffer =
vzalloc(array3_size(3, fnic_fc_trace_max_pages,
PAGE_SIZE));
if (!fnic_dbg_prt->buffer) {
kfree(fnic_dbg_prt);
return -ENOMEM;
}
fnic_dbg_prt->buffer_len =
fnic_fc_trace_get_data(fnic_dbg_prt, *rdata_ptr);
}
file->private_data = fnic_dbg_prt;
return 0;
}
/*
* fnic_trace_debugfs_lseek - Seek through a debugfs file
* @file: The file pointer to seek through.
* @offset: The offset to seek to or the amount to seek by.
* @howto: Indicates how to seek.
*
* Description:
* This routine is the entry point for the debugfs lseek file operation.
* The @howto parameter indicates whether @offset is the offset to directly
* seek to, or if it is a value to seek forward or reverse by. This function
* figures out what the new offset of the debugfs file will be and assigns
* that value to the f_pos field of @file.
*
* Returns:
* This function returns the new offset if successful and returns a negative
* error if unable to process the seek.
*/
static loff_t fnic_trace_debugfs_lseek(struct file *file,
loff_t offset,
int howto)
{
fnic_dbgfs_t *fnic_dbg_prt = file->private_data;
return fixed_size_llseek(file, offset, howto,
fnic_dbg_prt->buffer_len);
}
/*
* fnic_trace_debugfs_read - Read a debugfs file
* @file: The file pointer to read from.
* @ubuf: The buffer to copy the data to.
* @nbytes: The number of bytes to read.
* @pos: The position in the file to start reading from.
*
* Description:
* This routine reads data from the buffer indicated in the private_data
* field of @file. It will start reading at @pos and copy up to @nbytes of
* data to @ubuf.
*
* Returns:
* This function returns the amount of data that was read (this could be
* less than @nbytes if the end of the file was reached).
*/
static ssize_t fnic_trace_debugfs_read(struct file *file,
char __user *ubuf,
size_t nbytes,
loff_t *pos)
{
fnic_dbgfs_t *fnic_dbg_prt = file->private_data;
int rc = 0;
rc = simple_read_from_buffer(ubuf, nbytes, pos,
fnic_dbg_prt->buffer,
fnic_dbg_prt->buffer_len);
return rc;
}
/*
* fnic_trace_debugfs_release - Release the buffer used to store
* debugfs file data
* @inode: The inode pointer
* @file: The file pointer that contains the buffer to release
*
* Description:
* This routine frees the buffer that was allocated when the debugfs
* file was opened.
*
* Returns:
* This function returns zero.
*/
static int fnic_trace_debugfs_release(struct inode *inode,
struct file *file)
{
fnic_dbgfs_t *fnic_dbg_prt = file->private_data;
vfree(fnic_dbg_prt->buffer);
kfree(fnic_dbg_prt);
return 0;
}
static const struct file_operations fnic_trace_debugfs_fops = {
.owner = THIS_MODULE,
.open = fnic_trace_debugfs_open,
.llseek = fnic_trace_debugfs_lseek,
.read = fnic_trace_debugfs_read,
.release = fnic_trace_debugfs_release,
};
/*
* fnic_trace_debugfs_init - Initialize debugfs for fnic trace logging
*
* Description:
* When Debugfs is configured this routine sets up the fnic debugfs
* file system. If not already created, this routine will create the
* file trace to log fnic trace buffer output into debugfs, and
* it will also create the file tracing_enable to control enabling/disabling
* of trace logging into the trace buffer.
*/
void fnic_trace_debugfs_init(void)
{
fnic_trace_enable = debugfs_create_file("tracing_enable",
S_IFREG|S_IRUGO|S_IWUSR,
fnic_trace_debugfs_root,
&(fc_trc_flag->fnic_trace),
&fnic_trace_ctrl_fops);
fnic_trace_debugfs_file = debugfs_create_file("trace",
S_IFREG|S_IRUGO|S_IWUSR,
fnic_trace_debugfs_root,
&(fc_trc_flag->fnic_trace),
&fnic_trace_debugfs_fops);
}
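/*
 * Usage sketch (assumes debugfs is mounted at the conventional
 * /sys/kernel/debug location, which is not guaranteed):
 *
 *	echo 1 > /sys/kernel/debug/fnic/tracing_enable	# turn tracing on
 *	cat /sys/kernel/debug/fnic/trace		# dump the trace buffer
 *
 * Writes are parsed with kstrtoul() base 10, so only decimal integers are
 * accepted.
 */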
/*
* fnic_trace_debugfs_terminate - Tear down debugfs infrastructure
*
* Description:
* When Debugfs is configured this routine removes debugfs file system
* elements that are specific to fnic trace logging.
*/
void fnic_trace_debugfs_terminate(void)
{
debugfs_remove(fnic_trace_debugfs_file);
fnic_trace_debugfs_file = NULL;
debugfs_remove(fnic_trace_enable);
fnic_trace_enable = NULL;
}
/*
* fnic_fc_trace_debugfs_init -
* Initialize debugfs for fnic control frame trace logging
*
* Description:
* When Debugfs is configured this routine sets up the fnic_fc debugfs
* file system. If not already created, this routine will create the
* file fc_trace to log fnic fc trace buffer output into debugfs, and
* it will also create the file fc_trace_enable to control enabling/disabling
* of trace logging into the trace buffer.
*/
void fnic_fc_trace_debugfs_init(void)
{
fnic_fc_trace_enable = debugfs_create_file("fc_trace_enable",
S_IFREG|S_IRUGO|S_IWUSR,
fnic_trace_debugfs_root,
&(fc_trc_flag->fc_trace),
&fnic_trace_ctrl_fops);
fnic_fc_trace_clear = debugfs_create_file("fc_trace_clear",
S_IFREG|S_IRUGO|S_IWUSR,
fnic_trace_debugfs_root,
&(fc_trc_flag->fc_clear),
&fnic_trace_ctrl_fops);
fnic_fc_rdata_trace_debugfs_file =
debugfs_create_file("fc_trace_rdata",
S_IFREG|S_IRUGO|S_IWUSR,
fnic_trace_debugfs_root,
&(fc_trc_flag->fc_normal_file),
&fnic_trace_debugfs_fops);
fnic_fc_trace_debugfs_file =
debugfs_create_file("fc_trace",
S_IFREG|S_IRUGO|S_IWUSR,
fnic_trace_debugfs_root,
&(fc_trc_flag->fc_row_file),
&fnic_trace_debugfs_fops);
}
/*
* fnic_fc_trace_debugfs_terminate - Tear down debugfs infrastructure
*
* Description:
* When Debugfs is configured this routine removes debugfs file system
* elements that are specific to fnic_fc trace logging.
*/
void fnic_fc_trace_debugfs_terminate(void)
{
debugfs_remove(fnic_fc_trace_debugfs_file);
fnic_fc_trace_debugfs_file = NULL;
debugfs_remove(fnic_fc_rdata_trace_debugfs_file);
fnic_fc_rdata_trace_debugfs_file = NULL;
debugfs_remove(fnic_fc_trace_enable);
fnic_fc_trace_enable = NULL;
debugfs_remove(fnic_fc_trace_clear);
fnic_fc_trace_clear = NULL;
}
/*
* fnic_reset_stats_open - Open the reset_stats file
* @inode: The inode pointer.
* @file: The file pointer to attach the stats reset flag.
*
* Description:
* This routine opens the debugfs file reset_stats and stores the i_private
* data in the debug structure so it can be retrieved later while performing
* other file operations.
*
* Returns:
* This function returns zero if successful.
*/
static int fnic_reset_stats_open(struct inode *inode, struct file *file)
{
struct stats_debug_info *debug;
debug = kzalloc(sizeof(struct stats_debug_info), GFP_KERNEL);
if (!debug)
return -ENOMEM;
debug->i_private = inode->i_private;
file->private_data = debug;
return 0;
}
/*
* fnic_reset_stats_read - Read a reset_stats debugfs file
* @filp: The file pointer to read from.
* @ubuf: The buffer to copy the data to.
* @cnt: The number of bytes to read.
* @ppos: The position in the file to start reading from.
*
* Description:
* This routine reads the value of the reset_stats variable
* and stores it into the local @buf. It will start reading the file at @ppos and
* copy up to @cnt of data to @ubuf from @buf.
*
* Returns:
* This function returns the amount of data that was read.
*/
static ssize_t fnic_reset_stats_read(struct file *file,
char __user *ubuf,
size_t cnt, loff_t *ppos)
{
struct stats_debug_info *debug = file->private_data;
struct fnic *fnic = (struct fnic *)debug->i_private;
char buf[64];
int len;
len = sprintf(buf, "%u\n", fnic->reset_stats);
return simple_read_from_buffer(ubuf, cnt, ppos, buf, len);
}
/*
* fnic_reset_stats_write - Write to reset_stats debugfs file
* @filp: The file pointer to write from.
* @ubuf: The buffer to copy the data from.
* @cnt: The number of bytes to write.
* @ppos: The position in the file to start writing to.
*
* Description:
* This routine writes data from user buffer @ubuf to buffer @buf and
* resets cumulative stats of fnic.
*
* Returns:
* This function returns the amount of data that was written.
*/
static ssize_t fnic_reset_stats_write(struct file *file,
const char __user *ubuf,
size_t cnt, loff_t *ppos)
{
struct stats_debug_info *debug = file->private_data;
struct fnic *fnic = (struct fnic *)debug->i_private;
struct fnic_stats *stats = &fnic->fnic_stats;
u64 *io_stats_p = (u64 *)&stats->io_stats;
u64 *fw_stats_p = (u64 *)&stats->fw_stats;
char buf[64];
unsigned long val;
int ret;
if (cnt >= sizeof(buf))
return -EINVAL;
if (copy_from_user(&buf, ubuf, cnt))
return -EFAULT;
buf[cnt] = 0;
ret = kstrtoul(buf, 10, &val);
if (ret < 0)
return ret;
fnic->reset_stats = val;
if (fnic->reset_stats) {
/* The skip variable is used to avoid discrepancies between the Num IOs
* and IO Completions stats: do not count completions
* for IOs that were already active when the stats were reset.
*/
atomic64_set(&fnic->io_cmpl_skip,
atomic64_read(&stats->io_stats.active_ios));
memset(&stats->abts_stats, 0, sizeof(struct abort_stats));
memset(&stats->term_stats, 0,
sizeof(struct terminate_stats));
memset(&stats->reset_stats, 0, sizeof(struct reset_stats));
memset(&stats->misc_stats, 0, sizeof(struct misc_stats));
memset(&stats->vlan_stats, 0, sizeof(struct vlan_stats));
memset(io_stats_p+1, 0,
sizeof(struct io_path_stats) - sizeof(u64));
memset(fw_stats_p+1, 0,
sizeof(struct fw_stats) - sizeof(u64));
ktime_get_real_ts64(&stats->stats_timestamps.last_reset_time);
}
(*ppos)++;
return cnt;
}
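/*
 * Usage sketch (assumes the conventional /sys/kernel/debug mount point;
 * <N> is the SCSI host number):
 *
 *	echo 1 > /sys/kernel/debug/fnic/statistics/host<N>/reset_stats
 *
 * This clears the abort/terminate/reset/misc/vlan counters, while the
 * first u64 of the IO-path and firmware stat blocks is deliberately
 * preserved by the memset(...p + 1, ...) calls above.
 */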
/*
* fnic_reset_stats_release - Release the buffer used to store
* debugfs file data
* @inode: The inode pointer
* @file: The file pointer that contains the buffer to release
*
* Description:
* This routine frees the buffer that was allocated when the debugfs
* file was opened.
*
* Returns:
* This function returns zero.
*/
static int fnic_reset_stats_release(struct inode *inode,
struct file *file)
{
struct stats_debug_info *debug = file->private_data;
kfree(debug);
return 0;
}
/*
* fnic_stats_debugfs_open - Open the stats file for specific host
* and get fnic stats.
* @inode: The inode pointer.
* @file: The file pointer to attach the specific host statistics.
*
* Description:
* This routine opens the debugfs stats file of a specific host and prints
* the fnic stats.
*
* Returns:
* This function returns zero if successful.
*/
static int fnic_stats_debugfs_open(struct inode *inode,
struct file *file)
{
struct fnic *fnic = inode->i_private;
struct fnic_stats *fnic_stats = &fnic->fnic_stats;
struct stats_debug_info *debug;
int buf_size = 2 * PAGE_SIZE;
debug = kzalloc(sizeof(struct stats_debug_info), GFP_KERNEL);
if (!debug)
return -ENOMEM;
debug->debug_buffer = vmalloc(buf_size);
if (!debug->debug_buffer) {
kfree(debug);
return -ENOMEM;
}
debug->buf_size = buf_size;
memset((void *)debug->debug_buffer, 0, buf_size);
debug->buffer_len = fnic_get_stats_data(debug, fnic_stats);
file->private_data = debug;
return 0;
}
/*
* fnic_stats_debugfs_read - Read a debugfs file
* @file: The file pointer to read from.
* @ubuf: The buffer to copy the data to.
* @nbytes: The number of bytes to read.
* @pos: The position in the file to start reading from.
*
* Description:
* This routine reads data from the buffer indicated in the private_data
* field of @file. It will start reading at @pos and copy up to @nbytes of
* data to @ubuf.
*
* Returns:
* This function returns the amount of data that was read (this could be
* less than @nbytes if the end of the file was reached).
*/
static ssize_t fnic_stats_debugfs_read(struct file *file,
char __user *ubuf,
size_t nbytes,
loff_t *pos)
{
struct stats_debug_info *debug = file->private_data;
int rc = 0;
rc = simple_read_from_buffer(ubuf, nbytes, pos,
debug->debug_buffer,
debug->buffer_len);
return rc;
}
/*
* fnic_stats_debugfs_release - Release the buffer used to store
* debugfs file data
* @inode: The inode pointer
* @file: The file pointer that contains the buffer to release
*
* Description:
* This routine frees the buffer that was allocated when the debugfs
* file was opened.
*
* Returns:
* This function returns zero.
*/
static int fnic_stats_debugfs_release(struct inode *inode,
struct file *file)
{
struct stats_debug_info *debug = file->private_data;
vfree(debug->debug_buffer);
kfree(debug);
return 0;
}
static const struct file_operations fnic_stats_debugfs_fops = {
.owner = THIS_MODULE,
.open = fnic_stats_debugfs_open,
.read = fnic_stats_debugfs_read,
.release = fnic_stats_debugfs_release,
};
static const struct file_operations fnic_reset_debugfs_fops = {
.owner = THIS_MODULE,
.open = fnic_reset_stats_open,
.read = fnic_reset_stats_read,
.write = fnic_reset_stats_write,
.release = fnic_reset_stats_release,
};
/*
* fnic_stats_debugfs_init - Initialize stats struct and create stats file per fnic
*
* Description:
* When Debugfs is configured this routine sets up the stats files per fnic.
* It will create the files stats and reset_stats under the statistics/host#
* directory to log per-fnic stats.
*/
void fnic_stats_debugfs_init(struct fnic *fnic)
{
char name[16];
snprintf(name, sizeof(name), "host%d", fnic->lport->host->host_no);
fnic->fnic_stats_debugfs_host = debugfs_create_dir(name,
fnic_stats_debugfs_root);
fnic->fnic_stats_debugfs_file = debugfs_create_file("stats",
S_IFREG|S_IRUGO|S_IWUSR,
fnic->fnic_stats_debugfs_host,
fnic,
&fnic_stats_debugfs_fops);
fnic->fnic_reset_debugfs_file = debugfs_create_file("reset_stats",
S_IFREG|S_IRUGO|S_IWUSR,
fnic->fnic_stats_debugfs_host,
fnic,
&fnic_reset_debugfs_fops);
}
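/*
 * Usage sketch (same path assumptions as for the trace files above):
 *
 *	cat /sys/kernel/debug/fnic/statistics/host<N>/stats
 *
 * The stats file contents are a snapshot taken at open() time:
 * fnic_stats_debugfs_open() formats up to two pages of text via
 * fnic_get_stats_data() and read() simply copies from that buffer.
 */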
/*
* fnic_stats_debugfs_remove - Tear down debugfs infrastructure of stats
*
* Description:
* When Debugfs is configured this routine removes debugfs file system
* elements that are specific to fnic stats.
*/
void fnic_stats_debugfs_remove(struct fnic *fnic)
{
if (!fnic)
return;
debugfs_remove(fnic->fnic_stats_debugfs_file);
fnic->fnic_stats_debugfs_file = NULL;
debugfs_remove(fnic->fnic_reset_debugfs_file);
fnic->fnic_reset_debugfs_file = NULL;
debugfs_remove(fnic->fnic_stats_debugfs_host);
fnic->fnic_stats_debugfs_host = NULL;
}
| linux-master | drivers/scsi/fnic/fnic_debugfs.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright 2008 Cisco Systems, Inc. All rights reserved.
* Copyright 2007 Nuova Systems, Inc. All rights reserved.
*/
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/pci.h>
#include "vnic_dev.h"
#include "vnic_cq.h"
void vnic_cq_free(struct vnic_cq *cq)
{
vnic_dev_free_desc_ring(cq->vdev, &cq->ring);
cq->ctrl = NULL;
}
int vnic_cq_alloc(struct vnic_dev *vdev, struct vnic_cq *cq, unsigned int index,
unsigned int desc_count, unsigned int desc_size)
{
int err;
cq->index = index;
cq->vdev = vdev;
cq->ctrl = vnic_dev_get_res(vdev, RES_TYPE_CQ, index);
if (!cq->ctrl) {
printk(KERN_ERR "Failed to hook CQ[%d] resource\n", index);
return -EINVAL;
}
err = vnic_dev_alloc_desc_ring(vdev, &cq->ring, desc_count, desc_size);
if (err)
return err;
return 0;
}
void vnic_cq_init(struct vnic_cq *cq, unsigned int flow_control_enable,
unsigned int color_enable, unsigned int cq_head, unsigned int cq_tail,
unsigned int cq_tail_color, unsigned int interrupt_enable,
unsigned int cq_entry_enable, unsigned int cq_message_enable,
unsigned int interrupt_offset, u64 cq_message_addr)
{
u64 paddr;
paddr = (u64)cq->ring.base_addr | VNIC_PADDR_TARGET;
writeq(paddr, &cq->ctrl->ring_base);
iowrite32(cq->ring.desc_count, &cq->ctrl->ring_size);
iowrite32(flow_control_enable, &cq->ctrl->flow_control_enable);
iowrite32(color_enable, &cq->ctrl->color_enable);
iowrite32(cq_head, &cq->ctrl->cq_head);
iowrite32(cq_tail, &cq->ctrl->cq_tail);
iowrite32(cq_tail_color, &cq->ctrl->cq_tail_color);
iowrite32(interrupt_enable, &cq->ctrl->interrupt_enable);
iowrite32(cq_entry_enable, &cq->ctrl->cq_entry_enable);
iowrite32(cq_message_enable, &cq->ctrl->cq_message_enable);
iowrite32(interrupt_offset, &cq->ctrl->interrupt_offset);
writeq(cq_message_addr, &cq->ctrl->cq_message_addr);
}
void vnic_cq_clean(struct vnic_cq *cq)
{
cq->to_clean = 0;
cq->last_color = 0;
iowrite32(0, &cq->ctrl->cq_head);
iowrite32(0, &cq->ctrl->cq_tail);
iowrite32(1, &cq->ctrl->cq_tail_color);
vnic_dev_clear_desc_ring(&cq->ring);
}
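/*
 * Lifecycle sketch (illustrative only; argument values are placeholders,
 * not necessarily the ones the fnic resource setup code uses):
 *
 *	if (!vnic_cq_alloc(vdev, &cq, index, desc_count, desc_size))
 *		vnic_cq_init(&cq, ...);	// program ring base/size, color, intr
 *	...
 *	vnic_cq_clean(&cq);		// reset indices, tail color back to 1
 *	vnic_cq_free(&cq);		// release the descriptor ring
 */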
| linux-master | drivers/scsi/fnic/vnic_cq.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright 2008 Cisco Systems, Inc. All rights reserved.
* Copyright 2007 Nuova Systems, Inc. All rights reserved.
*/
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <scsi/libfc.h>
#include <scsi/fc_frame.h>
#include "vnic_dev.h"
#include "vnic_intr.h"
#include "vnic_stats.h"
#include "fnic_io.h"
#include "fnic.h"
static irqreturn_t fnic_isr_legacy(int irq, void *data)
{
struct fnic *fnic = data;
u32 pba;
unsigned long work_done = 0;
pba = vnic_intr_legacy_pba(fnic->legacy_pba);
if (!pba)
return IRQ_NONE;
fnic->fnic_stats.misc_stats.last_isr_time = jiffies;
atomic64_inc(&fnic->fnic_stats.misc_stats.isr_count);
if (pba & (1 << FNIC_INTX_NOTIFY)) {
vnic_intr_return_all_credits(&fnic->intr[FNIC_INTX_NOTIFY]);
fnic_handle_link_event(fnic);
}
if (pba & (1 << FNIC_INTX_ERR)) {
vnic_intr_return_all_credits(&fnic->intr[FNIC_INTX_ERR]);
fnic_log_q_error(fnic);
}
if (pba & (1 << FNIC_INTX_WQ_RQ_COPYWQ)) {
work_done += fnic_wq_copy_cmpl_handler(fnic, io_completions);
work_done += fnic_wq_cmpl_handler(fnic, -1);
work_done += fnic_rq_cmpl_handler(fnic, -1);
vnic_intr_return_credits(&fnic->intr[FNIC_INTX_WQ_RQ_COPYWQ],
work_done,
1 /* unmask intr */,
1 /* reset intr timer */);
}
return IRQ_HANDLED;
}
static irqreturn_t fnic_isr_msi(int irq, void *data)
{
struct fnic *fnic = data;
unsigned long work_done = 0;
fnic->fnic_stats.misc_stats.last_isr_time = jiffies;
atomic64_inc(&fnic->fnic_stats.misc_stats.isr_count);
work_done += fnic_wq_copy_cmpl_handler(fnic, io_completions);
work_done += fnic_wq_cmpl_handler(fnic, -1);
work_done += fnic_rq_cmpl_handler(fnic, -1);
vnic_intr_return_credits(&fnic->intr[0],
work_done,
1 /* unmask intr */,
1 /* reset intr timer */);
return IRQ_HANDLED;
}
static irqreturn_t fnic_isr_msix_rq(int irq, void *data)
{
struct fnic *fnic = data;
unsigned long rq_work_done = 0;
fnic->fnic_stats.misc_stats.last_isr_time = jiffies;
atomic64_inc(&fnic->fnic_stats.misc_stats.isr_count);
rq_work_done = fnic_rq_cmpl_handler(fnic, -1);
vnic_intr_return_credits(&fnic->intr[FNIC_MSIX_RQ],
rq_work_done,
1 /* unmask intr */,
1 /* reset intr timer */);
return IRQ_HANDLED;
}
static irqreturn_t fnic_isr_msix_wq(int irq, void *data)
{
struct fnic *fnic = data;
unsigned long wq_work_done = 0;
fnic->fnic_stats.misc_stats.last_isr_time = jiffies;
atomic64_inc(&fnic->fnic_stats.misc_stats.isr_count);
wq_work_done = fnic_wq_cmpl_handler(fnic, -1);
vnic_intr_return_credits(&fnic->intr[FNIC_MSIX_WQ],
wq_work_done,
1 /* unmask intr */,
1 /* reset intr timer */);
return IRQ_HANDLED;
}
static irqreturn_t fnic_isr_msix_wq_copy(int irq, void *data)
{
struct fnic *fnic = data;
unsigned long wq_copy_work_done = 0;
fnic->fnic_stats.misc_stats.last_isr_time = jiffies;
atomic64_inc(&fnic->fnic_stats.misc_stats.isr_count);
wq_copy_work_done = fnic_wq_copy_cmpl_handler(fnic, io_completions);
vnic_intr_return_credits(&fnic->intr[FNIC_MSIX_WQ_COPY],
wq_copy_work_done,
1 /* unmask intr */,
1 /* reset intr timer */);
return IRQ_HANDLED;
}
static irqreturn_t fnic_isr_msix_err_notify(int irq, void *data)
{
struct fnic *fnic = data;
fnic->fnic_stats.misc_stats.last_isr_time = jiffies;
atomic64_inc(&fnic->fnic_stats.misc_stats.isr_count);
vnic_intr_return_all_credits(&fnic->intr[FNIC_MSIX_ERR_NOTIFY]);
fnic_log_q_error(fnic);
fnic_handle_link_event(fnic);
return IRQ_HANDLED;
}
void fnic_free_intr(struct fnic *fnic)
{
int i;
switch (vnic_dev_get_intr_mode(fnic->vdev)) {
case VNIC_DEV_INTR_MODE_INTX:
case VNIC_DEV_INTR_MODE_MSI:
free_irq(pci_irq_vector(fnic->pdev, 0), fnic);
break;
case VNIC_DEV_INTR_MODE_MSIX:
for (i = 0; i < ARRAY_SIZE(fnic->msix); i++)
if (fnic->msix[i].requested)
free_irq(pci_irq_vector(fnic->pdev, i),
fnic->msix[i].devid);
break;
default:
break;
}
}
int fnic_request_intr(struct fnic *fnic)
{
int err = 0;
int i;
switch (vnic_dev_get_intr_mode(fnic->vdev)) {
case VNIC_DEV_INTR_MODE_INTX:
err = request_irq(pci_irq_vector(fnic->pdev, 0),
&fnic_isr_legacy, IRQF_SHARED, DRV_NAME, fnic);
break;
case VNIC_DEV_INTR_MODE_MSI:
err = request_irq(pci_irq_vector(fnic->pdev, 0), &fnic_isr_msi,
0, fnic->name, fnic);
break;
case VNIC_DEV_INTR_MODE_MSIX:
sprintf(fnic->msix[FNIC_MSIX_RQ].devname,
"%.11s-fcs-rq", fnic->name);
fnic->msix[FNIC_MSIX_RQ].isr = fnic_isr_msix_rq;
fnic->msix[FNIC_MSIX_RQ].devid = fnic;
sprintf(fnic->msix[FNIC_MSIX_WQ].devname,
"%.11s-fcs-wq", fnic->name);
fnic->msix[FNIC_MSIX_WQ].isr = fnic_isr_msix_wq;
fnic->msix[FNIC_MSIX_WQ].devid = fnic;
sprintf(fnic->msix[FNIC_MSIX_WQ_COPY].devname,
"%.11s-scsi-wq", fnic->name);
fnic->msix[FNIC_MSIX_WQ_COPY].isr = fnic_isr_msix_wq_copy;
fnic->msix[FNIC_MSIX_WQ_COPY].devid = fnic;
sprintf(fnic->msix[FNIC_MSIX_ERR_NOTIFY].devname,
"%.11s-err-notify", fnic->name);
fnic->msix[FNIC_MSIX_ERR_NOTIFY].isr =
fnic_isr_msix_err_notify;
fnic->msix[FNIC_MSIX_ERR_NOTIFY].devid = fnic;
for (i = 0; i < ARRAY_SIZE(fnic->msix); i++) {
err = request_irq(pci_irq_vector(fnic->pdev, i),
fnic->msix[i].isr, 0,
fnic->msix[i].devname,
fnic->msix[i].devid);
if (err) {
shost_printk(KERN_ERR, fnic->lport->host,
"MSIX: request_irq"
" failed %d\n", err);
fnic_free_intr(fnic);
break;
}
fnic->msix[i].requested = 1;
}
break;
default:
break;
}
return err;
}
int fnic_set_intr_mode(struct fnic *fnic)
{
unsigned int n = ARRAY_SIZE(fnic->rq);
unsigned int m = ARRAY_SIZE(fnic->wq);
unsigned int o = ARRAY_SIZE(fnic->wq_copy);
/*
* Set interrupt mode (INTx, MSI, MSI-X) depending on
* system capabilities.
*
* Try MSI-X first
*
* We need n RQs, m WQs, o Copy WQs, n+m+o CQs, and n+m+o+1 INTRs
* (last INTR is used for WQ/RQ errors and notification area)
*/
if (fnic->rq_count >= n &&
fnic->raw_wq_count >= m &&
fnic->wq_copy_count >= o &&
fnic->cq_count >= n + m + o) {
int vecs = n + m + o + 1;
if (pci_alloc_irq_vectors(fnic->pdev, vecs, vecs,
PCI_IRQ_MSIX) == vecs) {
fnic->rq_count = n;
fnic->raw_wq_count = m;
fnic->wq_copy_count = o;
fnic->wq_count = m + o;
fnic->cq_count = n + m + o;
fnic->intr_count = vecs;
fnic->err_intr_offset = FNIC_MSIX_ERR_NOTIFY;
FNIC_ISR_DBG(KERN_DEBUG, fnic->lport->host,
"Using MSI-X Interrupts\n");
vnic_dev_set_intr_mode(fnic->vdev,
VNIC_DEV_INTR_MODE_MSIX);
return 0;
}
}
/*
* Next try MSI
* We need 1 RQ, 1 WQ, 1 WQ_COPY, 3 CQs, and 1 INTR
*/
if (fnic->rq_count >= 1 &&
fnic->raw_wq_count >= 1 &&
fnic->wq_copy_count >= 1 &&
fnic->cq_count >= 3 &&
fnic->intr_count >= 1 &&
pci_alloc_irq_vectors(fnic->pdev, 1, 1, PCI_IRQ_MSI) == 1) {
fnic->rq_count = 1;
fnic->raw_wq_count = 1;
fnic->wq_copy_count = 1;
fnic->wq_count = 2;
fnic->cq_count = 3;
fnic->intr_count = 1;
fnic->err_intr_offset = 0;
FNIC_ISR_DBG(KERN_DEBUG, fnic->lport->host,
"Using MSI Interrupts\n");
vnic_dev_set_intr_mode(fnic->vdev, VNIC_DEV_INTR_MODE_MSI);
return 0;
}
/*
* Next try INTx
* We need 1 RQ, 1 WQ, 1 WQ_COPY, 3 CQs, and 3 INTRs
* 1 INTR is used for all 3 queues, 1 INTR for queue errors
* 1 INTR for notification area
*/
if (fnic->rq_count >= 1 &&
fnic->raw_wq_count >= 1 &&
fnic->wq_copy_count >= 1 &&
fnic->cq_count >= 3 &&
fnic->intr_count >= 3) {
fnic->rq_count = 1;
fnic->raw_wq_count = 1;
fnic->wq_copy_count = 1;
fnic->cq_count = 3;
fnic->intr_count = 3;
FNIC_ISR_DBG(KERN_DEBUG, fnic->lport->host,
"Using Legacy Interrupts\n");
vnic_dev_set_intr_mode(fnic->vdev, VNIC_DEV_INTR_MODE_INTX);
return 0;
}
vnic_dev_set_intr_mode(fnic->vdev, VNIC_DEV_INTR_MODE_UNKNOWN);
return -EINVAL;
}
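/*
 * Summary of the fallback order implemented above (descriptive only):
 * with n = ARRAY_SIZE(rq), m = ARRAY_SIZE(wq) and o = ARRAY_SIZE(wq_copy),
 * the driver first tries MSI-X with n + m + o + 1 vectors (the extra
 * vector handles errors and the notification area), then a single-vector
 * MSI configuration (1 RQ, 1 WQ, 1 copy WQ, 3 CQs), and finally legacy
 * INTx, which still reserves 3 interrupt resources even though only one
 * IRQ line is shared.
 */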
void fnic_clear_intr_mode(struct fnic *fnic)
{
pci_free_irq_vectors(fnic->pdev);
vnic_dev_set_intr_mode(fnic->vdev, VNIC_DEV_INTR_MODE_INTX);
}
| linux-master | drivers/scsi/fnic/fnic_isr.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright 2008 Cisco Systems, Inc. All rights reserved.
* Copyright 2007 Nuova Systems, Inc. All rights reserved.
*/
#include <linux/module.h>
#include <linux/mempool.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/skbuff.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>
#include <linux/if_ether.h>
#include <scsi/fc/fc_fip.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_transport.h>
#include <scsi/scsi_transport_fc.h>
#include <scsi/scsi_tcq.h>
#include <scsi/libfc.h>
#include <scsi/fc_frame.h>
#include "vnic_dev.h"
#include "vnic_intr.h"
#include "vnic_stats.h"
#include "fnic_io.h"
#include "fnic_fip.h"
#include "fnic.h"
#define PCI_DEVICE_ID_CISCO_FNIC 0x0045
/* Timer to poll notification area for events. Used for MSI interrupts */
#define FNIC_NOTIFY_TIMER_PERIOD (2 * HZ)
static struct kmem_cache *fnic_sgl_cache[FNIC_SGL_NUM_CACHES];
static struct kmem_cache *fnic_io_req_cache;
static LIST_HEAD(fnic_list);
static DEFINE_SPINLOCK(fnic_list_lock);
/* Supported devices by fnic module */
static struct pci_device_id fnic_id_table[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_CISCO, PCI_DEVICE_ID_CISCO_FNIC) },
{ 0, }
};
MODULE_DESCRIPTION(DRV_DESCRIPTION);
MODULE_AUTHOR("Abhijeet Joglekar <[email protected]>, "
"Joseph R. Eykholt <[email protected]>");
MODULE_LICENSE("GPL v2");
MODULE_VERSION(DRV_VERSION);
MODULE_DEVICE_TABLE(pci, fnic_id_table);
unsigned int fnic_log_level;
module_param(fnic_log_level, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(fnic_log_level, "bit mask of fnic logging levels");
unsigned int io_completions = FNIC_DFLT_IO_COMPLETIONS;
module_param(io_completions, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(io_completions, "Max CQ entries to process at a time");
unsigned int fnic_trace_max_pages = 16;
module_param(fnic_trace_max_pages, uint, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(fnic_trace_max_pages, "Total allocated memory pages "
"for fnic trace buffer");
unsigned int fnic_fc_trace_max_pages = 64;
module_param(fnic_fc_trace_max_pages, uint, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(fnic_fc_trace_max_pages,
"Total allocated memory pages for fc trace buffer");
static unsigned int fnic_max_qdepth = FNIC_DFLT_QUEUE_DEPTH;
module_param(fnic_max_qdepth, uint, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(fnic_max_qdepth, "Queue depth to report for each LUN");
static struct libfc_function_template fnic_transport_template = {
.frame_send = fnic_send,
.lport_set_port_id = fnic_set_port_id,
.fcp_abort_io = fnic_empty_scsi_cleanup,
.fcp_cleanup = fnic_empty_scsi_cleanup,
.exch_mgr_reset = fnic_exch_mgr_reset
};
static int fnic_slave_alloc(struct scsi_device *sdev)
{
struct fc_rport *rport = starget_to_rport(scsi_target(sdev));
if (!rport || fc_remote_port_chkready(rport))
return -ENXIO;
scsi_change_queue_depth(sdev, fnic_max_qdepth);
return 0;
}
static const struct scsi_host_template fnic_host_template = {
.module = THIS_MODULE,
.name = DRV_NAME,
.queuecommand = fnic_queuecommand,
.eh_timed_out = fc_eh_timed_out,
.eh_abort_handler = fnic_abort_cmd,
.eh_device_reset_handler = fnic_device_reset,
.eh_host_reset_handler = fnic_host_reset,
.slave_alloc = fnic_slave_alloc,
.change_queue_depth = scsi_change_queue_depth,
.this_id = -1,
.cmd_per_lun = 3,
.can_queue = FNIC_DFLT_IO_REQ,
.sg_tablesize = FNIC_MAX_SG_DESC_CNT,
.max_sectors = 0xffff,
.shost_groups = fnic_host_groups,
.track_queue_depth = 1,
.cmd_size = sizeof(struct fnic_cmd_priv),
};
static void
fnic_set_rport_dev_loss_tmo(struct fc_rport *rport, u32 timeout)
{
if (timeout)
rport->dev_loss_tmo = timeout;
else
rport->dev_loss_tmo = 1;
}
static void fnic_get_host_speed(struct Scsi_Host *shost);
static struct scsi_transport_template *fnic_fc_transport;
static struct fc_host_statistics *fnic_get_stats(struct Scsi_Host *);
static void fnic_reset_host_stats(struct Scsi_Host *);
static struct fc_function_template fnic_fc_functions = {
.show_host_node_name = 1,
.show_host_port_name = 1,
.show_host_supported_classes = 1,
.show_host_supported_fc4s = 1,
.show_host_active_fc4s = 1,
.show_host_maxframe_size = 1,
.show_host_port_id = 1,
.show_host_supported_speeds = 1,
.get_host_speed = fnic_get_host_speed,
.show_host_speed = 1,
.show_host_port_type = 1,
.get_host_port_state = fc_get_host_port_state,
.show_host_port_state = 1,
.show_host_symbolic_name = 1,
.show_rport_maxframe_size = 1,
.show_rport_supported_classes = 1,
.show_host_fabric_name = 1,
.show_starget_node_name = 1,
.show_starget_port_name = 1,
.show_starget_port_id = 1,
.show_rport_dev_loss_tmo = 1,
.set_rport_dev_loss_tmo = fnic_set_rport_dev_loss_tmo,
.issue_fc_host_lip = fnic_reset,
.get_fc_host_stats = fnic_get_stats,
.reset_fc_host_stats = fnic_reset_host_stats,
.dd_fcrport_size = sizeof(struct fc_rport_libfc_priv),
.terminate_rport_io = fnic_terminate_rport_io,
.bsg_request = fc_lport_bsg_request,
};
static void fnic_get_host_speed(struct Scsi_Host *shost)
{
struct fc_lport *lp = shost_priv(shost);
struct fnic *fnic = lport_priv(lp);
u32 port_speed = vnic_dev_port_speed(fnic->vdev);
/* Add in other values as they get defined in fw */
switch (port_speed) {
case DCEM_PORTSPEED_10G:
fc_host_speed(shost) = FC_PORTSPEED_10GBIT;
break;
case DCEM_PORTSPEED_20G:
fc_host_speed(shost) = FC_PORTSPEED_20GBIT;
break;
case DCEM_PORTSPEED_25G:
fc_host_speed(shost) = FC_PORTSPEED_25GBIT;
break;
case DCEM_PORTSPEED_40G:
case DCEM_PORTSPEED_4x10G:
fc_host_speed(shost) = FC_PORTSPEED_40GBIT;
break;
case DCEM_PORTSPEED_100G:
fc_host_speed(shost) = FC_PORTSPEED_100GBIT;
break;
default:
fc_host_speed(shost) = FC_PORTSPEED_UNKNOWN;
break;
}
}
static struct fc_host_statistics *fnic_get_stats(struct Scsi_Host *host)
{
int ret;
struct fc_lport *lp = shost_priv(host);
struct fnic *fnic = lport_priv(lp);
struct fc_host_statistics *stats = &lp->host_stats;
struct vnic_stats *vs;
unsigned long flags;
if (time_before(jiffies, fnic->stats_time + HZ / FNIC_STATS_RATE_LIMIT))
return stats;
fnic->stats_time = jiffies;
spin_lock_irqsave(&fnic->fnic_lock, flags);
ret = vnic_dev_stats_dump(fnic->vdev, &fnic->stats);
spin_unlock_irqrestore(&fnic->fnic_lock, flags);
if (ret) {
FNIC_MAIN_DBG(KERN_DEBUG, fnic->lport->host,
"fnic: Get vnic stats failed"
" 0x%x", ret);
return stats;
}
vs = fnic->stats;
stats->tx_frames = vs->tx.tx_unicast_frames_ok;
stats->tx_words = vs->tx.tx_unicast_bytes_ok / 4;
stats->rx_frames = vs->rx.rx_unicast_frames_ok;
stats->rx_words = vs->rx.rx_unicast_bytes_ok / 4;
stats->error_frames = vs->tx.tx_errors + vs->rx.rx_errors;
stats->dumped_frames = vs->tx.tx_drops + vs->rx.rx_drop;
stats->invalid_crc_count = vs->rx.rx_crc_errors;
stats->seconds_since_last_reset =
(jiffies - fnic->stats_reset_time) / HZ;
stats->fcp_input_megabytes = div_u64(fnic->fcp_input_bytes, 1000000);
stats->fcp_output_megabytes = div_u64(fnic->fcp_output_bytes, 1000000);
return stats;
}
/*
* fnic_dump_fchost_stats
* note : dumps fc_statistics into system logs
*/
void fnic_dump_fchost_stats(struct Scsi_Host *host,
struct fc_host_statistics *stats)
{
FNIC_MAIN_NOTE(KERN_NOTICE, host,
"fnic: seconds since last reset = %llu\n",
stats->seconds_since_last_reset);
FNIC_MAIN_NOTE(KERN_NOTICE, host,
"fnic: tx frames = %llu\n",
stats->tx_frames);
FNIC_MAIN_NOTE(KERN_NOTICE, host,
"fnic: tx words = %llu\n",
stats->tx_words);
FNIC_MAIN_NOTE(KERN_NOTICE, host,
"fnic: rx frames = %llu\n",
stats->rx_frames);
FNIC_MAIN_NOTE(KERN_NOTICE, host,
"fnic: rx words = %llu\n",
stats->rx_words);
FNIC_MAIN_NOTE(KERN_NOTICE, host,
"fnic: lip count = %llu\n",
stats->lip_count);
FNIC_MAIN_NOTE(KERN_NOTICE, host,
"fnic: nos count = %llu\n",
stats->nos_count);
FNIC_MAIN_NOTE(KERN_NOTICE, host,
"fnic: error frames = %llu\n",
stats->error_frames);
FNIC_MAIN_NOTE(KERN_NOTICE, host,
"fnic: dumped frames = %llu\n",
stats->dumped_frames);
FNIC_MAIN_NOTE(KERN_NOTICE, host,
"fnic: link failure count = %llu\n",
stats->link_failure_count);
FNIC_MAIN_NOTE(KERN_NOTICE, host,
"fnic: loss of sync count = %llu\n",
stats->loss_of_sync_count);
FNIC_MAIN_NOTE(KERN_NOTICE, host,
"fnic: loss of signal count = %llu\n",
stats->loss_of_signal_count);
FNIC_MAIN_NOTE(KERN_NOTICE, host,
"fnic: prim seq protocol err count = %llu\n",
stats->prim_seq_protocol_err_count);
FNIC_MAIN_NOTE(KERN_NOTICE, host,
"fnic: invalid tx word count= %llu\n",
stats->invalid_tx_word_count);
FNIC_MAIN_NOTE(KERN_NOTICE, host,
"fnic: invalid crc count = %llu\n",
stats->invalid_crc_count);
FNIC_MAIN_NOTE(KERN_NOTICE, host,
"fnic: fcp input requests = %llu\n",
stats->fcp_input_requests);
FNIC_MAIN_NOTE(KERN_NOTICE, host,
"fnic: fcp output requests = %llu\n",
stats->fcp_output_requests);
FNIC_MAIN_NOTE(KERN_NOTICE, host,
"fnic: fcp control requests = %llu\n",
stats->fcp_control_requests);
FNIC_MAIN_NOTE(KERN_NOTICE, host,
"fnic: fcp input megabytes = %llu\n",
stats->fcp_input_megabytes);
FNIC_MAIN_NOTE(KERN_NOTICE, host,
"fnic: fcp output megabytes = %llu\n",
stats->fcp_output_megabytes);
return;
}
/*
* fnic_reset_host_stats : clears host stats
* note : called when reset_statistics set under sysfs dir
*/
static void fnic_reset_host_stats(struct Scsi_Host *host)
{
int ret;
struct fc_lport *lp = shost_priv(host);
struct fnic *fnic = lport_priv(lp);
struct fc_host_statistics *stats;
unsigned long flags;
/* dump current stats, before clearing them */
stats = fnic_get_stats(host);
fnic_dump_fchost_stats(host, stats);
spin_lock_irqsave(&fnic->fnic_lock, flags);
ret = vnic_dev_stats_clear(fnic->vdev);
spin_unlock_irqrestore(&fnic->fnic_lock, flags);
if (ret) {
FNIC_MAIN_DBG(KERN_DEBUG, fnic->lport->host,
"fnic: Reset vnic stats failed"
" 0x%x", ret);
return;
}
fnic->stats_reset_time = jiffies;
memset(stats, 0, sizeof(*stats));
return;
}
void fnic_log_q_error(struct fnic *fnic)
{
unsigned int i;
u32 error_status;
for (i = 0; i < fnic->raw_wq_count; i++) {
error_status = ioread32(&fnic->wq[i].ctrl->error_status);
if (error_status)
shost_printk(KERN_ERR, fnic->lport->host,
"WQ[%d] error_status"
" %d\n", i, error_status);
}
for (i = 0; i < fnic->rq_count; i++) {
error_status = ioread32(&fnic->rq[i].ctrl->error_status);
if (error_status)
shost_printk(KERN_ERR, fnic->lport->host,
"RQ[%d] error_status"
" %d\n", i, error_status);
}
for (i = 0; i < fnic->wq_copy_count; i++) {
error_status = ioread32(&fnic->wq_copy[i].ctrl->error_status);
if (error_status)
shost_printk(KERN_ERR, fnic->lport->host,
"CWQ[%d] error_status"
" %d\n", i, error_status);
}
}
void fnic_handle_link_event(struct fnic *fnic)
{
unsigned long flags;
spin_lock_irqsave(&fnic->fnic_lock, flags);
if (fnic->stop_rx_link_events) {
spin_unlock_irqrestore(&fnic->fnic_lock, flags);
return;
}
spin_unlock_irqrestore(&fnic->fnic_lock, flags);
queue_work(fnic_event_queue, &fnic->link_work);
}
static int fnic_notify_set(struct fnic *fnic)
{
int err;
switch (vnic_dev_get_intr_mode(fnic->vdev)) {
case VNIC_DEV_INTR_MODE_INTX:
err = vnic_dev_notify_set(fnic->vdev, FNIC_INTX_NOTIFY);
break;
case VNIC_DEV_INTR_MODE_MSI:
err = vnic_dev_notify_set(fnic->vdev, -1);
break;
case VNIC_DEV_INTR_MODE_MSIX:
err = vnic_dev_notify_set(fnic->vdev, FNIC_MSIX_ERR_NOTIFY);
break;
default:
shost_printk(KERN_ERR, fnic->lport->host,
"Interrupt mode should be set up"
" before devcmd notify set %d\n",
vnic_dev_get_intr_mode(fnic->vdev));
err = -1;
break;
}
return err;
}
static void fnic_notify_timer(struct timer_list *t)
{
struct fnic *fnic = from_timer(fnic, t, notify_timer);
fnic_handle_link_event(fnic);
mod_timer(&fnic->notify_timer,
round_jiffies(jiffies + FNIC_NOTIFY_TIMER_PERIOD));
}
static void fnic_fip_notify_timer(struct timer_list *t)
{
struct fnic *fnic = from_timer(fnic, t, fip_timer);
fnic_handle_fip_timer(fnic);
}
static void fnic_notify_timer_start(struct fnic *fnic)
{
switch (vnic_dev_get_intr_mode(fnic->vdev)) {
case VNIC_DEV_INTR_MODE_MSI:
/*
* Schedule the first timeout immediately. The driver is
* initialized and ready to look for a link-up notification.
*/
mod_timer(&fnic->notify_timer, jiffies);
break;
default:
/* Using intr for notification for INTx/MSI-X */
break;
}
}
static int fnic_dev_wait(struct vnic_dev *vdev,
int (*start)(struct vnic_dev *, int),
int (*finished)(struct vnic_dev *, int *),
int arg)
{
unsigned long time;
int done;
int err;
int count;
count = 0;
err = start(vdev, arg);
if (err)
return err;
/* Wait for func to complete.
* Sometimes schedule_timeout_uninterruptible takes a long time
* to wake up, which could use up the entire 2 second window of the
* while loop after a single poll. By also checking count, we make sure
* we try at least three times before returning -ETIMEDOUT.
*/
time = jiffies + (HZ * 2);
do {
err = finished(vdev, &done);
count++;
if (err)
return err;
if (done)
return 0;
schedule_timeout_uninterruptible(HZ / 10);
} while (time_after(time, jiffies) || (count < 3));
return -ETIMEDOUT;
}
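/*
 * Usage sketch (mirrors the call made later in fnic_probe()):
 *
 *	err = fnic_dev_wait(fnic->vdev, vnic_dev_open,
 *			    vnic_dev_open_done, CMD_OPENF_RQ_ENABLE_THEN_POST);
 *
 * i.e. @start kicks off an asynchronous devcmd and @finished is polled
 * roughly every 100 ms until it reports completion, an error is seen, or
 * the ~2 second window expires with at least three polls made.
 */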
static int fnic_cleanup(struct fnic *fnic)
{
unsigned int i;
int err;
vnic_dev_disable(fnic->vdev);
for (i = 0; i < fnic->intr_count; i++)
vnic_intr_mask(&fnic->intr[i]);
for (i = 0; i < fnic->rq_count; i++) {
err = vnic_rq_disable(&fnic->rq[i]);
if (err)
return err;
}
for (i = 0; i < fnic->raw_wq_count; i++) {
err = vnic_wq_disable(&fnic->wq[i]);
if (err)
return err;
}
for (i = 0; i < fnic->wq_copy_count; i++) {
err = vnic_wq_copy_disable(&fnic->wq_copy[i]);
if (err)
return err;
}
/* Clean up completed IOs and FCS frames */
fnic_wq_copy_cmpl_handler(fnic, io_completions);
fnic_wq_cmpl_handler(fnic, -1);
fnic_rq_cmpl_handler(fnic, -1);
/* Clean up the IOs and FCS frames that have not completed */
for (i = 0; i < fnic->raw_wq_count; i++)
vnic_wq_clean(&fnic->wq[i], fnic_free_wq_buf);
for (i = 0; i < fnic->rq_count; i++)
vnic_rq_clean(&fnic->rq[i], fnic_free_rq_buf);
for (i = 0; i < fnic->wq_copy_count; i++)
vnic_wq_copy_clean(&fnic->wq_copy[i],
fnic_wq_copy_cleanup_handler);
for (i = 0; i < fnic->cq_count; i++)
vnic_cq_clean(&fnic->cq[i]);
for (i = 0; i < fnic->intr_count; i++)
vnic_intr_clean(&fnic->intr[i]);
mempool_destroy(fnic->io_req_pool);
for (i = 0; i < FNIC_SGL_NUM_CACHES; i++)
mempool_destroy(fnic->io_sgl_pool[i]);
return 0;
}
static void fnic_iounmap(struct fnic *fnic)
{
if (fnic->bar0.vaddr)
iounmap(fnic->bar0.vaddr);
}
/**
* fnic_get_mac() - get assigned data MAC address for FIP code.
* @lport: local port.
*/
static u8 *fnic_get_mac(struct fc_lport *lport)
{
struct fnic *fnic = lport_priv(lport);
return fnic->data_src_addr;
}
static void fnic_set_vlan(struct fnic *fnic, u16 vlan_id)
{
vnic_dev_set_default_vlan(fnic->vdev, vlan_id);
}
static int fnic_scsi_drv_init(struct fnic *fnic)
{
struct Scsi_Host *host = fnic->lport->host;
/* Configure maximum outstanding IO reqs */
if (fnic->config.io_throttle_count != FNIC_UCSM_DFLT_THROTTLE_CNT_BLD)
host->can_queue = min_t(u32, FNIC_MAX_IO_REQ,
max_t(u32, FNIC_MIN_IO_REQ,
fnic->config.io_throttle_count));
fnic->fnic_max_tag_id = host->can_queue;
host->max_lun = fnic->config.luns_per_tgt;
host->max_id = FNIC_MAX_FCP_TARGET;
host->max_cmd_len = FCOE_MAX_CMD_LEN;
host->nr_hw_queues = fnic->wq_copy_count;
if (host->nr_hw_queues > 1)
shost_printk(KERN_ERR, host,
"fnic: blk-mq is not supported");
host->nr_hw_queues = fnic->wq_copy_count = 1;
shost_printk(KERN_INFO, host,
"fnic: can_queue: %d max_lun: %llu",
host->can_queue, host->max_lun);
shost_printk(KERN_INFO, host,
"fnic: max_id: %d max_cmd_len: %d nr_hw_queues: %d",
host->max_id, host->max_cmd_len, host->nr_hw_queues);
return 0;
}
static int fnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
struct Scsi_Host *host;
struct fc_lport *lp;
struct fnic *fnic;
mempool_t *pool;
int err;
int i;
unsigned long flags;
/*
* Allocate SCSI Host and set up association between host,
* local port, and fnic
*/
lp = libfc_host_alloc(&fnic_host_template, sizeof(struct fnic));
if (!lp) {
printk(KERN_ERR PFX "Unable to alloc libfc local port\n");
err = -ENOMEM;
goto err_out;
}
host = lp->host;
fnic = lport_priv(lp);
fnic->lport = lp;
fnic->ctlr.lp = lp;
fnic->link_events = 0;
snprintf(fnic->name, sizeof(fnic->name) - 1, "%s%d", DRV_NAME,
host->host_no);
host->transportt = fnic_fc_transport;
fnic_stats_debugfs_init(fnic);
/* Setup PCI resources */
pci_set_drvdata(pdev, fnic);
fnic->pdev = pdev;
err = pci_enable_device(pdev);
if (err) {
shost_printk(KERN_ERR, fnic->lport->host,
"Cannot enable PCI device, aborting.\n");
goto err_out_free_hba;
}
err = pci_request_regions(pdev, DRV_NAME);
if (err) {
shost_printk(KERN_ERR, fnic->lport->host,
"Cannot enable PCI resources, aborting\n");
goto err_out_disable_device;
}
pci_set_master(pdev);
	/* Query the PCI controller for the device's DMA addressing
	 * limitation. Try a 47-bit mask first and fall back to 32-bit.
	 * The Cisco VIC supports 47 bits only.
	 */
err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(47));
if (err) {
err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
if (err) {
shost_printk(KERN_ERR, fnic->lport->host,
"No usable DMA configuration "
"aborting\n");
goto err_out_release_regions;
}
}
/* Map vNIC resources from BAR0 */
if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
shost_printk(KERN_ERR, fnic->lport->host,
"BAR0 not memory-map'able, aborting.\n");
err = -ENODEV;
goto err_out_release_regions;
}
fnic->bar0.vaddr = pci_iomap(pdev, 0, 0);
fnic->bar0.bus_addr = pci_resource_start(pdev, 0);
fnic->bar0.len = pci_resource_len(pdev, 0);
if (!fnic->bar0.vaddr) {
shost_printk(KERN_ERR, fnic->lport->host,
"Cannot memory-map BAR0 res hdr, "
"aborting.\n");
err = -ENODEV;
goto err_out_release_regions;
}
fnic->vdev = vnic_dev_register(NULL, fnic, pdev, &fnic->bar0);
if (!fnic->vdev) {
shost_printk(KERN_ERR, fnic->lport->host,
"vNIC registration failed, "
"aborting.\n");
err = -ENODEV;
goto err_out_iounmap;
}
err = vnic_dev_cmd_init(fnic->vdev);
if (err) {
shost_printk(KERN_ERR, fnic->lport->host,
"vnic_dev_cmd_init() returns %d, aborting\n",
err);
goto err_out_vnic_unregister;
}
err = fnic_dev_wait(fnic->vdev, vnic_dev_open,
vnic_dev_open_done, CMD_OPENF_RQ_ENABLE_THEN_POST);
if (err) {
shost_printk(KERN_ERR, fnic->lport->host,
"vNIC dev open failed, aborting.\n");
goto err_out_dev_cmd_deinit;
}
err = vnic_dev_init(fnic->vdev, 0);
if (err) {
shost_printk(KERN_ERR, fnic->lport->host,
"vNIC dev init failed, aborting.\n");
goto err_out_dev_close;
}
err = vnic_dev_mac_addr(fnic->vdev, fnic->ctlr.ctl_src_addr);
if (err) {
shost_printk(KERN_ERR, fnic->lport->host,
"vNIC get MAC addr failed \n");
goto err_out_dev_close;
}
/* set data_src for point-to-point mode and to keep it non-zero */
memcpy(fnic->data_src_addr, fnic->ctlr.ctl_src_addr, ETH_ALEN);
/* Get vNIC configuration */
err = fnic_get_vnic_config(fnic);
if (err) {
shost_printk(KERN_ERR, fnic->lport->host,
"Get vNIC configuration failed, "
"aborting.\n");
goto err_out_dev_close;
}
fnic_scsi_drv_init(fnic);
fnic_get_res_counts(fnic);
err = fnic_set_intr_mode(fnic);
if (err) {
shost_printk(KERN_ERR, fnic->lport->host,
"Failed to set intr mode, "
"aborting.\n");
goto err_out_dev_close;
}
err = fnic_alloc_vnic_resources(fnic);
if (err) {
shost_printk(KERN_ERR, fnic->lport->host,
"Failed to alloc vNIC resources, "
"aborting.\n");
goto err_out_clear_intr;
}
/* initialize all fnic locks */
spin_lock_init(&fnic->fnic_lock);
for (i = 0; i < FNIC_WQ_MAX; i++)
spin_lock_init(&fnic->wq_lock[i]);
for (i = 0; i < FNIC_WQ_COPY_MAX; i++) {
spin_lock_init(&fnic->wq_copy_lock[i]);
fnic->wq_copy_desc_low[i] = DESC_CLEAN_LOW_WATERMARK;
fnic->fw_ack_recd[i] = 0;
fnic->fw_ack_index[i] = -1;
}
for (i = 0; i < FNIC_IO_LOCKS; i++)
spin_lock_init(&fnic->io_req_lock[i]);
err = -ENOMEM;
fnic->io_req_pool = mempool_create_slab_pool(2, fnic_io_req_cache);
if (!fnic->io_req_pool)
goto err_out_free_resources;
pool = mempool_create_slab_pool(2, fnic_sgl_cache[FNIC_SGL_CACHE_DFLT]);
if (!pool)
goto err_out_free_ioreq_pool;
fnic->io_sgl_pool[FNIC_SGL_CACHE_DFLT] = pool;
pool = mempool_create_slab_pool(2, fnic_sgl_cache[FNIC_SGL_CACHE_MAX]);
if (!pool)
goto err_out_free_dflt_pool;
fnic->io_sgl_pool[FNIC_SGL_CACHE_MAX] = pool;
/* setup vlan config, hw inserts vlan header */
fnic->vlan_hw_insert = 1;
fnic->vlan_id = 0;
/* Initialize the FIP fcoe_ctrl struct */
fnic->ctlr.send = fnic_eth_send;
fnic->ctlr.update_mac = fnic_update_mac;
fnic->ctlr.get_src_addr = fnic_get_mac;
if (fnic->config.flags & VFCF_FIP_CAPABLE) {
shost_printk(KERN_INFO, fnic->lport->host,
"firmware supports FIP\n");
/* enable directed and multicast */
vnic_dev_packet_filter(fnic->vdev, 1, 1, 0, 0, 0);
vnic_dev_add_addr(fnic->vdev, FIP_ALL_ENODE_MACS);
vnic_dev_add_addr(fnic->vdev, fnic->ctlr.ctl_src_addr);
fnic->set_vlan = fnic_set_vlan;
fcoe_ctlr_init(&fnic->ctlr, FIP_MODE_AUTO);
timer_setup(&fnic->fip_timer, fnic_fip_notify_timer, 0);
spin_lock_init(&fnic->vlans_lock);
INIT_WORK(&fnic->fip_frame_work, fnic_handle_fip_frame);
INIT_WORK(&fnic->event_work, fnic_handle_event);
skb_queue_head_init(&fnic->fip_frame_queue);
INIT_LIST_HEAD(&fnic->evlist);
INIT_LIST_HEAD(&fnic->vlans);
} else {
shost_printk(KERN_INFO, fnic->lport->host,
"firmware uses non-FIP mode\n");
fcoe_ctlr_init(&fnic->ctlr, FIP_MODE_NON_FIP);
fnic->ctlr.state = FIP_ST_NON_FIP;
}
fnic->state = FNIC_IN_FC_MODE;
atomic_set(&fnic->in_flight, 0);
fnic->state_flags = FNIC_FLAGS_NONE;
/* Enable hardware stripping of vlan header on ingress */
fnic_set_nic_config(fnic, 0, 0, 0, 0, 0, 0, 1);
/* Setup notification buffer area */
err = fnic_notify_set(fnic);
if (err) {
shost_printk(KERN_ERR, fnic->lport->host,
"Failed to alloc notify buffer, aborting.\n");
goto err_out_free_max_pool;
}
/* Setup notify timer when using MSI interrupts */
if (vnic_dev_get_intr_mode(fnic->vdev) == VNIC_DEV_INTR_MODE_MSI)
timer_setup(&fnic->notify_timer, fnic_notify_timer, 0);
/* allocate RQ buffers and post them to RQ*/
for (i = 0; i < fnic->rq_count; i++) {
vnic_rq_enable(&fnic->rq[i]);
err = vnic_rq_fill(&fnic->rq[i], fnic_alloc_rq_frame);
if (err) {
shost_printk(KERN_ERR, fnic->lport->host,
"fnic_alloc_rq_frame can't alloc "
"frame\n");
goto err_out_free_rq_buf;
}
}
	/*
	 * Initialization of the PCI system, hardware and firmware is done.
	 * Add the host to the SCSI midlayer.
	 */
err = scsi_add_host(lp->host, &pdev->dev);
if (err) {
shost_printk(KERN_ERR, fnic->lport->host,
"fnic: scsi_add_host failed...exiting\n");
goto err_out_free_rq_buf;
}
	/* Start local port initialization */
lp->link_up = 0;
lp->max_retry_count = fnic->config.flogi_retries;
lp->max_rport_retry_count = fnic->config.plogi_retries;
lp->service_params = (FCP_SPPF_INIT_FCN | FCP_SPPF_RD_XRDY_DIS |
FCP_SPPF_CONF_COMPL);
if (fnic->config.flags & VFCF_FCP_SEQ_LVL_ERR)
lp->service_params |= FCP_SPPF_RETRY;
lp->boot_time = jiffies;
lp->e_d_tov = fnic->config.ed_tov;
lp->r_a_tov = fnic->config.ra_tov;
lp->link_supported_speeds = FC_PORTSPEED_10GBIT;
fc_set_wwnn(lp, fnic->config.node_wwn);
fc_set_wwpn(lp, fnic->config.port_wwn);
fcoe_libfc_config(lp, &fnic->ctlr, &fnic_transport_template, 0);
if (!fc_exch_mgr_alloc(lp, FC_CLASS_3, FCPIO_HOST_EXCH_RANGE_START,
FCPIO_HOST_EXCH_RANGE_END, NULL)) {
err = -ENOMEM;
goto err_out_remove_scsi_host;
}
fc_lport_init_stats(lp);
fnic->stats_reset_time = jiffies;
fc_lport_config(lp);
if (fc_set_mfs(lp, fnic->config.maxdatafieldsize +
sizeof(struct fc_frame_header))) {
err = -EINVAL;
goto err_out_free_exch_mgr;
}
fc_host_maxframe_size(lp->host) = lp->mfs;
fc_host_dev_loss_tmo(lp->host) = fnic->config.port_down_timeout / 1000;
sprintf(fc_host_symbolic_name(lp->host),
DRV_NAME " v" DRV_VERSION " over %s", fnic->name);
spin_lock_irqsave(&fnic_list_lock, flags);
list_add_tail(&fnic->list, &fnic_list);
spin_unlock_irqrestore(&fnic_list_lock, flags);
INIT_WORK(&fnic->link_work, fnic_handle_link);
INIT_WORK(&fnic->frame_work, fnic_handle_frame);
skb_queue_head_init(&fnic->frame_queue);
skb_queue_head_init(&fnic->tx_queue);
/* Enable all queues */
for (i = 0; i < fnic->raw_wq_count; i++)
vnic_wq_enable(&fnic->wq[i]);
for (i = 0; i < fnic->wq_copy_count; i++)
vnic_wq_copy_enable(&fnic->wq_copy[i]);
fc_fabric_login(lp);
err = fnic_request_intr(fnic);
if (err) {
shost_printk(KERN_ERR, fnic->lport->host,
"Unable to request irq.\n");
goto err_out_free_exch_mgr;
}
vnic_dev_enable(fnic->vdev);
for (i = 0; i < fnic->intr_count; i++)
vnic_intr_unmask(&fnic->intr[i]);
fnic_notify_timer_start(fnic);
return 0;
err_out_free_exch_mgr:
fc_exch_mgr_free(lp);
err_out_remove_scsi_host:
fc_remove_host(lp->host);
scsi_remove_host(lp->host);
err_out_free_rq_buf:
for (i = 0; i < fnic->rq_count; i++)
vnic_rq_clean(&fnic->rq[i], fnic_free_rq_buf);
vnic_dev_notify_unset(fnic->vdev);
err_out_free_max_pool:
mempool_destroy(fnic->io_sgl_pool[FNIC_SGL_CACHE_MAX]);
err_out_free_dflt_pool:
mempool_destroy(fnic->io_sgl_pool[FNIC_SGL_CACHE_DFLT]);
err_out_free_ioreq_pool:
mempool_destroy(fnic->io_req_pool);
err_out_free_resources:
fnic_free_vnic_resources(fnic);
err_out_clear_intr:
fnic_clear_intr_mode(fnic);
err_out_dev_close:
vnic_dev_close(fnic->vdev);
err_out_dev_cmd_deinit:
err_out_vnic_unregister:
vnic_dev_unregister(fnic->vdev);
err_out_iounmap:
fnic_iounmap(fnic);
err_out_release_regions:
pci_release_regions(pdev);
err_out_disable_device:
pci_disable_device(pdev);
err_out_free_hba:
fnic_stats_debugfs_remove(fnic);
scsi_host_put(lp->host);
err_out:
return err;
}
static void fnic_remove(struct pci_dev *pdev)
{
struct fnic *fnic = pci_get_drvdata(pdev);
struct fc_lport *lp = fnic->lport;
unsigned long flags;
/*
* Mark state so that the workqueue thread stops forwarding
* received frames and link events to the local port. ISR and
* other threads that can queue work items will also stop
* creating work items on the fnic workqueue
*/
spin_lock_irqsave(&fnic->fnic_lock, flags);
fnic->stop_rx_link_events = 1;
spin_unlock_irqrestore(&fnic->fnic_lock, flags);
if (vnic_dev_get_intr_mode(fnic->vdev) == VNIC_DEV_INTR_MODE_MSI)
del_timer_sync(&fnic->notify_timer);
/*
* Flush the fnic event queue. After this call, there should
* be no event queued for this fnic device in the workqueue
*/
flush_workqueue(fnic_event_queue);
skb_queue_purge(&fnic->frame_queue);
skb_queue_purge(&fnic->tx_queue);
if (fnic->config.flags & VFCF_FIP_CAPABLE) {
del_timer_sync(&fnic->fip_timer);
skb_queue_purge(&fnic->fip_frame_queue);
fnic_fcoe_reset_vlans(fnic);
fnic_fcoe_evlist_free(fnic);
}
	/*
	 * Log off the fabric. This stops all remote ports and the dns
	 * port, and flushes all rport, disc and lport work before
	 * returning.
	 */
fc_fabric_logoff(fnic->lport);
spin_lock_irqsave(&fnic->fnic_lock, flags);
fnic->in_remove = 1;
spin_unlock_irqrestore(&fnic->fnic_lock, flags);
fcoe_ctlr_destroy(&fnic->ctlr);
fc_lport_destroy(lp);
fnic_stats_debugfs_remove(fnic);
/*
* This stops the fnic device, masks all interrupts. Completed
* CQ entries are drained. Posted WQ/RQ/Copy-WQ entries are
* cleaned up
*/
fnic_cleanup(fnic);
BUG_ON(!skb_queue_empty(&fnic->frame_queue));
BUG_ON(!skb_queue_empty(&fnic->tx_queue));
spin_lock_irqsave(&fnic_list_lock, flags);
list_del(&fnic->list);
spin_unlock_irqrestore(&fnic_list_lock, flags);
fc_remove_host(fnic->lport->host);
scsi_remove_host(fnic->lport->host);
fc_exch_mgr_free(fnic->lport);
vnic_dev_notify_unset(fnic->vdev);
fnic_free_intr(fnic);
fnic_free_vnic_resources(fnic);
fnic_clear_intr_mode(fnic);
vnic_dev_close(fnic->vdev);
vnic_dev_unregister(fnic->vdev);
fnic_iounmap(fnic);
pci_release_regions(pdev);
pci_disable_device(pdev);
scsi_host_put(lp->host);
}
static struct pci_driver fnic_driver = {
.name = DRV_NAME,
.id_table = fnic_id_table,
.probe = fnic_probe,
.remove = fnic_remove,
};
static int __init fnic_init_module(void)
{
size_t len;
int err = 0;
printk(KERN_INFO PFX "%s, ver %s\n", DRV_DESCRIPTION, DRV_VERSION);
/* Create debugfs entries for fnic */
err = fnic_debugfs_init();
if (err < 0) {
printk(KERN_ERR PFX "Failed to create fnic directory "
"for tracing and stats logging\n");
fnic_debugfs_terminate();
}
/* Allocate memory for trace buffer */
err = fnic_trace_buf_init();
if (err < 0) {
printk(KERN_ERR PFX
"Trace buffer initialization Failed. "
"Fnic Tracing utility is disabled\n");
fnic_trace_free();
}
/* Allocate memory for fc trace buffer */
err = fnic_fc_trace_init();
if (err < 0) {
printk(KERN_ERR PFX "FC trace buffer initialization Failed "
"FC frame tracing utility is disabled\n");
fnic_fc_trace_free();
}
/* Create a cache for allocation of default size sgls */
len = sizeof(struct fnic_dflt_sgl_list);
fnic_sgl_cache[FNIC_SGL_CACHE_DFLT] = kmem_cache_create
("fnic_sgl_dflt", len + FNIC_SG_DESC_ALIGN, FNIC_SG_DESC_ALIGN,
SLAB_HWCACHE_ALIGN,
NULL);
if (!fnic_sgl_cache[FNIC_SGL_CACHE_DFLT]) {
printk(KERN_ERR PFX "failed to create fnic dflt sgl slab\n");
err = -ENOMEM;
goto err_create_fnic_sgl_slab_dflt;
}
/* Create a cache for allocation of max size sgls*/
len = sizeof(struct fnic_sgl_list);
fnic_sgl_cache[FNIC_SGL_CACHE_MAX] = kmem_cache_create
("fnic_sgl_max", len + FNIC_SG_DESC_ALIGN, FNIC_SG_DESC_ALIGN,
SLAB_HWCACHE_ALIGN,
NULL);
if (!fnic_sgl_cache[FNIC_SGL_CACHE_MAX]) {
printk(KERN_ERR PFX "failed to create fnic max sgl slab\n");
err = -ENOMEM;
goto err_create_fnic_sgl_slab_max;
}
/* Create a cache of io_req structs for use via mempool */
fnic_io_req_cache = kmem_cache_create("fnic_io_req",
sizeof(struct fnic_io_req),
0, SLAB_HWCACHE_ALIGN, NULL);
if (!fnic_io_req_cache) {
printk(KERN_ERR PFX "failed to create fnic io_req slab\n");
err = -ENOMEM;
goto err_create_fnic_ioreq_slab;
}
fnic_event_queue = create_singlethread_workqueue("fnic_event_wq");
if (!fnic_event_queue) {
printk(KERN_ERR PFX "fnic work queue create failed\n");
err = -ENOMEM;
goto err_create_fnic_workq;
}
fnic_fip_queue = create_singlethread_workqueue("fnic_fip_q");
if (!fnic_fip_queue) {
printk(KERN_ERR PFX "fnic FIP work queue create failed\n");
err = -ENOMEM;
goto err_create_fip_workq;
}
fnic_fc_transport = fc_attach_transport(&fnic_fc_functions);
if (!fnic_fc_transport) {
printk(KERN_ERR PFX "fc_attach_transport error\n");
err = -ENOMEM;
goto err_fc_transport;
}
/* register the driver with PCI system */
err = pci_register_driver(&fnic_driver);
if (err < 0) {
printk(KERN_ERR PFX "pci register error\n");
goto err_pci_register;
}
return err;
err_pci_register:
fc_release_transport(fnic_fc_transport);
err_fc_transport:
destroy_workqueue(fnic_fip_queue);
err_create_fip_workq:
destroy_workqueue(fnic_event_queue);
err_create_fnic_workq:
kmem_cache_destroy(fnic_io_req_cache);
err_create_fnic_ioreq_slab:
kmem_cache_destroy(fnic_sgl_cache[FNIC_SGL_CACHE_MAX]);
err_create_fnic_sgl_slab_max:
kmem_cache_destroy(fnic_sgl_cache[FNIC_SGL_CACHE_DFLT]);
err_create_fnic_sgl_slab_dflt:
fnic_trace_free();
fnic_fc_trace_free();
fnic_debugfs_terminate();
return err;
}
static void __exit fnic_cleanup_module(void)
{
pci_unregister_driver(&fnic_driver);
destroy_workqueue(fnic_event_queue);
if (fnic_fip_queue)
destroy_workqueue(fnic_fip_queue);
kmem_cache_destroy(fnic_sgl_cache[FNIC_SGL_CACHE_MAX]);
kmem_cache_destroy(fnic_sgl_cache[FNIC_SGL_CACHE_DFLT]);
kmem_cache_destroy(fnic_io_req_cache);
fc_release_transport(fnic_fc_transport);
fnic_trace_free();
fnic_fc_trace_free();
fnic_debugfs_terminate();
}
module_init(fnic_init_module);
module_exit(fnic_cleanup_module);
| linux-master | drivers/scsi/fnic/fnic_main.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright 2008 Cisco Systems, Inc. All rights reserved.
* Copyright 2007 Nuova Systems, Inc. All rights reserved.
*/
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/if_ether.h>
#include <linux/slab.h>
#include "vnic_resource.h"
#include "vnic_devcmd.h"
#include "vnic_dev.h"
#include "vnic_stats.h"
#include "vnic_wq.h"
struct devcmd2_controller {
struct vnic_wq_ctrl *wq_ctrl;
struct vnic_dev_ring results_ring;
struct vnic_wq wq;
struct vnic_devcmd2 *cmd_ring;
struct devcmd2_result *result;
u16 next_result;
u16 result_size;
int color;
};
enum vnic_proxy_type {
PROXY_NONE,
PROXY_BY_BDF,
PROXY_BY_INDEX,
};
struct vnic_res {
void __iomem *vaddr;
unsigned int count;
};
struct vnic_dev {
void *priv;
struct pci_dev *pdev;
struct vnic_res res[RES_TYPE_MAX];
enum vnic_dev_intr_mode intr_mode;
struct vnic_devcmd __iomem *devcmd;
struct vnic_devcmd_notify *notify;
struct vnic_devcmd_notify notify_copy;
dma_addr_t notify_pa;
u32 *linkstatus;
dma_addr_t linkstatus_pa;
struct vnic_stats *stats;
dma_addr_t stats_pa;
struct vnic_devcmd_fw_info *fw_info;
dma_addr_t fw_info_pa;
enum vnic_proxy_type proxy;
u32 proxy_index;
u64 args[VNIC_DEVCMD_NARGS];
struct devcmd2_controller *devcmd2;
int (*devcmd_rtn)(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd,
int wait);
};
#define VNIC_MAX_RES_HDR_SIZE \
(sizeof(struct vnic_resource_header) + \
sizeof(struct vnic_resource) * RES_TYPE_MAX)
#define VNIC_RES_STRIDE 128
void *vnic_dev_priv(struct vnic_dev *vdev)
{
return vdev->priv;
}
static int vnic_dev_discover_res(struct vnic_dev *vdev,
struct vnic_dev_bar *bar)
{
struct vnic_resource_header __iomem *rh;
struct vnic_resource __iomem *r;
u8 type;
if (bar->len < VNIC_MAX_RES_HDR_SIZE) {
printk(KERN_ERR "vNIC BAR0 res hdr length error\n");
return -EINVAL;
}
rh = bar->vaddr;
if (!rh) {
printk(KERN_ERR "vNIC BAR0 res hdr not mem-mapped\n");
return -EINVAL;
}
if (ioread32(&rh->magic) != VNIC_RES_MAGIC ||
ioread32(&rh->version) != VNIC_RES_VERSION) {
printk(KERN_ERR "vNIC BAR0 res magic/version error "
"exp (%lx/%lx) curr (%x/%x)\n",
VNIC_RES_MAGIC, VNIC_RES_VERSION,
ioread32(&rh->magic), ioread32(&rh->version));
return -EINVAL;
}
r = (struct vnic_resource __iomem *)(rh + 1);
while ((type = ioread8(&r->type)) != RES_TYPE_EOL) {
u8 bar_num = ioread8(&r->bar);
u32 bar_offset = ioread32(&r->bar_offset);
u32 count = ioread32(&r->count);
u32 len;
r++;
if (bar_num != 0) /* only mapping in BAR0 resources */
continue;
switch (type) {
case RES_TYPE_WQ:
case RES_TYPE_RQ:
case RES_TYPE_CQ:
case RES_TYPE_INTR_CTRL:
			/* each resource entry is VNIC_RES_STRIDE bytes long */
len = count * VNIC_RES_STRIDE;
if (len + bar_offset > bar->len) {
printk(KERN_ERR "vNIC BAR0 resource %d "
"out-of-bounds, offset 0x%x + "
"size 0x%x > bar len 0x%lx\n",
type, bar_offset,
len,
bar->len);
return -EINVAL;
}
break;
case RES_TYPE_INTR_PBA_LEGACY:
case RES_TYPE_DEVCMD2:
case RES_TYPE_DEVCMD:
len = count;
break;
default:
continue;
}
vdev->res[type].count = count;
vdev->res[type].vaddr = (char __iomem *)bar->vaddr + bar_offset;
}
return 0;
}
unsigned int vnic_dev_get_res_count(struct vnic_dev *vdev,
enum vnic_res_type type)
{
return vdev->res[type].count;
}
void __iomem *vnic_dev_get_res(struct vnic_dev *vdev, enum vnic_res_type type,
unsigned int index)
{
if (!vdev->res[type].vaddr)
return NULL;
switch (type) {
case RES_TYPE_WQ:
case RES_TYPE_RQ:
case RES_TYPE_CQ:
case RES_TYPE_INTR_CTRL:
return (char __iomem *)vdev->res[type].vaddr +
index * VNIC_RES_STRIDE;
default:
return (char __iomem *)vdev->res[type].vaddr;
}
}
unsigned int vnic_dev_desc_ring_size(struct vnic_dev_ring *ring,
unsigned int desc_count,
unsigned int desc_size)
{
/* The base address of the desc rings must be 512 byte aligned.
* Descriptor count is aligned to groups of 32 descriptors. A
* count of 0 means the maximum 4096 descriptors. Descriptor
* size is aligned to 16 bytes.
*/
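	/*
	 * Worked example: desc_count = 3000 and desc_size = 16 give
	 * desc_count = ALIGN(3000, 32) = 3008, size = 3008 * 16 = 48128
	 * bytes and size_unaligned = 48128 + 512 = 48640 bytes, leaving
	 * room to round the base up to a 512 byte boundary later.
	 */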
unsigned int count_align = 32;
unsigned int desc_align = 16;
ring->base_align = 512;
if (desc_count == 0)
desc_count = 4096;
ring->desc_count = ALIGN(desc_count, count_align);
ring->desc_size = ALIGN(desc_size, desc_align);
ring->size = ring->desc_count * ring->desc_size;
ring->size_unaligned = ring->size + ring->base_align;
return ring->size_unaligned;
}
void vnic_dev_clear_desc_ring(struct vnic_dev_ring *ring)
{
memset(ring->descs, 0, ring->size);
}
int vnic_dev_alloc_desc_ring(struct vnic_dev *vdev, struct vnic_dev_ring *ring,
unsigned int desc_count, unsigned int desc_size)
{
vnic_dev_desc_ring_size(ring, desc_count, desc_size);
ring->descs_unaligned = dma_alloc_coherent(&vdev->pdev->dev,
ring->size_unaligned,
&ring->base_addr_unaligned, GFP_KERNEL);
if (!ring->descs_unaligned) {
printk(KERN_ERR
"Failed to allocate ring (size=%d), aborting\n",
(int)ring->size);
return -ENOMEM;
}
ring->base_addr = ALIGN(ring->base_addr_unaligned,
ring->base_align);
ring->descs = (u8 *)ring->descs_unaligned +
(ring->base_addr - ring->base_addr_unaligned);
vnic_dev_clear_desc_ring(ring);
ring->desc_avail = ring->desc_count - 1;
return 0;
}
void vnic_dev_free_desc_ring(struct vnic_dev *vdev, struct vnic_dev_ring *ring)
{
if (ring->descs) {
dma_free_coherent(&vdev->pdev->dev,
ring->size_unaligned,
ring->descs_unaligned,
ring->base_addr_unaligned);
ring->descs = NULL;
}
}
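/*
 * Devcmd (v1) handshake: write the arguments and the command number into
 * the BAR-mapped devcmd area, then poll the status register every 100us,
 * for at most 'wait' iterations, until STAT_BUSY clears. Firmware error
 * codes are translated to negative errno values via the dev_cmd_err[]
 * table below.
 */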
static int vnic_dev_cmd1(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd, int wait)
{
struct vnic_devcmd __iomem *devcmd = vdev->devcmd;
int delay;
u32 status;
static const int dev_cmd_err[] = {
/* convert from fw's version of error.h to host's version */
0, /* ERR_SUCCESS */
EINVAL, /* ERR_EINVAL */
EFAULT, /* ERR_EFAULT */
EPERM, /* ERR_EPERM */
EBUSY, /* ERR_EBUSY */
};
int err;
u64 *a0 = &vdev->args[0];
u64 *a1 = &vdev->args[1];
status = ioread32(&devcmd->status);
if (status & STAT_BUSY) {
printk(KERN_ERR "Busy devcmd %d\n", _CMD_N(cmd));
return -EBUSY;
}
if (_CMD_DIR(cmd) & _CMD_DIR_WRITE) {
writeq(*a0, &devcmd->args[0]);
writeq(*a1, &devcmd->args[1]);
wmb();
}
iowrite32(cmd, &devcmd->cmd);
if ((_CMD_FLAGS(cmd) & _CMD_FLAGS_NOWAIT))
return 0;
for (delay = 0; delay < wait; delay++) {
udelay(100);
status = ioread32(&devcmd->status);
if (!(status & STAT_BUSY)) {
if (status & STAT_ERROR) {
err = dev_cmd_err[(int)readq(&devcmd->args[0])];
printk(KERN_ERR "Error %d devcmd %d\n",
err, _CMD_N(cmd));
return -err;
}
if (_CMD_DIR(cmd) & _CMD_DIR_READ) {
rmb();
*a0 = readq(&devcmd->args[0]);
*a1 = readq(&devcmd->args[1]);
}
return 0;
}
}
printk(KERN_ERR "Timedout devcmd %d\n", _CMD_N(cmd));
return -ETIMEDOUT;
}
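/*
 * Devcmd2 handshake: commands are posted to a work queue ring by filling
 * a descriptor and advancing posted_index; firmware writes completions to
 * a separate results ring. A color bit that flips on every wrap of the
 * results ring marks fresh entries, and the host polls the expected
 * result slot every 100us, for at most 'wait' iterations.
 */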
static int vnic_dev_cmd2(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd,
int wait)
{
struct devcmd2_controller *dc2c = vdev->devcmd2;
struct devcmd2_result *result;
u8 color;
unsigned int i;
int delay;
int err;
u32 fetch_index;
u32 posted;
u32 new_posted;
posted = ioread32(&dc2c->wq_ctrl->posted_index);
fetch_index = ioread32(&dc2c->wq_ctrl->fetch_index);
if (posted == 0xFFFFFFFF || fetch_index == 0xFFFFFFFF) {
/* Hardware surprise removal: return error */
pr_err("%s: devcmd2 invalid posted or fetch index on cmd %d\n",
pci_name(vdev->pdev), _CMD_N(cmd));
pr_err("%s: fetch index: %u, posted index: %u\n",
pci_name(vdev->pdev), fetch_index, posted);
return -ENODEV;
}
new_posted = (posted + 1) % DEVCMD2_RING_SIZE;
if (new_posted == fetch_index) {
pr_err("%s: devcmd2 wq full while issuing cmd %d\n",
pci_name(vdev->pdev), _CMD_N(cmd));
pr_err("%s: fetch index: %u, posted index: %u\n",
pci_name(vdev->pdev), fetch_index, posted);
return -EBUSY;
}
dc2c->cmd_ring[posted].cmd = cmd;
dc2c->cmd_ring[posted].flags = 0;
if ((_CMD_FLAGS(cmd) & _CMD_FLAGS_NOWAIT))
dc2c->cmd_ring[posted].flags |= DEVCMD2_FNORESULT;
if (_CMD_DIR(cmd) & _CMD_DIR_WRITE) {
for (i = 0; i < VNIC_DEVCMD_NARGS; i++)
dc2c->cmd_ring[posted].args[i] = vdev->args[i];
}
/* Adding write memory barrier prevents compiler and/or CPU
* reordering, thus avoiding descriptor posting before
* descriptor is initialized. Otherwise, hardware can read
* stale descriptor fields.
*/
wmb();
iowrite32(new_posted, &dc2c->wq_ctrl->posted_index);
if (dc2c->cmd_ring[posted].flags & DEVCMD2_FNORESULT)
return 0;
result = dc2c->result + dc2c->next_result;
color = dc2c->color;
dc2c->next_result++;
if (dc2c->next_result == dc2c->result_size) {
dc2c->next_result = 0;
dc2c->color = dc2c->color ? 0 : 1;
}
for (delay = 0; delay < wait; delay++) {
udelay(100);
if (result->color == color) {
if (result->error) {
err = -(int) result->error;
if (err != ERR_ECMDUNKNOWN ||
cmd != CMD_CAPABILITY)
pr_err("%s:Error %d devcmd %d\n",
pci_name(vdev->pdev),
err, _CMD_N(cmd));
return err;
}
if (_CMD_DIR(cmd) & _CMD_DIR_READ) {
				rmb(); /* prevent reordering while reading the result */
for (i = 0; i < VNIC_DEVCMD_NARGS; i++)
vdev->args[i] = result->results[i];
}
return 0;
}
}
pr_err("%s:Timed out devcmd %d\n", pci_name(vdev->pdev), _CMD_N(cmd));
return -ETIMEDOUT;
}
static int vnic_dev_init_devcmd1(struct vnic_dev *vdev)
{
vdev->devcmd = vnic_dev_get_res(vdev, RES_TYPE_DEVCMD, 0);
if (!vdev->devcmd)
return -ENODEV;
vdev->devcmd_rtn = &vnic_dev_cmd1;
return 0;
}
static int vnic_dev_init_devcmd2(struct vnic_dev *vdev)
{
int err;
unsigned int fetch_index;
if (vdev->devcmd2)
return 0;
vdev->devcmd2 = kzalloc(sizeof(*vdev->devcmd2), GFP_ATOMIC);
if (!vdev->devcmd2)
return -ENOMEM;
vdev->devcmd2->color = 1;
vdev->devcmd2->result_size = DEVCMD2_RING_SIZE;
err = vnic_wq_devcmd2_alloc(vdev, &vdev->devcmd2->wq,
DEVCMD2_RING_SIZE, DEVCMD2_DESC_SIZE);
if (err)
goto err_free_devcmd2;
fetch_index = ioread32(&vdev->devcmd2->wq.ctrl->fetch_index);
if (fetch_index == 0xFFFFFFFF) { /* check for hardware gone */
pr_err("error in devcmd2 init");
err = -ENODEV;
goto err_free_wq;
}
	/*
	 * Never change fetch_index; set posted_index equal to
	 * fetch_index when setting up the WQ for devcmd2.
	 */
vnic_wq_init_start(&vdev->devcmd2->wq, 0, fetch_index,
fetch_index, 0, 0);
vnic_wq_enable(&vdev->devcmd2->wq);
err = vnic_dev_alloc_desc_ring(vdev, &vdev->devcmd2->results_ring,
DEVCMD2_RING_SIZE, DEVCMD2_DESC_SIZE);
if (err)
goto err_disable_wq;
vdev->devcmd2->result =
(struct devcmd2_result *) vdev->devcmd2->results_ring.descs;
vdev->devcmd2->cmd_ring =
(struct vnic_devcmd2 *) vdev->devcmd2->wq.ring.descs;
vdev->devcmd2->wq_ctrl = vdev->devcmd2->wq.ctrl;
vdev->args[0] = (u64) vdev->devcmd2->results_ring.base_addr |
VNIC_PADDR_TARGET;
vdev->args[1] = DEVCMD2_RING_SIZE;
err = vnic_dev_cmd2(vdev, CMD_INITIALIZE_DEVCMD2, 1000);
if (err)
goto err_free_desc_ring;
vdev->devcmd_rtn = &vnic_dev_cmd2;
return 0;
err_free_desc_ring:
vnic_dev_free_desc_ring(vdev, &vdev->devcmd2->results_ring);
err_disable_wq:
vnic_wq_disable(&vdev->devcmd2->wq);
err_free_wq:
vnic_wq_free(&vdev->devcmd2->wq);
err_free_devcmd2:
kfree(vdev->devcmd2);
vdev->devcmd2 = NULL;
return err;
}
static void vnic_dev_deinit_devcmd2(struct vnic_dev *vdev)
{
vnic_dev_free_desc_ring(vdev, &vdev->devcmd2->results_ring);
vnic_wq_disable(&vdev->devcmd2->wq);
vnic_wq_free(&vdev->devcmd2->wq);
kfree(vdev->devcmd2);
vdev->devcmd2 = NULL;
vdev->devcmd_rtn = &vnic_dev_cmd1;
}
static int vnic_dev_cmd_no_proxy(struct vnic_dev *vdev,
enum vnic_devcmd_cmd cmd, u64 *a0, u64 *a1, int wait)
{
int err;
vdev->args[0] = *a0;
vdev->args[1] = *a1;
err = (*vdev->devcmd_rtn)(vdev, cmd, wait);
*a0 = vdev->args[0];
*a1 = vdev->args[1];
return err;
}
int vnic_dev_cmd(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd,
u64 *a0, u64 *a1, int wait)
{
memset(vdev->args, 0, sizeof(vdev->args));
switch (vdev->proxy) {
case PROXY_NONE:
default:
return vnic_dev_cmd_no_proxy(vdev, cmd, a0, a1, wait);
}
}
int vnic_dev_fw_info(struct vnic_dev *vdev,
struct vnic_devcmd_fw_info **fw_info)
{
u64 a0, a1 = 0;
int wait = 1000;
int err = 0;
if (!vdev->fw_info) {
vdev->fw_info = dma_alloc_coherent(&vdev->pdev->dev,
sizeof(struct vnic_devcmd_fw_info),
&vdev->fw_info_pa, GFP_KERNEL);
if (!vdev->fw_info)
return -ENOMEM;
a0 = vdev->fw_info_pa;
/* only get fw_info once and cache it */
err = vnic_dev_cmd(vdev, CMD_MCPU_FW_INFO, &a0, &a1, wait);
}
*fw_info = vdev->fw_info;
return err;
}
int vnic_dev_spec(struct vnic_dev *vdev, unsigned int offset, unsigned int size,
void *value)
{
u64 a0, a1;
int wait = 1000;
int err;
a0 = offset;
a1 = size;
err = vnic_dev_cmd(vdev, CMD_DEV_SPEC, &a0, &a1, wait);
switch (size) {
case 1:
*(u8 *)value = (u8)a0;
break;
case 2:
*(u16 *)value = (u16)a0;
break;
case 4:
*(u32 *)value = (u32)a0;
break;
case 8:
*(u64 *)value = a0;
break;
default:
BUG();
break;
}
return err;
}
int vnic_dev_stats_clear(struct vnic_dev *vdev)
{
u64 a0 = 0, a1 = 0;
int wait = 1000;
return vnic_dev_cmd(vdev, CMD_STATS_CLEAR, &a0, &a1, wait);
}
int vnic_dev_stats_dump(struct vnic_dev *vdev, struct vnic_stats **stats)
{
u64 a0, a1;
int wait = 1000;
if (!vdev->stats) {
vdev->stats = dma_alloc_coherent(&vdev->pdev->dev,
sizeof(struct vnic_stats), &vdev->stats_pa, GFP_KERNEL);
if (!vdev->stats)
return -ENOMEM;
}
*stats = vdev->stats;
a0 = vdev->stats_pa;
a1 = sizeof(struct vnic_stats);
return vnic_dev_cmd(vdev, CMD_STATS_DUMP, &a0, &a1, wait);
}
int vnic_dev_close(struct vnic_dev *vdev)
{
u64 a0 = 0, a1 = 0;
int wait = 1000;
return vnic_dev_cmd(vdev, CMD_CLOSE, &a0, &a1, wait);
}
int vnic_dev_enable(struct vnic_dev *vdev)
{
u64 a0 = 0, a1 = 0;
int wait = 1000;
return vnic_dev_cmd(vdev, CMD_ENABLE, &a0, &a1, wait);
}
int vnic_dev_disable(struct vnic_dev *vdev)
{
u64 a0 = 0, a1 = 0;
int wait = 1000;
return vnic_dev_cmd(vdev, CMD_DISABLE, &a0, &a1, wait);
}
int vnic_dev_open(struct vnic_dev *vdev, int arg)
{
u64 a0 = (u32)arg, a1 = 0;
int wait = 1000;
return vnic_dev_cmd(vdev, CMD_OPEN, &a0, &a1, wait);
}
int vnic_dev_open_done(struct vnic_dev *vdev, int *done)
{
u64 a0 = 0, a1 = 0;
int wait = 1000;
int err;
*done = 0;
err = vnic_dev_cmd(vdev, CMD_OPEN_STATUS, &a0, &a1, wait);
if (err)
return err;
*done = (a0 == 0);
return 0;
}
int vnic_dev_soft_reset(struct vnic_dev *vdev, int arg)
{
u64 a0 = (u32)arg, a1 = 0;
int wait = 1000;
return vnic_dev_cmd(vdev, CMD_SOFT_RESET, &a0, &a1, wait);
}
int vnic_dev_soft_reset_done(struct vnic_dev *vdev, int *done)
{
u64 a0 = 0, a1 = 0;
int wait = 1000;
int err;
*done = 0;
err = vnic_dev_cmd(vdev, CMD_SOFT_RESET_STATUS, &a0, &a1, wait);
if (err)
return err;
*done = (a0 == 0);
return 0;
}
int vnic_dev_hang_notify(struct vnic_dev *vdev)
{
u64 a0 = 0, a1 = 0;
int wait = 1000;
return vnic_dev_cmd(vdev, CMD_HANG_NOTIFY, &a0, &a1, wait);
}
int vnic_dev_mac_addr(struct vnic_dev *vdev, u8 *mac_addr)
{
u64 a[2] = {};
int wait = 1000;
int err, i;
for (i = 0; i < ETH_ALEN; i++)
mac_addr[i] = 0;
err = vnic_dev_cmd(vdev, CMD_MAC_ADDR, &a[0], &a[1], wait);
if (err)
return err;
for (i = 0; i < ETH_ALEN; i++)
mac_addr[i] = ((u8 *)&a)[i];
return 0;
}
void vnic_dev_packet_filter(struct vnic_dev *vdev, int directed, int multicast,
int broadcast, int promisc, int allmulti)
{
u64 a0, a1 = 0;
int wait = 1000;
int err;
a0 = (directed ? CMD_PFILTER_DIRECTED : 0) |
(multicast ? CMD_PFILTER_MULTICAST : 0) |
(broadcast ? CMD_PFILTER_BROADCAST : 0) |
(promisc ? CMD_PFILTER_PROMISCUOUS : 0) |
(allmulti ? CMD_PFILTER_ALL_MULTICAST : 0);
err = vnic_dev_cmd(vdev, CMD_PACKET_FILTER, &a0, &a1, wait);
if (err)
printk(KERN_ERR "Can't set packet filter\n");
}
void vnic_dev_add_addr(struct vnic_dev *vdev, u8 *addr)
{
u64 a[2] = {};
int wait = 1000;
int err;
int i;
for (i = 0; i < ETH_ALEN; i++)
((u8 *)&a)[i] = addr[i];
err = vnic_dev_cmd(vdev, CMD_ADDR_ADD, &a[0], &a[1], wait);
if (err)
pr_err("Can't add addr [%pM], %d\n", addr, err);
}
void vnic_dev_del_addr(struct vnic_dev *vdev, u8 *addr)
{
u64 a[2] = {};
int wait = 1000;
int err;
int i;
for (i = 0; i < ETH_ALEN; i++)
((u8 *)&a)[i] = addr[i];
err = vnic_dev_cmd(vdev, CMD_ADDR_DEL, &a[0], &a[1], wait);
if (err)
pr_err("Can't del addr [%pM], %d\n", addr, err);
}
int vnic_dev_notify_set(struct vnic_dev *vdev, u16 intr)
{
u64 a0, a1;
int wait = 1000;
if (!vdev->notify) {
vdev->notify = dma_alloc_coherent(&vdev->pdev->dev,
sizeof(struct vnic_devcmd_notify),
&vdev->notify_pa, GFP_KERNEL);
if (!vdev->notify)
return -ENOMEM;
}
a0 = vdev->notify_pa;
a1 = ((u64)intr << 32) & 0x0000ffff00000000ULL;
a1 += sizeof(struct vnic_devcmd_notify);
return vnic_dev_cmd(vdev, CMD_NOTIFY, &a0, &a1, wait);
}
void vnic_dev_notify_unset(struct vnic_dev *vdev)
{
u64 a0, a1;
int wait = 1000;
a0 = 0; /* paddr = 0 to unset notify buffer */
a1 = 0x0000ffff00000000ULL; /* intr num = -1 to unreg for intr */
a1 += sizeof(struct vnic_devcmd_notify);
vnic_dev_cmd(vdev, CMD_NOTIFY, &a0, &a1, wait);
}
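/*
 * The notify area is updated asynchronously by firmware, so take a
 * consistent snapshot: word 0 holds a checksum of the remaining words and
 * the copy is retried until the checksum of the copied data matches it.
 */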
static int vnic_dev_notify_ready(struct vnic_dev *vdev)
{
u32 *words;
unsigned int nwords = sizeof(struct vnic_devcmd_notify) / 4;
unsigned int i;
u32 csum;
if (!vdev->notify)
return 0;
do {
csum = 0;
memcpy(&vdev->notify_copy, vdev->notify,
sizeof(struct vnic_devcmd_notify));
words = (u32 *)&vdev->notify_copy;
for (i = 1; i < nwords; i++)
csum += words[i];
} while (csum != words[0]);
return 1;
}
int vnic_dev_init(struct vnic_dev *vdev, int arg)
{
u64 a0 = (u32)arg, a1 = 0;
int wait = 1000;
return vnic_dev_cmd(vdev, CMD_INIT, &a0, &a1, wait);
}
u16 vnic_dev_set_default_vlan(struct vnic_dev *vdev, u16 new_default_vlan)
{
u64 a0 = new_default_vlan, a1 = 0;
int wait = 1000;
int old_vlan = 0;
old_vlan = vnic_dev_cmd(vdev, CMD_SET_DEFAULT_VLAN, &a0, &a1, wait);
return (u16)old_vlan;
}
int vnic_dev_link_status(struct vnic_dev *vdev)
{
if (vdev->linkstatus)
return *vdev->linkstatus;
if (!vnic_dev_notify_ready(vdev))
return 0;
return vdev->notify_copy.link_state;
}
u32 vnic_dev_port_speed(struct vnic_dev *vdev)
{
if (!vnic_dev_notify_ready(vdev))
return 0;
return vdev->notify_copy.port_speed;
}
u32 vnic_dev_msg_lvl(struct vnic_dev *vdev)
{
if (!vnic_dev_notify_ready(vdev))
return 0;
return vdev->notify_copy.msglvl;
}
u32 vnic_dev_mtu(struct vnic_dev *vdev)
{
if (!vnic_dev_notify_ready(vdev))
return 0;
return vdev->notify_copy.mtu;
}
u32 vnic_dev_link_down_cnt(struct vnic_dev *vdev)
{
if (!vnic_dev_notify_ready(vdev))
return 0;
return vdev->notify_copy.link_down_cnt;
}
void vnic_dev_set_intr_mode(struct vnic_dev *vdev,
enum vnic_dev_intr_mode intr_mode)
{
vdev->intr_mode = intr_mode;
}
enum vnic_dev_intr_mode vnic_dev_get_intr_mode(
struct vnic_dev *vdev)
{
return vdev->intr_mode;
}
void vnic_dev_unregister(struct vnic_dev *vdev)
{
if (vdev) {
if (vdev->notify)
dma_free_coherent(&vdev->pdev->dev,
sizeof(struct vnic_devcmd_notify),
vdev->notify,
vdev->notify_pa);
if (vdev->linkstatus)
dma_free_coherent(&vdev->pdev->dev,
sizeof(u32),
vdev->linkstatus,
vdev->linkstatus_pa);
if (vdev->stats)
dma_free_coherent(&vdev->pdev->dev,
sizeof(struct vnic_stats),
vdev->stats, vdev->stats_pa);
if (vdev->fw_info)
dma_free_coherent(&vdev->pdev->dev,
sizeof(struct vnic_devcmd_fw_info),
vdev->fw_info, vdev->fw_info_pa);
if (vdev->devcmd2)
vnic_dev_deinit_devcmd2(vdev);
kfree(vdev);
}
}
struct vnic_dev *vnic_dev_register(struct vnic_dev *vdev,
void *priv, struct pci_dev *pdev, struct vnic_dev_bar *bar)
{
if (!vdev) {
vdev = kzalloc(sizeof(struct vnic_dev), GFP_KERNEL);
if (!vdev)
return NULL;
}
vdev->priv = priv;
vdev->pdev = pdev;
if (vnic_dev_discover_res(vdev, bar))
goto err_out;
return vdev;
err_out:
vnic_dev_unregister(vdev);
return NULL;
}
int vnic_dev_cmd_init(struct vnic_dev *vdev)
{
int err;
void *p;
p = vnic_dev_get_res(vdev, RES_TYPE_DEVCMD2, 0);
if (p) {
pr_err("fnic: DEVCMD2 resource found!\n");
err = vnic_dev_init_devcmd2(vdev);
} else {
pr_err("fnic: DEVCMD2 not found, fall back to Devcmd\n");
err = vnic_dev_init_devcmd1(vdev);
}
return err;
}
| linux-master | drivers/scsi/fnic/vnic_dev.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright 2008 Cisco Systems, Inc. All rights reserved.
* Copyright 2007 Nuova Systems, Inc. All rights reserved.
*/
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include "vnic_dev.h"
#include "vnic_rq.h"
static int vnic_rq_alloc_bufs(struct vnic_rq *rq)
{
struct vnic_rq_buf *buf;
unsigned int i, j, count = rq->ring.desc_count;
unsigned int blks = VNIC_RQ_BUF_BLKS_NEEDED(count);
for (i = 0; i < blks; i++) {
rq->bufs[i] = kzalloc(VNIC_RQ_BUF_BLK_SZ, GFP_ATOMIC);
if (!rq->bufs[i]) {
printk(KERN_ERR "Failed to alloc rq_bufs\n");
return -ENOMEM;
}
}
for (i = 0; i < blks; i++) {
buf = rq->bufs[i];
for (j = 0; j < VNIC_RQ_BUF_BLK_ENTRIES; j++) {
buf->index = i * VNIC_RQ_BUF_BLK_ENTRIES + j;
buf->desc = (u8 *)rq->ring.descs +
rq->ring.desc_size * buf->index;
if (buf->index + 1 == count) {
buf->next = rq->bufs[0];
break;
} else if (j + 1 == VNIC_RQ_BUF_BLK_ENTRIES) {
buf->next = rq->bufs[i + 1];
} else {
buf->next = buf + 1;
buf++;
}
}
}
rq->to_use = rq->to_clean = rq->bufs[0];
rq->buf_index = 0;
return 0;
}
void vnic_rq_free(struct vnic_rq *rq)
{
struct vnic_dev *vdev;
unsigned int i;
vdev = rq->vdev;
vnic_dev_free_desc_ring(vdev, &rq->ring);
for (i = 0; i < VNIC_RQ_BUF_BLKS_MAX; i++) {
kfree(rq->bufs[i]);
rq->bufs[i] = NULL;
}
rq->ctrl = NULL;
}
int vnic_rq_alloc(struct vnic_dev *vdev, struct vnic_rq *rq, unsigned int index,
unsigned int desc_count, unsigned int desc_size)
{
int err;
rq->index = index;
rq->vdev = vdev;
rq->ctrl = vnic_dev_get_res(vdev, RES_TYPE_RQ, index);
if (!rq->ctrl) {
printk(KERN_ERR "Failed to hook RQ[%d] resource\n", index);
return -EINVAL;
}
vnic_rq_disable(rq);
err = vnic_dev_alloc_desc_ring(vdev, &rq->ring, desc_count, desc_size);
if (err)
return err;
err = vnic_rq_alloc_bufs(rq);
if (err) {
vnic_rq_free(rq);
return err;
}
return 0;
}
void vnic_rq_init(struct vnic_rq *rq, unsigned int cq_index,
unsigned int error_interrupt_enable,
unsigned int error_interrupt_offset)
{
u64 paddr;
u32 fetch_index;
paddr = (u64)rq->ring.base_addr | VNIC_PADDR_TARGET;
writeq(paddr, &rq->ctrl->ring_base);
iowrite32(rq->ring.desc_count, &rq->ctrl->ring_size);
iowrite32(cq_index, &rq->ctrl->cq_index);
iowrite32(error_interrupt_enable, &rq->ctrl->error_interrupt_enable);
iowrite32(error_interrupt_offset, &rq->ctrl->error_interrupt_offset);
iowrite32(0, &rq->ctrl->dropped_packet_count);
iowrite32(0, &rq->ctrl->error_status);
/* Use current fetch_index as the ring starting point */
fetch_index = ioread32(&rq->ctrl->fetch_index);
rq->to_use = rq->to_clean =
&rq->bufs[fetch_index / VNIC_RQ_BUF_BLK_ENTRIES]
[fetch_index % VNIC_RQ_BUF_BLK_ENTRIES];
iowrite32(fetch_index, &rq->ctrl->posted_index);
rq->buf_index = 0;
}
unsigned int vnic_rq_error_status(struct vnic_rq *rq)
{
return ioread32(&rq->ctrl->error_status);
}
void vnic_rq_enable(struct vnic_rq *rq)
{
iowrite32(1, &rq->ctrl->enable);
}
int vnic_rq_disable(struct vnic_rq *rq)
{
unsigned int wait;
iowrite32(0, &rq->ctrl->enable);
/* Wait for HW to ACK disable request */
for (wait = 0; wait < 100; wait++) {
if (!(ioread32(&rq->ctrl->running)))
return 0;
udelay(1);
}
printk(KERN_ERR "Failed to disable RQ[%d]\n", rq->index);
return -ETIMEDOUT;
}
void vnic_rq_clean(struct vnic_rq *rq,
void (*buf_clean)(struct vnic_rq *rq, struct vnic_rq_buf *buf))
{
struct vnic_rq_buf *buf;
u32 fetch_index;
WARN_ON(ioread32(&rq->ctrl->enable));
buf = rq->to_clean;
while (vnic_rq_desc_used(rq) > 0) {
(*buf_clean)(rq, buf);
buf = rq->to_clean = buf->next;
rq->ring.desc_avail++;
}
/* Use current fetch_index as the ring starting point */
fetch_index = ioread32(&rq->ctrl->fetch_index);
rq->to_use = rq->to_clean =
&rq->bufs[fetch_index / VNIC_RQ_BUF_BLK_ENTRIES]
[fetch_index % VNIC_RQ_BUF_BLK_ENTRIES];
iowrite32(fetch_index, &rq->ctrl->posted_index);
rq->buf_index = 0;
vnic_dev_clear_desc_ring(&rq->ring);
}
| linux-master | drivers/scsi/fnic/vnic_rq.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright 2008 Cisco Systems, Inc. All rights reserved.
* Copyright 2007 Nuova Systems, Inc. All rights reserved.
*/
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/pci.h>
#include "wq_enet_desc.h"
#include "rq_enet_desc.h"
#include "cq_enet_desc.h"
#include "vnic_resource.h"
#include "vnic_dev.h"
#include "vnic_wq.h"
#include "vnic_rq.h"
#include "vnic_cq.h"
#include "vnic_intr.h"
#include "vnic_stats.h"
#include "vnic_nic.h"
#include "fnic.h"
int fnic_get_vnic_config(struct fnic *fnic)
{
struct vnic_fc_config *c = &fnic->config;
int err;
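	/*
	 * GET_CONFIG reads one field of struct vnic_fc_config from the
	 * device via the CMD_DEV_SPEC devcmd, using the field's offset and
	 * size within the structure.
	 */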
#define GET_CONFIG(m) \
do { \
err = vnic_dev_spec(fnic->vdev, \
offsetof(struct vnic_fc_config, m), \
sizeof(c->m), &c->m); \
if (err) { \
shost_printk(KERN_ERR, fnic->lport->host, \
"Error getting %s, %d\n", #m, \
err); \
return err; \
} \
	} while (0)
GET_CONFIG(node_wwn);
GET_CONFIG(port_wwn);
GET_CONFIG(wq_enet_desc_count);
GET_CONFIG(wq_copy_desc_count);
GET_CONFIG(rq_desc_count);
GET_CONFIG(maxdatafieldsize);
GET_CONFIG(ed_tov);
GET_CONFIG(ra_tov);
GET_CONFIG(intr_timer);
GET_CONFIG(intr_timer_type);
GET_CONFIG(flags);
GET_CONFIG(flogi_retries);
GET_CONFIG(flogi_timeout);
GET_CONFIG(plogi_retries);
GET_CONFIG(plogi_timeout);
GET_CONFIG(io_throttle_count);
GET_CONFIG(link_down_timeout);
GET_CONFIG(port_down_timeout);
GET_CONFIG(port_down_io_retries);
GET_CONFIG(luns_per_tgt);
c->wq_enet_desc_count =
min_t(u32, VNIC_FNIC_WQ_DESCS_MAX,
max_t(u32, VNIC_FNIC_WQ_DESCS_MIN,
c->wq_enet_desc_count));
c->wq_enet_desc_count = ALIGN(c->wq_enet_desc_count, 16);
c->wq_copy_desc_count =
min_t(u32, VNIC_FNIC_WQ_COPY_DESCS_MAX,
max_t(u32, VNIC_FNIC_WQ_COPY_DESCS_MIN,
c->wq_copy_desc_count));
c->wq_copy_desc_count = ALIGN(c->wq_copy_desc_count, 16);
c->rq_desc_count =
min_t(u32, VNIC_FNIC_RQ_DESCS_MAX,
max_t(u32, VNIC_FNIC_RQ_DESCS_MIN,
c->rq_desc_count));
c->rq_desc_count = ALIGN(c->rq_desc_count, 16);
c->maxdatafieldsize =
min_t(u16, VNIC_FNIC_MAXDATAFIELDSIZE_MAX,
max_t(u16, VNIC_FNIC_MAXDATAFIELDSIZE_MIN,
c->maxdatafieldsize));
c->ed_tov =
min_t(u32, VNIC_FNIC_EDTOV_MAX,
max_t(u32, VNIC_FNIC_EDTOV_MIN,
c->ed_tov));
c->ra_tov =
min_t(u32, VNIC_FNIC_RATOV_MAX,
max_t(u32, VNIC_FNIC_RATOV_MIN,
c->ra_tov));
c->flogi_retries =
min_t(u32, VNIC_FNIC_FLOGI_RETRIES_MAX, c->flogi_retries);
c->flogi_timeout =
min_t(u32, VNIC_FNIC_FLOGI_TIMEOUT_MAX,
max_t(u32, VNIC_FNIC_FLOGI_TIMEOUT_MIN,
c->flogi_timeout));
c->plogi_retries =
min_t(u32, VNIC_FNIC_PLOGI_RETRIES_MAX, c->plogi_retries);
c->plogi_timeout =
min_t(u32, VNIC_FNIC_PLOGI_TIMEOUT_MAX,
max_t(u32, VNIC_FNIC_PLOGI_TIMEOUT_MIN,
c->plogi_timeout));
c->io_throttle_count =
min_t(u32, VNIC_FNIC_IO_THROTTLE_COUNT_MAX,
max_t(u32, VNIC_FNIC_IO_THROTTLE_COUNT_MIN,
c->io_throttle_count));
c->link_down_timeout =
min_t(u32, VNIC_FNIC_LINK_DOWN_TIMEOUT_MAX,
c->link_down_timeout);
c->port_down_timeout =
min_t(u32, VNIC_FNIC_PORT_DOWN_TIMEOUT_MAX,
c->port_down_timeout);
c->port_down_io_retries =
min_t(u32, VNIC_FNIC_PORT_DOWN_IO_RETRIES_MAX,
c->port_down_io_retries);
c->luns_per_tgt =
min_t(u32, VNIC_FNIC_LUNS_PER_TARGET_MAX,
max_t(u32, VNIC_FNIC_LUNS_PER_TARGET_MIN,
c->luns_per_tgt));
c->intr_timer = min_t(u16, VNIC_INTR_TIMER_MAX, c->intr_timer);
	/* intr_timer_type is used as reported by the firmware */
shost_printk(KERN_INFO, fnic->lport->host,
"vNIC MAC addr %pM "
"wq/wq_copy/rq %d/%d/%d\n",
fnic->ctlr.ctl_src_addr,
c->wq_enet_desc_count, c->wq_copy_desc_count,
c->rq_desc_count);
shost_printk(KERN_INFO, fnic->lport->host,
"vNIC node wwn %llx port wwn %llx\n",
c->node_wwn, c->port_wwn);
shost_printk(KERN_INFO, fnic->lport->host,
"vNIC ed_tov %d ra_tov %d\n",
c->ed_tov, c->ra_tov);
shost_printk(KERN_INFO, fnic->lport->host,
"vNIC mtu %d intr timer %d\n",
c->maxdatafieldsize, c->intr_timer);
shost_printk(KERN_INFO, fnic->lport->host,
"vNIC flags 0x%x luns per tgt %d\n",
c->flags, c->luns_per_tgt);
shost_printk(KERN_INFO, fnic->lport->host,
"vNIC flogi_retries %d flogi timeout %d\n",
c->flogi_retries, c->flogi_timeout);
shost_printk(KERN_INFO, fnic->lport->host,
"vNIC plogi retries %d plogi timeout %d\n",
c->plogi_retries, c->plogi_timeout);
shost_printk(KERN_INFO, fnic->lport->host,
"vNIC io throttle count %d link dn timeout %d\n",
c->io_throttle_count, c->link_down_timeout);
shost_printk(KERN_INFO, fnic->lport->host,
"vNIC port dn io retries %d port dn timeout %d\n",
c->port_down_io_retries, c->port_down_timeout);
return 0;
}
int fnic_set_nic_config(struct fnic *fnic, u8 rss_default_cpu,
u8 rss_hash_type,
u8 rss_hash_bits, u8 rss_base_cpu, u8 rss_enable,
u8 tso_ipid_split_en, u8 ig_vlan_strip_en)
{
u64 a0, a1;
u32 nic_cfg;
int wait = 1000;
vnic_set_nic_cfg(&nic_cfg, rss_default_cpu,
rss_hash_type, rss_hash_bits, rss_base_cpu,
rss_enable, tso_ipid_split_en, ig_vlan_strip_en);
a0 = nic_cfg;
a1 = 0;
return vnic_dev_cmd(fnic->vdev, CMD_NIC_CFG, &a0, &a1, wait);
}
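/*
 * Partition the WQs reported by firmware: all but the last are raw WQs
 * used for FCS frames and the remaining one is the copy WQ used for SCSI
 * I/O, so wq_copy_count always works out to 1 here.
 */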
void fnic_get_res_counts(struct fnic *fnic)
{
fnic->wq_count = vnic_dev_get_res_count(fnic->vdev, RES_TYPE_WQ);
fnic->raw_wq_count = fnic->wq_count - 1;
fnic->wq_copy_count = fnic->wq_count - fnic->raw_wq_count;
fnic->rq_count = vnic_dev_get_res_count(fnic->vdev, RES_TYPE_RQ);
fnic->cq_count = vnic_dev_get_res_count(fnic->vdev, RES_TYPE_CQ);
fnic->intr_count = vnic_dev_get_res_count(fnic->vdev,
RES_TYPE_INTR_CTRL);
}
void fnic_free_vnic_resources(struct fnic *fnic)
{
unsigned int i;
for (i = 0; i < fnic->raw_wq_count; i++)
vnic_wq_free(&fnic->wq[i]);
for (i = 0; i < fnic->wq_copy_count; i++)
vnic_wq_copy_free(&fnic->wq_copy[i]);
for (i = 0; i < fnic->rq_count; i++)
vnic_rq_free(&fnic->rq[i]);
for (i = 0; i < fnic->cq_count; i++)
vnic_cq_free(&fnic->cq[i]);
for (i = 0; i < fnic->intr_count; i++)
vnic_intr_free(&fnic->intr[i]);
}
int fnic_alloc_vnic_resources(struct fnic *fnic)
{
enum vnic_dev_intr_mode intr_mode;
unsigned int mask_on_assertion;
unsigned int interrupt_offset;
unsigned int error_interrupt_enable;
unsigned int error_interrupt_offset;
unsigned int i, cq_index;
unsigned int wq_copy_cq_desc_count;
int err;
intr_mode = vnic_dev_get_intr_mode(fnic->vdev);
shost_printk(KERN_INFO, fnic->lport->host, "vNIC interrupt mode: %s\n",
intr_mode == VNIC_DEV_INTR_MODE_INTX ? "legacy PCI INTx" :
intr_mode == VNIC_DEV_INTR_MODE_MSI ? "MSI" :
intr_mode == VNIC_DEV_INTR_MODE_MSIX ?
"MSI-X" : "unknown");
shost_printk(KERN_INFO, fnic->lport->host, "vNIC resources avail: "
"wq %d cp_wq %d raw_wq %d rq %d cq %d intr %d\n",
fnic->wq_count, fnic->wq_copy_count, fnic->raw_wq_count,
fnic->rq_count, fnic->cq_count, fnic->intr_count);
/* Allocate Raw WQ used for FCS frames */
for (i = 0; i < fnic->raw_wq_count; i++) {
err = vnic_wq_alloc(fnic->vdev, &fnic->wq[i], i,
fnic->config.wq_enet_desc_count,
sizeof(struct wq_enet_desc));
if (err)
goto err_out_cleanup;
}
/* Allocate Copy WQs used for SCSI IOs */
for (i = 0; i < fnic->wq_copy_count; i++) {
err = vnic_wq_copy_alloc(fnic->vdev, &fnic->wq_copy[i],
(fnic->raw_wq_count + i),
fnic->config.wq_copy_desc_count,
sizeof(struct fcpio_host_req));
if (err)
goto err_out_cleanup;
}
/* RQ for receiving FCS frames */
for (i = 0; i < fnic->rq_count; i++) {
err = vnic_rq_alloc(fnic->vdev, &fnic->rq[i], i,
fnic->config.rq_desc_count,
sizeof(struct rq_enet_desc));
if (err)
goto err_out_cleanup;
}
/* CQ for each RQ */
for (i = 0; i < fnic->rq_count; i++) {
cq_index = i;
err = vnic_cq_alloc(fnic->vdev,
&fnic->cq[cq_index], cq_index,
fnic->config.rq_desc_count,
sizeof(struct cq_enet_rq_desc));
if (err)
goto err_out_cleanup;
}
/* CQ for each WQ */
for (i = 0; i < fnic->raw_wq_count; i++) {
cq_index = fnic->rq_count + i;
err = vnic_cq_alloc(fnic->vdev, &fnic->cq[cq_index], cq_index,
fnic->config.wq_enet_desc_count,
sizeof(struct cq_enet_wq_desc));
if (err)
goto err_out_cleanup;
}
/* CQ for each COPY WQ */
wq_copy_cq_desc_count = (fnic->config.wq_copy_desc_count * 3);
for (i = 0; i < fnic->wq_copy_count; i++) {
cq_index = fnic->raw_wq_count + fnic->rq_count + i;
err = vnic_cq_alloc(fnic->vdev, &fnic->cq[cq_index],
cq_index,
wq_copy_cq_desc_count,
sizeof(struct fcpio_fw_req));
if (err)
goto err_out_cleanup;
}
for (i = 0; i < fnic->intr_count; i++) {
err = vnic_intr_alloc(fnic->vdev, &fnic->intr[i], i);
if (err)
goto err_out_cleanup;
}
fnic->legacy_pba = vnic_dev_get_res(fnic->vdev,
RES_TYPE_INTR_PBA_LEGACY, 0);
if (!fnic->legacy_pba && intr_mode == VNIC_DEV_INTR_MODE_INTX) {
shost_printk(KERN_ERR, fnic->lport->host,
"Failed to hook legacy pba resource\n");
err = -ENODEV;
goto err_out_cleanup;
}
/*
* Init RQ/WQ resources.
*
* RQ[0 to n-1] point to CQ[0 to n-1]
* WQ[0 to m-1] point to CQ[n to n+m-1]
* WQ_COPY[0 to k-1] points to CQ[n+m to n+m+k-1]
*
* Note for copy wq we always initialize with cq_index = 0
*
* Error interrupt is not enabled for MSI.
*/
switch (intr_mode) {
case VNIC_DEV_INTR_MODE_INTX:
case VNIC_DEV_INTR_MODE_MSIX:
error_interrupt_enable = 1;
error_interrupt_offset = fnic->err_intr_offset;
break;
default:
error_interrupt_enable = 0;
error_interrupt_offset = 0;
break;
}
for (i = 0; i < fnic->rq_count; i++) {
cq_index = i;
vnic_rq_init(&fnic->rq[i],
cq_index,
error_interrupt_enable,
error_interrupt_offset);
}
for (i = 0; i < fnic->raw_wq_count; i++) {
cq_index = i + fnic->rq_count;
vnic_wq_init(&fnic->wq[i],
cq_index,
error_interrupt_enable,
error_interrupt_offset);
}
for (i = 0; i < fnic->wq_copy_count; i++) {
vnic_wq_copy_init(&fnic->wq_copy[i],
0 /* cq_index 0 - always */,
error_interrupt_enable,
error_interrupt_offset);
}
for (i = 0; i < fnic->cq_count; i++) {
switch (intr_mode) {
case VNIC_DEV_INTR_MODE_MSIX:
interrupt_offset = i;
break;
default:
interrupt_offset = 0;
break;
}
vnic_cq_init(&fnic->cq[i],
0 /* flow_control_enable */,
1 /* color_enable */,
0 /* cq_head */,
0 /* cq_tail */,
1 /* cq_tail_color */,
1 /* interrupt_enable */,
1 /* cq_entry_enable */,
0 /* cq_message_enable */,
interrupt_offset,
0 /* cq_message_addr */);
}
/*
* Init INTR resources
*
* mask_on_assertion is not used for INTx due to the level-
* triggered nature of INTx
*/
switch (intr_mode) {
case VNIC_DEV_INTR_MODE_MSI:
case VNIC_DEV_INTR_MODE_MSIX:
mask_on_assertion = 1;
break;
default:
mask_on_assertion = 0;
break;
}
for (i = 0; i < fnic->intr_count; i++) {
vnic_intr_init(&fnic->intr[i],
fnic->config.intr_timer,
fnic->config.intr_timer_type,
mask_on_assertion);
}
/* init the stats memory by making the first call here */
err = vnic_dev_stats_dump(fnic->vdev, &fnic->stats);
if (err) {
shost_printk(KERN_ERR, fnic->lport->host,
"vnic_dev_stats_dump failed - x%x\n", err);
goto err_out_cleanup;
}
/* Clear LIF stats */
vnic_dev_stats_clear(fnic->vdev);
return 0;
err_out_cleanup:
fnic_free_vnic_resources(fnic);
return err;
}
| linux-master | drivers/scsi/fnic/fnic_res.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright 2008 Cisco Systems, Inc. All rights reserved.
* Copyright 2007 Nuova Systems, Inc. All rights reserved.
*/
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include "vnic_dev.h"
#include "vnic_wq.h"
static int vnic_wq_get_ctrl(struct vnic_dev *vdev, struct vnic_wq *wq,
unsigned int index, enum vnic_res_type res_type)
{
wq->ctrl = vnic_dev_get_res(vdev, res_type, index);
if (!wq->ctrl)
return -EINVAL;
return 0;
}
static int vnic_wq_alloc_ring(struct vnic_dev *vdev, struct vnic_wq *wq,
unsigned int desc_count, unsigned int desc_size)
{
return vnic_dev_alloc_desc_ring(vdev, &wq->ring, desc_count, desc_size);
}
static int vnic_wq_alloc_bufs(struct vnic_wq *wq)
{
struct vnic_wq_buf *buf;
unsigned int i, j, count = wq->ring.desc_count;
unsigned int blks = VNIC_WQ_BUF_BLKS_NEEDED(count);
for (i = 0; i < blks; i++) {
wq->bufs[i] = kzalloc(VNIC_WQ_BUF_BLK_SZ, GFP_ATOMIC);
if (!wq->bufs[i]) {
printk(KERN_ERR "Failed to alloc wq_bufs\n");
return -ENOMEM;
}
}
for (i = 0; i < blks; i++) {
buf = wq->bufs[i];
for (j = 0; j < VNIC_WQ_BUF_BLK_ENTRIES; j++) {
buf->index = i * VNIC_WQ_BUF_BLK_ENTRIES + j;
buf->desc = (u8 *)wq->ring.descs +
wq->ring.desc_size * buf->index;
if (buf->index + 1 == count) {
buf->next = wq->bufs[0];
break;
} else if (j + 1 == VNIC_WQ_BUF_BLK_ENTRIES) {
buf->next = wq->bufs[i + 1];
} else {
buf->next = buf + 1;
buf++;
}
}
}
wq->to_use = wq->to_clean = wq->bufs[0];
return 0;
}
void vnic_wq_free(struct vnic_wq *wq)
{
struct vnic_dev *vdev;
unsigned int i;
vdev = wq->vdev;
vnic_dev_free_desc_ring(vdev, &wq->ring);
for (i = 0; i < VNIC_WQ_BUF_BLKS_MAX; i++) {
kfree(wq->bufs[i]);
wq->bufs[i] = NULL;
}
wq->ctrl = NULL;
}
int vnic_wq_alloc(struct vnic_dev *vdev, struct vnic_wq *wq, unsigned int index,
unsigned int desc_count, unsigned int desc_size)
{
int err;
wq->index = index;
wq->vdev = vdev;
wq->ctrl = vnic_dev_get_res(vdev, RES_TYPE_WQ, index);
if (!wq->ctrl) {
printk(KERN_ERR "Failed to hook WQ[%d] resource\n", index);
return -EINVAL;
}
vnic_wq_disable(wq);
err = vnic_dev_alloc_desc_ring(vdev, &wq->ring, desc_count, desc_size);
if (err)
return err;
err = vnic_wq_alloc_bufs(wq);
if (err) {
vnic_wq_free(wq);
return err;
}
return 0;
}
int vnic_wq_devcmd2_alloc(struct vnic_dev *vdev, struct vnic_wq *wq,
unsigned int desc_count, unsigned int desc_size)
{
int err;
wq->index = 0;
wq->vdev = vdev;
err = vnic_wq_get_ctrl(vdev, wq, 0, RES_TYPE_DEVCMD2);
if (err) {
pr_err("Failed to get devcmd2 resource\n");
return err;
}
vnic_wq_disable(wq);
err = vnic_wq_alloc_ring(vdev, wq, desc_count, desc_size);
if (err)
return err;
return 0;
}
void vnic_wq_init_start(struct vnic_wq *wq, unsigned int cq_index,
unsigned int fetch_index, unsigned int posted_index,
unsigned int error_interrupt_enable,
unsigned int error_interrupt_offset)
{
u64 paddr;
unsigned int count = wq->ring.desc_count;
paddr = (u64)wq->ring.base_addr | VNIC_PADDR_TARGET;
writeq(paddr, &wq->ctrl->ring_base);
iowrite32(count, &wq->ctrl->ring_size);
iowrite32(fetch_index, &wq->ctrl->fetch_index);
iowrite32(posted_index, &wq->ctrl->posted_index);
iowrite32(cq_index, &wq->ctrl->cq_index);
iowrite32(error_interrupt_enable, &wq->ctrl->error_interrupt_enable);
iowrite32(error_interrupt_offset, &wq->ctrl->error_interrupt_offset);
iowrite32(0, &wq->ctrl->error_status);
wq->to_use = wq->to_clean =
&wq->bufs[fetch_index / VNIC_WQ_BUF_BLK_ENTRIES]
[fetch_index % VNIC_WQ_BUF_BLK_ENTRIES];
}
void vnic_wq_init(struct vnic_wq *wq, unsigned int cq_index,
unsigned int error_interrupt_enable,
unsigned int error_interrupt_offset)
{
u64 paddr;
paddr = (u64)wq->ring.base_addr | VNIC_PADDR_TARGET;
writeq(paddr, &wq->ctrl->ring_base);
iowrite32(wq->ring.desc_count, &wq->ctrl->ring_size);
iowrite32(0, &wq->ctrl->fetch_index);
iowrite32(0, &wq->ctrl->posted_index);
iowrite32(cq_index, &wq->ctrl->cq_index);
iowrite32(error_interrupt_enable, &wq->ctrl->error_interrupt_enable);
iowrite32(error_interrupt_offset, &wq->ctrl->error_interrupt_offset);
iowrite32(0, &wq->ctrl->error_status);
}
unsigned int vnic_wq_error_status(struct vnic_wq *wq)
{
return ioread32(&wq->ctrl->error_status);
}
void vnic_wq_enable(struct vnic_wq *wq)
{
iowrite32(1, &wq->ctrl->enable);
}
int vnic_wq_disable(struct vnic_wq *wq)
{
unsigned int wait;
iowrite32(0, &wq->ctrl->enable);
/* Wait for HW to ACK disable request */
for (wait = 0; wait < 100; wait++) {
if (!(ioread32(&wq->ctrl->running)))
return 0;
udelay(1);
}
printk(KERN_ERR "Failed to disable WQ[%d]\n", wq->index);
return -ETIMEDOUT;
}
void vnic_wq_clean(struct vnic_wq *wq,
void (*buf_clean)(struct vnic_wq *wq, struct vnic_wq_buf *buf))
{
struct vnic_wq_buf *buf;
BUG_ON(ioread32(&wq->ctrl->enable));
buf = wq->to_clean;
while (vnic_wq_desc_used(wq) > 0) {
(*buf_clean)(wq, buf);
buf = wq->to_clean = buf->next;
wq->ring.desc_avail++;
}
wq->to_use = wq->to_clean = wq->bufs[0];
iowrite32(0, &wq->ctrl->fetch_index);
iowrite32(0, &wq->ctrl->posted_index);
iowrite32(0, &wq->ctrl->error_status);
vnic_dev_clear_desc_ring(&wq->ring);
}
| linux-master | drivers/scsi/fnic/vnic_wq.c |
// SPDX-License-Identifier: GPL-2.0-or-later
#include <linux/kconfig.h>
#include <linux/types.h>
#include <linux/fault-inject.h>
#include <linux/module.h>
#include "ufs-fault-injection.h"
static int ufs_fault_get(char *buffer, const struct kernel_param *kp);
static int ufs_fault_set(const char *val, const struct kernel_param *kp);
static const struct kernel_param_ops ufs_fault_ops = {
.get = ufs_fault_get,
.set = ufs_fault_set,
};
enum { FAULT_INJ_STR_SIZE = 80 };
/*
* For more details about fault injection, please refer to
* Documentation/fault-injection/fault-injection.rst.
*/
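/*
 * Both attributes take the standard fault_attr string
 * "<interval>,<probability>,<space>,<times>". For example, assuming this
 * file is built into the ufshcd_core module (the exact sysfs path may
 * differ):
 *
 *   echo "0,100,0,1" > /sys/module/ufshcd_core/parameters/trigger_eh
 *
 * requests a single error-handler trigger with 100% probability.
 */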
static char g_trigger_eh_str[FAULT_INJ_STR_SIZE];
module_param_cb(trigger_eh, &ufs_fault_ops, g_trigger_eh_str, 0644);
MODULE_PARM_DESC(trigger_eh,
"Fault injection. trigger_eh=<interval>,<probability>,<space>,<times>");
static DECLARE_FAULT_ATTR(ufs_trigger_eh_attr);
static char g_timeout_str[FAULT_INJ_STR_SIZE];
module_param_cb(timeout, &ufs_fault_ops, g_timeout_str, 0644);
MODULE_PARM_DESC(timeout,
"Fault injection. timeout=<interval>,<probability>,<space>,<times>");
static DECLARE_FAULT_ATTR(ufs_timeout_attr);
static int ufs_fault_get(char *buffer, const struct kernel_param *kp)
{
const char *fault_str = kp->arg;
return sysfs_emit(buffer, "%s\n", fault_str);
}
static int ufs_fault_set(const char *val, const struct kernel_param *kp)
{
struct fault_attr *attr = NULL;
if (kp->arg == g_trigger_eh_str)
attr = &ufs_trigger_eh_attr;
else if (kp->arg == g_timeout_str)
attr = &ufs_timeout_attr;
if (WARN_ON_ONCE(!attr))
return -EINVAL;
if (!setup_fault_attr(attr, (char *)val))
return -EINVAL;
strscpy(kp->arg, val, FAULT_INJ_STR_SIZE);
return 0;
}
bool ufs_trigger_eh(void)
{
return should_fail(&ufs_trigger_eh_attr, 1);
}
bool ufs_fail_completion(void)
{
return should_fail(&ufs_timeout_attr, 1);
}
| linux-master | drivers/ufs/core/ufs-fault-injection.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Universal Flash Storage Host controller driver Core
* Copyright (C) 2011-2013 Samsung India Software Operations
* Copyright (c) 2013-2016, The Linux Foundation. All rights reserved.
*
* Authors:
* Santosh Yaraganavi <[email protected]>
* Vinayak Holikatti <[email protected]>
*/
#include <linux/async.h>
#include <linux/devfreq.h>
#include <linux/nls.h>
#include <linux/of.h>
#include <linux/bitfield.h>
#include <linux/blk-pm.h>
#include <linux/blkdev.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/regulator/consumer.h>
#include <linux/sched/clock.h>
#include <linux/iopoll.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_dbg.h>
#include <scsi/scsi_driver.h>
#include <scsi/scsi_eh.h>
#include "ufshcd-priv.h"
#include <ufs/ufs_quirks.h>
#include <ufs/unipro.h>
#include "ufs-sysfs.h"
#include "ufs-debugfs.h"
#include "ufs-fault-injection.h"
#include "ufs_bsg.h"
#include "ufshcd-crypto.h"
#include <asm/unaligned.h>
#define CREATE_TRACE_POINTS
#include <trace/events/ufs.h>
#define UFSHCD_ENABLE_INTRS (UTP_TRANSFER_REQ_COMPL |\
UTP_TASK_REQ_COMPL |\
UFSHCD_ERROR_MASK)
#define UFSHCD_ENABLE_MCQ_INTRS (UTP_TASK_REQ_COMPL |\
UFSHCD_ERROR_MASK |\
MCQ_CQ_EVENT_STATUS)
/* UIC command timeout, unit: ms */
#define UIC_CMD_TIMEOUT 500
/* NOP OUT retries waiting for NOP IN response */
#define NOP_OUT_RETRIES 10
/* Timeout after 50 msecs if NOP OUT hangs without response */
#define NOP_OUT_TIMEOUT 50 /* msecs */
/* Query request retries */
#define QUERY_REQ_RETRIES 3
/* Query request timeout */
#define QUERY_REQ_TIMEOUT 1500 /* 1.5 seconds */
/* Advanced RPMB request timeout */
#define ADVANCED_RPMB_REQ_TIMEOUT 3000 /* 3 seconds */
/* Task management command timeout */
#define TM_CMD_TIMEOUT 100 /* msecs */
/* maximum number of retries for a general UIC command */
#define UFS_UIC_COMMAND_RETRIES 3
/* maximum number of link-startup retries */
#define DME_LINKSTARTUP_RETRIES 3
/* maximum number of reset retries before giving up */
#define MAX_HOST_RESET_RETRIES 5
/* Maximum number of error handler retries before giving up */
#define MAX_ERR_HANDLER_RETRIES 5
/* Expose the flag value from utp_upiu_query.value */
#define MASK_QUERY_UPIU_FLAG_LOC 0xFF
/* Interrupt aggregation default timeout, unit: 40us */
#define INT_AGGR_DEF_TO 0x02
/* default delay of autosuspend: 2000 ms */
#define RPM_AUTOSUSPEND_DELAY_MS 2000
/* Default delay of RPM device flush delayed work */
#define RPM_DEV_FLUSH_RECHECK_WORK_DELAY_MS 5000
/* Default value of wait time before gating device ref clock */
#define UFSHCD_REF_CLK_GATING_WAIT_US 0xFF /* microsecs */
/* Polling time to wait for fDeviceInit */
#define FDEVICEINIT_COMPL_TIMEOUT 1500 /* millisecs */
/* UFSHCI 4.0 compliant host controllers support this mode. */
static bool use_mcq_mode = true;
static bool is_mcq_supported(struct ufs_hba *hba)
{
return hba->mcq_sup && use_mcq_mode;
}
module_param(use_mcq_mode, bool, 0644);
MODULE_PARM_DESC(use_mcq_mode, "Control MCQ mode for controllers starting from UFSHCI 4.0. 1 - enable MCQ, 0 - disable MCQ. MCQ is enabled by default");
#define ufshcd_toggle_vreg(_dev, _vreg, _on) \
({ \
int _ret; \
if (_on) \
_ret = ufshcd_enable_vreg(_dev, _vreg); \
else \
_ret = ufshcd_disable_vreg(_dev, _vreg); \
_ret; \
})
#define ufshcd_hex_dump(prefix_str, buf, len) do { \
size_t __len = (len); \
print_hex_dump(KERN_ERR, prefix_str, \
__len > 4 ? DUMP_PREFIX_OFFSET : DUMP_PREFIX_NONE,\
16, 4, buf, __len, false); \
} while (0)
int ufshcd_dump_regs(struct ufs_hba *hba, size_t offset, size_t len,
const char *prefix)
{
u32 *regs;
size_t pos;
if (offset % 4 != 0 || len % 4 != 0) /* keep readl happy */
return -EINVAL;
regs = kzalloc(len, GFP_ATOMIC);
if (!regs)
return -ENOMEM;
for (pos = 0; pos < len; pos += 4) {
if (offset == 0 &&
pos >= REG_UIC_ERROR_CODE_PHY_ADAPTER_LAYER &&
pos <= REG_UIC_ERROR_CODE_DME)
continue;
regs[pos / 4] = ufshcd_readl(hba, offset + pos);
}
ufshcd_hex_dump(prefix, regs, len);
kfree(regs);
return 0;
}
EXPORT_SYMBOL_GPL(ufshcd_dump_regs);
enum {
UFSHCD_MAX_CHANNEL = 0,
UFSHCD_MAX_ID = 1,
UFSHCD_CMD_PER_LUN = 32 - UFSHCD_NUM_RESERVED,
UFSHCD_CAN_QUEUE = 32 - UFSHCD_NUM_RESERVED,
};
static const char *const ufshcd_state_name[] = {
[UFSHCD_STATE_RESET] = "reset",
[UFSHCD_STATE_OPERATIONAL] = "operational",
[UFSHCD_STATE_ERROR] = "error",
[UFSHCD_STATE_EH_SCHEDULED_FATAL] = "eh_fatal",
[UFSHCD_STATE_EH_SCHEDULED_NON_FATAL] = "eh_non_fatal",
};
/* UFSHCD error handling flags */
enum {
UFSHCD_EH_IN_PROGRESS = (1 << 0),
};
/* UFSHCD UIC layer error flags */
enum {
UFSHCD_UIC_DL_PA_INIT_ERROR = (1 << 0), /* Data link layer error */
UFSHCD_UIC_DL_NAC_RECEIVED_ERROR = (1 << 1), /* Data link layer error */
UFSHCD_UIC_DL_TCx_REPLAY_ERROR = (1 << 2), /* Data link layer error */
UFSHCD_UIC_NL_ERROR = (1 << 3), /* Network layer error */
UFSHCD_UIC_TL_ERROR = (1 << 4), /* Transport Layer error */
UFSHCD_UIC_DME_ERROR = (1 << 5), /* DME error */
UFSHCD_UIC_PA_GENERIC_ERROR = (1 << 6), /* Generic PA error */
};
#define ufshcd_set_eh_in_progress(h) \
((h)->eh_flags |= UFSHCD_EH_IN_PROGRESS)
#define ufshcd_eh_in_progress(h) \
((h)->eh_flags & UFSHCD_EH_IN_PROGRESS)
#define ufshcd_clear_eh_in_progress(h) \
((h)->eh_flags &= ~UFSHCD_EH_IN_PROGRESS)
const struct ufs_pm_lvl_states ufs_pm_lvl_states[] = {
[UFS_PM_LVL_0] = {UFS_ACTIVE_PWR_MODE, UIC_LINK_ACTIVE_STATE},
[UFS_PM_LVL_1] = {UFS_ACTIVE_PWR_MODE, UIC_LINK_HIBERN8_STATE},
[UFS_PM_LVL_2] = {UFS_SLEEP_PWR_MODE, UIC_LINK_ACTIVE_STATE},
[UFS_PM_LVL_3] = {UFS_SLEEP_PWR_MODE, UIC_LINK_HIBERN8_STATE},
[UFS_PM_LVL_4] = {UFS_POWERDOWN_PWR_MODE, UIC_LINK_HIBERN8_STATE},
[UFS_PM_LVL_5] = {UFS_POWERDOWN_PWR_MODE, UIC_LINK_OFF_STATE},
/*
* For DeepSleep, the link is first put in hibern8 and then off.
* Leaving the link in hibern8 is not supported.
*/
[UFS_PM_LVL_6] = {UFS_DEEPSLEEP_PWR_MODE, UIC_LINK_OFF_STATE},
};
static inline enum ufs_dev_pwr_mode
ufs_get_pm_lvl_to_dev_pwr_mode(enum ufs_pm_level lvl)
{
return ufs_pm_lvl_states[lvl].dev_state;
}
static inline enum uic_link_state
ufs_get_pm_lvl_to_link_pwr_state(enum ufs_pm_level lvl)
{
return ufs_pm_lvl_states[lvl].link_state;
}
static inline enum ufs_pm_level
ufs_get_desired_pm_lvl_for_dev_link_state(enum ufs_dev_pwr_mode dev_state,
enum uic_link_state link_state)
{
enum ufs_pm_level lvl;
for (lvl = UFS_PM_LVL_0; lvl < UFS_PM_LVL_MAX; lvl++) {
if ((ufs_pm_lvl_states[lvl].dev_state == dev_state) &&
(ufs_pm_lvl_states[lvl].link_state == link_state))
return lvl;
}
/* if no match is found, return level 0 */
return UFS_PM_LVL_0;
}
static const struct ufs_dev_quirk ufs_fixups[] = {
/* UFS cards deviations table */
{ .wmanufacturerid = UFS_VENDOR_MICRON,
.model = UFS_ANY_MODEL,
.quirk = UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM },
{ .wmanufacturerid = UFS_VENDOR_SAMSUNG,
.model = UFS_ANY_MODEL,
.quirk = UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM |
UFS_DEVICE_QUIRK_HOST_PA_TACTIVATE |
UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS },
{ .wmanufacturerid = UFS_VENDOR_SKHYNIX,
.model = UFS_ANY_MODEL,
.quirk = UFS_DEVICE_QUIRK_HOST_PA_SAVECONFIGTIME },
{ .wmanufacturerid = UFS_VENDOR_SKHYNIX,
.model = "hB8aL1" /*H28U62301AMR*/,
.quirk = UFS_DEVICE_QUIRK_HOST_VS_DEBUGSAVECONFIGTIME },
{ .wmanufacturerid = UFS_VENDOR_TOSHIBA,
.model = UFS_ANY_MODEL,
.quirk = UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM },
{ .wmanufacturerid = UFS_VENDOR_TOSHIBA,
.model = "THGLF2G9C8KBADG",
.quirk = UFS_DEVICE_QUIRK_PA_TACTIVATE },
{ .wmanufacturerid = UFS_VENDOR_TOSHIBA,
.model = "THGLF2G9D8KBADG",
.quirk = UFS_DEVICE_QUIRK_PA_TACTIVATE },
{}
};
static irqreturn_t ufshcd_tmc_handler(struct ufs_hba *hba);
static void ufshcd_async_scan(void *data, async_cookie_t cookie);
static int ufshcd_reset_and_restore(struct ufs_hba *hba);
static int ufshcd_eh_host_reset_handler(struct scsi_cmnd *cmd);
static int ufshcd_clear_tm_cmd(struct ufs_hba *hba, int tag);
static void ufshcd_hba_exit(struct ufs_hba *hba);
static int ufshcd_probe_hba(struct ufs_hba *hba, bool init_dev_params);
static int ufshcd_setup_clocks(struct ufs_hba *hba, bool on);
static inline void ufshcd_add_delay_before_dme_cmd(struct ufs_hba *hba);
static int ufshcd_host_reset_and_restore(struct ufs_hba *hba);
static void ufshcd_resume_clkscaling(struct ufs_hba *hba);
static void ufshcd_suspend_clkscaling(struct ufs_hba *hba);
static void __ufshcd_suspend_clkscaling(struct ufs_hba *hba);
static int ufshcd_scale_clks(struct ufs_hba *hba, bool scale_up);
static irqreturn_t ufshcd_intr(int irq, void *__hba);
static int ufshcd_change_power_mode(struct ufs_hba *hba,
struct ufs_pa_layer_attr *pwr_mode);
static int ufshcd_setup_hba_vreg(struct ufs_hba *hba, bool on);
static int ufshcd_setup_vreg(struct ufs_hba *hba, bool on);
static inline int ufshcd_config_vreg_hpm(struct ufs_hba *hba,
struct ufs_vreg *vreg);
static void ufshcd_wb_toggle_buf_flush_during_h8(struct ufs_hba *hba,
bool enable);
static void ufshcd_hba_vreg_set_lpm(struct ufs_hba *hba);
static void ufshcd_hba_vreg_set_hpm(struct ufs_hba *hba);
static inline void ufshcd_enable_irq(struct ufs_hba *hba)
{
if (!hba->is_irq_enabled) {
enable_irq(hba->irq);
hba->is_irq_enabled = true;
}
}
static inline void ufshcd_disable_irq(struct ufs_hba *hba)
{
if (hba->is_irq_enabled) {
disable_irq(hba->irq);
hba->is_irq_enabled = false;
}
}
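/*
 * ufshcd_configure_wb - enable WriteBooster, allow buffer flushing during
 * hibern8, and turn on explicit buffer flushing if the platform permits
 * it. Does nothing when WriteBooster is not allowed for this device.
 */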
static void ufshcd_configure_wb(struct ufs_hba *hba)
{
if (!ufshcd_is_wb_allowed(hba))
return;
ufshcd_wb_toggle(hba, true);
ufshcd_wb_toggle_buf_flush_during_h8(hba, true);
if (ufshcd_is_wb_buf_flush_allowed(hba))
ufshcd_wb_toggle_buf_flush(hba, true);
}
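/*
 * ufshcd_scsi_block_requests() and ufshcd_scsi_unblock_requests() are
 * reference counted wrappers around scsi_block_requests(): the SCSI host
 * is blocked by the first caller and unblocked only when the last blocker
 * releases it.
 */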
static void ufshcd_scsi_unblock_requests(struct ufs_hba *hba)
{
if (atomic_dec_and_test(&hba->scsi_block_reqs_cnt))
scsi_unblock_requests(hba->host);
}
static void ufshcd_scsi_block_requests(struct ufs_hba *hba)
{
if (atomic_inc_return(&hba->scsi_block_reqs_cnt) == 1)
scsi_block_requests(hba->host);
}
static void ufshcd_add_cmd_upiu_trace(struct ufs_hba *hba, unsigned int tag,
enum ufs_trace_str_t str_t)
{
struct utp_upiu_req *rq = hba->lrb[tag].ucd_req_ptr;
struct utp_upiu_header *header;
if (!trace_ufshcd_upiu_enabled())
return;
if (str_t == UFS_CMD_SEND)
header = &rq->header;
else
header = &hba->lrb[tag].ucd_rsp_ptr->header;
trace_ufshcd_upiu(dev_name(hba->dev), str_t, header, &rq->sc.cdb,
UFS_TSF_CDB);
}
static void ufshcd_add_query_upiu_trace(struct ufs_hba *hba,
enum ufs_trace_str_t str_t,
struct utp_upiu_req *rq_rsp)
{
if (!trace_ufshcd_upiu_enabled())
return;
trace_ufshcd_upiu(dev_name(hba->dev), str_t, &rq_rsp->header,
&rq_rsp->qr, UFS_TSF_OSF);
}
static void ufshcd_add_tm_upiu_trace(struct ufs_hba *hba, unsigned int tag,
enum ufs_trace_str_t str_t)
{
struct utp_task_req_desc *descp = &hba->utmrdl_base_addr[tag];
if (!trace_ufshcd_upiu_enabled())
return;
if (str_t == UFS_TM_SEND)
trace_ufshcd_upiu(dev_name(hba->dev), str_t,
&descp->upiu_req.req_header,
&descp->upiu_req.input_param1,
UFS_TSF_TM_INPUT);
else
trace_ufshcd_upiu(dev_name(hba->dev), str_t,
&descp->upiu_rsp.rsp_header,
&descp->upiu_rsp.output_param1,
UFS_TSF_TM_OUTPUT);
}
static void ufshcd_add_uic_command_trace(struct ufs_hba *hba,
const struct uic_command *ucmd,
enum ufs_trace_str_t str_t)
{
u32 cmd;
if (!trace_ufshcd_uic_command_enabled())
return;
if (str_t == UFS_CMD_SEND)
cmd = ucmd->command;
else
cmd = ufshcd_readl(hba, REG_UIC_COMMAND);
trace_ufshcd_uic_command(dev_name(hba->dev), str_t, cmd,
ufshcd_readl(hba, REG_UIC_COMMAND_ARG_1),
ufshcd_readl(hba, REG_UIC_COMMAND_ARG_2),
ufshcd_readl(hba, REG_UIC_COMMAND_ARG_3));
}
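/*
 * ufshcd_add_command_trace - emit the ufshcd_command trace event for the
 * SCSI command identified by the given tag. For READ(10)/WRITE(10) the
 * LBA and transfer length are decoded (plus the group number for
 * WRITE(10)); for UNMAP the LBA and byte count are recorded. In MCQ mode
 * the hardware queue id is traced instead of the doorbell register.
 */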
static void ufshcd_add_command_trace(struct ufs_hba *hba, unsigned int tag,
enum ufs_trace_str_t str_t)
{
u64 lba = 0;
u8 opcode = 0, group_id = 0;
u32 doorbell = 0;
u32 intr;
int hwq_id = -1;
struct ufshcd_lrb *lrbp = &hba->lrb[tag];
struct scsi_cmnd *cmd = lrbp->cmd;
struct request *rq = scsi_cmd_to_rq(cmd);
int transfer_len = -1;
if (!cmd)
return;
/* trace UPIU also */
ufshcd_add_cmd_upiu_trace(hba, tag, str_t);
if (!trace_ufshcd_command_enabled())
return;
opcode = cmd->cmnd[0];
if (opcode == READ_10 || opcode == WRITE_10) {
/*
* Currently we only fully trace read(10) and write(10) commands
*/
transfer_len =
be32_to_cpu(lrbp->ucd_req_ptr->sc.exp_data_transfer_len);
lba = scsi_get_lba(cmd);
if (opcode == WRITE_10)
group_id = lrbp->cmd->cmnd[6];
} else if (opcode == UNMAP) {
/*
 * The number of bytes to be unmapped, beginning at the LBA.
*/
transfer_len = blk_rq_bytes(rq);
lba = scsi_get_lba(cmd);
}
intr = ufshcd_readl(hba, REG_INTERRUPT_STATUS);
if (is_mcq_enabled(hba)) {
struct ufs_hw_queue *hwq = ufshcd_mcq_req_to_hwq(hba, rq);
hwq_id = hwq->id;
} else {
doorbell = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
}
trace_ufshcd_command(dev_name(hba->dev), str_t, tag,
doorbell, hwq_id, transfer_len, intr, lba, opcode, group_id);
}
static void ufshcd_print_clk_freqs(struct ufs_hba *hba)
{
struct ufs_clk_info *clki;
struct list_head *head = &hba->clk_list_head;
if (list_empty(head))
return;
list_for_each_entry(clki, head, list) {
if (!IS_ERR_OR_NULL(clki->clk) && clki->min_freq &&
clki->max_freq)
dev_err(hba->dev, "clk: %s, rate: %u\n",
clki->name, clki->curr_freq);
}
}
static void ufshcd_print_evt(struct ufs_hba *hba, u32 id,
const char *err_name)
{
int i;
bool found = false;
const struct ufs_event_hist *e;
if (id >= UFS_EVT_CNT)
return;
e = &hba->ufs_stats.event[id];
for (i = 0; i < UFS_EVENT_HIST_LENGTH; i++) {
int p = (i + e->pos) % UFS_EVENT_HIST_LENGTH;
if (e->tstamp[p] == 0)
continue;
dev_err(hba->dev, "%s[%d] = 0x%x at %lld us\n", err_name, p,
e->val[p], div_u64(e->tstamp[p], 1000));
found = true;
}
if (!found)
dev_err(hba->dev, "No record of %s\n", err_name);
else
dev_err(hba->dev, "%s: total cnt=%llu\n", err_name, e->cnt);
}
static void ufshcd_print_evt_hist(struct ufs_hba *hba)
{
ufshcd_dump_regs(hba, 0, UFSHCI_REG_SPACE_SIZE, "host_regs: ");
ufshcd_print_evt(hba, UFS_EVT_PA_ERR, "pa_err");
ufshcd_print_evt(hba, UFS_EVT_DL_ERR, "dl_err");
ufshcd_print_evt(hba, UFS_EVT_NL_ERR, "nl_err");
ufshcd_print_evt(hba, UFS_EVT_TL_ERR, "tl_err");
ufshcd_print_evt(hba, UFS_EVT_DME_ERR, "dme_err");
ufshcd_print_evt(hba, UFS_EVT_AUTO_HIBERN8_ERR,
"auto_hibern8_err");
ufshcd_print_evt(hba, UFS_EVT_FATAL_ERR, "fatal_err");
ufshcd_print_evt(hba, UFS_EVT_LINK_STARTUP_FAIL,
"link_startup_fail");
ufshcd_print_evt(hba, UFS_EVT_RESUME_ERR, "resume_fail");
ufshcd_print_evt(hba, UFS_EVT_SUSPEND_ERR,
"suspend_fail");
ufshcd_print_evt(hba, UFS_EVT_WL_RES_ERR, "wlun resume_fail");
ufshcd_print_evt(hba, UFS_EVT_WL_SUSP_ERR,
"wlun suspend_fail");
ufshcd_print_evt(hba, UFS_EVT_DEV_RESET, "dev_reset");
ufshcd_print_evt(hba, UFS_EVT_HOST_RESET, "host_reset");
ufshcd_print_evt(hba, UFS_EVT_ABORT, "task_abort");
ufshcd_vops_dbg_register_dump(hba);
}
static
void ufshcd_print_tr(struct ufs_hba *hba, int tag, bool pr_prdt)
{
const struct ufshcd_lrb *lrbp;
int prdt_length;
lrbp = &hba->lrb[tag];
dev_err(hba->dev, "UPIU[%d] - issue time %lld us\n",
tag, div_u64(lrbp->issue_time_stamp_local_clock, 1000));
dev_err(hba->dev, "UPIU[%d] - complete time %lld us\n",
tag, div_u64(lrbp->compl_time_stamp_local_clock, 1000));
dev_err(hba->dev,
"UPIU[%d] - Transfer Request Descriptor phys@0x%llx\n",
tag, (u64)lrbp->utrd_dma_addr);
ufshcd_hex_dump("UPIU TRD: ", lrbp->utr_descriptor_ptr,
sizeof(struct utp_transfer_req_desc));
dev_err(hba->dev, "UPIU[%d] - Request UPIU phys@0x%llx\n", tag,
(u64)lrbp->ucd_req_dma_addr);
ufshcd_hex_dump("UPIU REQ: ", lrbp->ucd_req_ptr,
sizeof(struct utp_upiu_req));
dev_err(hba->dev, "UPIU[%d] - Response UPIU phys@0x%llx\n", tag,
(u64)lrbp->ucd_rsp_dma_addr);
ufshcd_hex_dump("UPIU RSP: ", lrbp->ucd_rsp_ptr,
sizeof(struct utp_upiu_rsp));
prdt_length = le16_to_cpu(
lrbp->utr_descriptor_ptr->prd_table_length);
if (hba->quirks & UFSHCD_QUIRK_PRDT_BYTE_GRAN)
prdt_length /= ufshcd_sg_entry_size(hba);
dev_err(hba->dev,
"UPIU[%d] - PRDT - %d entries phys@0x%llx\n",
tag, prdt_length,
(u64)lrbp->ucd_prdt_dma_addr);
if (pr_prdt)
ufshcd_hex_dump("UPIU PRDT: ", lrbp->ucd_prdt_ptr,
ufshcd_sg_entry_size(hba) * prdt_length);
}
static bool ufshcd_print_tr_iter(struct request *req, void *priv)
{
struct scsi_device *sdev = req->q->queuedata;
struct Scsi_Host *shost = sdev->host;
struct ufs_hba *hba = shost_priv(shost);
ufshcd_print_tr(hba, req->tag, *(bool *)priv);
return true;
}
/**
* ufshcd_print_trs_all - print trs for all started requests.
* @hba: per-adapter instance.
* @pr_prdt: need to print prdt or not.
*/
static void ufshcd_print_trs_all(struct ufs_hba *hba, bool pr_prdt)
{
blk_mq_tagset_busy_iter(&hba->host->tag_set, ufshcd_print_tr_iter, &pr_prdt);
}
static void ufshcd_print_tmrs(struct ufs_hba *hba, unsigned long bitmap)
{
int tag;
for_each_set_bit(tag, &bitmap, hba->nutmrs) {
struct utp_task_req_desc *tmrdp = &hba->utmrdl_base_addr[tag];
dev_err(hba->dev, "TM[%d] - Task Management Header\n", tag);
ufshcd_hex_dump("", tmrdp, sizeof(*tmrdp));
}
}
static void ufshcd_print_host_state(struct ufs_hba *hba)
{
const struct scsi_device *sdev_ufs = hba->ufs_device_wlun;
dev_err(hba->dev, "UFS Host state=%d\n", hba->ufshcd_state);
dev_err(hba->dev, "outstanding reqs=0x%lx tasks=0x%lx\n",
hba->outstanding_reqs, hba->outstanding_tasks);
dev_err(hba->dev, "saved_err=0x%x, saved_uic_err=0x%x\n",
hba->saved_err, hba->saved_uic_err);
dev_err(hba->dev, "Device power mode=%d, UIC link state=%d\n",
hba->curr_dev_pwr_mode, hba->uic_link_state);
dev_err(hba->dev, "PM in progress=%d, sys. suspended=%d\n",
hba->pm_op_in_progress, hba->is_sys_suspended);
dev_err(hba->dev, "Auto BKOPS=%d, Host self-block=%d\n",
hba->auto_bkops_enabled, hba->host->host_self_blocked);
dev_err(hba->dev, "Clk gate=%d\n", hba->clk_gating.state);
dev_err(hba->dev,
"last_hibern8_exit_tstamp at %lld us, hibern8_exit_cnt=%d\n",
div_u64(hba->ufs_stats.last_hibern8_exit_tstamp, 1000),
hba->ufs_stats.hibern8_exit_cnt);
dev_err(hba->dev, "last intr at %lld us, last intr status=0x%x\n",
div_u64(hba->ufs_stats.last_intr_ts, 1000),
hba->ufs_stats.last_intr_status);
dev_err(hba->dev, "error handling flags=0x%x, req. abort count=%d\n",
hba->eh_flags, hba->req_abort_count);
dev_err(hba->dev, "hba->ufs_version=0x%x, Host capabilities=0x%x, caps=0x%x\n",
hba->ufs_version, hba->capabilities, hba->caps);
dev_err(hba->dev, "quirks=0x%x, dev. quirks=0x%x\n", hba->quirks,
hba->dev_quirks);
if (sdev_ufs)
dev_err(hba->dev, "UFS dev info: %.8s %.16s rev %.4s\n",
sdev_ufs->vendor, sdev_ufs->model, sdev_ufs->rev);
ufshcd_print_clk_freqs(hba);
}
/**
* ufshcd_print_pwr_info - print power params as saved in hba
* power info
* @hba: per-adapter instance
*/
static void ufshcd_print_pwr_info(struct ufs_hba *hba)
{
static const char * const names[] = {
"INVALID MODE",
"FAST MODE",
"SLOW_MODE",
"INVALID MODE",
"FASTAUTO_MODE",
"SLOWAUTO_MODE",
"INVALID MODE",
};
/*
 * Use dev_dbg to avoid messages during runtime PM; otherwise user space
 * writing the messages back to storage would trigger runtime resume,
 * generating more messages and so on in a never-ending cycle.
*/
dev_dbg(hba->dev, "%s:[RX, TX]: gear=[%d, %d], lane[%d, %d], pwr[%s, %s], rate = %d\n",
__func__,
hba->pwr_info.gear_rx, hba->pwr_info.gear_tx,
hba->pwr_info.lane_rx, hba->pwr_info.lane_tx,
names[hba->pwr_info.pwr_rx],
names[hba->pwr_info.pwr_tx],
hba->pwr_info.hs_rate);
}
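/*
 * ufshcd_device_reset - reset the UFS device through the host driver's
 * device_reset vop. On success the device is marked active and any cached
 * WriteBooster state is invalidated. The outcome is recorded in the event
 * history unless the vop is not implemented (-EOPNOTSUPP).
 */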
static void ufshcd_device_reset(struct ufs_hba *hba)
{
int err;
err = ufshcd_vops_device_reset(hba);
if (!err) {
ufshcd_set_ufs_dev_active(hba);
if (ufshcd_is_wb_allowed(hba)) {
hba->dev_info.wb_enabled = false;
hba->dev_info.wb_buf_flush_enabled = false;
}
}
if (err != -EOPNOTSUPP)
ufshcd_update_evt_hist(hba, UFS_EVT_DEV_RESET, err);
}
void ufshcd_delay_us(unsigned long us, unsigned long tolerance)
{
if (!us)
return;
if (us < 10)
udelay(us);
else
usleep_range(us, us + tolerance);
}
EXPORT_SYMBOL_GPL(ufshcd_delay_us);
/**
* ufshcd_wait_for_register - wait for register value to change
* @hba: per-adapter interface
* @reg: mmio register offset
* @mask: mask to apply to the read register value
* @val: value to wait for
* @interval_us: polling interval in microseconds
* @timeout_ms: timeout in milliseconds
*
* Return: -ETIMEDOUT on error, zero on success.
*/
static int ufshcd_wait_for_register(struct ufs_hba *hba, u32 reg, u32 mask,
u32 val, unsigned long interval_us,
unsigned long timeout_ms)
{
int err = 0;
unsigned long timeout = jiffies + msecs_to_jiffies(timeout_ms);
/* ignore bits that we don't intend to wait on */
val = val & mask;
while ((ufshcd_readl(hba, reg) & mask) != val) {
usleep_range(interval_us, interval_us + 50);
if (time_after(jiffies, timeout)) {
if ((ufshcd_readl(hba, reg) & mask) != val)
err = -ETIMEDOUT;
break;
}
}
return err;
}
/**
* ufshcd_get_intr_mask - Get the interrupt bit mask
* @hba: Pointer to adapter instance
*
* Return: interrupt bit mask per version
*/
static inline u32 ufshcd_get_intr_mask(struct ufs_hba *hba)
{
if (hba->ufs_version == ufshci_version(1, 0))
return INTERRUPT_MASK_ALL_VER_10;
if (hba->ufs_version <= ufshci_version(2, 0))
return INTERRUPT_MASK_ALL_VER_11;
return INTERRUPT_MASK_ALL_VER_21;
}
/**
* ufshcd_get_ufs_version - Get the UFS version supported by the HBA
* @hba: Pointer to adapter instance
*
* Return: UFSHCI version supported by the controller
*/
static inline u32 ufshcd_get_ufs_version(struct ufs_hba *hba)
{
u32 ufshci_ver;
if (hba->quirks & UFSHCD_QUIRK_BROKEN_UFS_HCI_VERSION)
ufshci_ver = ufshcd_vops_get_ufs_hci_version(hba);
else
ufshci_ver = ufshcd_readl(hba, REG_UFS_VERSION);
/*
 * UFSHCI v1.x uses a different version scheme. To allow
 * comparisons with the ufshci_version() helper, convert it
 * to the same scheme as UFS 2.0+.
*/
if (ufshci_ver & 0x00010000)
return ufshci_version(1, ufshci_ver & 0x00000100);
return ufshci_ver;
}
/**
 * ufshcd_is_device_present - Check if any device is connected to
 * the host controller
* @hba: pointer to adapter instance
*
* Return: true if device present, false if no device detected
*/
static inline bool ufshcd_is_device_present(struct ufs_hba *hba)
{
return ufshcd_readl(hba, REG_CONTROLLER_STATUS) & DEVICE_PRESENT;
}
/**
* ufshcd_get_tr_ocs - Get the UTRD Overall Command Status
* @lrbp: pointer to local command reference block
* @cqe: pointer to the completion queue entry
*
* This function is used to get the OCS field from UTRD
*
* Return: the OCS field in the UTRD.
*/
static enum utp_ocs ufshcd_get_tr_ocs(struct ufshcd_lrb *lrbp,
struct cq_entry *cqe)
{
if (cqe)
return le32_to_cpu(cqe->status) & MASK_OCS;
return lrbp->utr_descriptor_ptr->header.ocs & MASK_OCS;
}
/**
* ufshcd_utrl_clear() - Clear requests from the controller request list.
* @hba: per adapter instance
* @mask: mask with one bit set for each request to be cleared
*/
static inline void ufshcd_utrl_clear(struct ufs_hba *hba, u32 mask)
{
if (hba->quirks & UFSHCI_QUIRK_BROKEN_REQ_LIST_CLR)
mask = ~mask;
/*
* From the UFSHCI specification: "UTP Transfer Request List CLear
* Register (UTRLCLR): This field is bit significant. Each bit
* corresponds to a slot in the UTP Transfer Request List, where bit 0
* corresponds to request slot 0. A bit in this field is set to ‘0’
* by host software to indicate to the host controller that a transfer
* request slot is cleared. The host controller
* shall free up any resources associated to the request slot
* immediately, and shall set the associated bit in UTRLDBR to ‘0’. The
* host software indicates no change to request slots by setting the
* associated bits in this field to ‘1’. Bits in this field shall only
* be set ‘1’ or ‘0’ by host software when UTRLRSR is set to ‘1’."
*/
ufshcd_writel(hba, ~mask, REG_UTP_TRANSFER_REQ_LIST_CLEAR);
}
/**
* ufshcd_utmrl_clear - Clear a bit in UTMRLCLR register
* @hba: per adapter instance
* @pos: position of the bit to be cleared
*/
static inline void ufshcd_utmrl_clear(struct ufs_hba *hba, u32 pos)
{
if (hba->quirks & UFSHCI_QUIRK_BROKEN_REQ_LIST_CLR)
ufshcd_writel(hba, (1 << pos), REG_UTP_TASK_REQ_LIST_CLEAR);
else
ufshcd_writel(hba, ~(1 << pos), REG_UTP_TASK_REQ_LIST_CLEAR);
}
/**
* ufshcd_get_lists_status - Check UCRDY, UTRLRDY and UTMRLRDY
* @reg: Register value of host controller status
*
* Return: 0 on success; a positive value if failed.
*/
static inline int ufshcd_get_lists_status(u32 reg)
{
return !((reg & UFSHCD_STATUS_READY) == UFSHCD_STATUS_READY);
}
/**
* ufshcd_get_uic_cmd_result - Get the UIC command result
* @hba: Pointer to adapter instance
*
* This function gets the result of UIC command completion
*
* Return: 0 on success; non-zero value on error.
*/
static inline int ufshcd_get_uic_cmd_result(struct ufs_hba *hba)
{
return ufshcd_readl(hba, REG_UIC_COMMAND_ARG_2) &
MASK_UIC_COMMAND_RESULT;
}
/**
* ufshcd_get_dme_attr_val - Get the value of attribute returned by UIC command
* @hba: Pointer to adapter instance
*
* This function gets UIC command argument3
*
 * Return: the value of UIC command argument 3.
*/
static inline u32 ufshcd_get_dme_attr_val(struct ufs_hba *hba)
{
return ufshcd_readl(hba, REG_UIC_COMMAND_ARG_3);
}
/**
* ufshcd_get_req_rsp - returns the TR response transaction type
* @ucd_rsp_ptr: pointer to response UPIU
*
* Return: UPIU type.
*/
static inline enum upiu_response_transaction
ufshcd_get_req_rsp(struct utp_upiu_rsp *ucd_rsp_ptr)
{
return ucd_rsp_ptr->header.transaction_code;
}
/**
* ufshcd_is_exception_event - Check if the device raised an exception event
* @ucd_rsp_ptr: pointer to response UPIU
*
* The function checks if the device raised an exception event indicated in
* the Device Information field of response UPIU.
*
* Return: true if exception is raised, false otherwise.
*/
static inline bool ufshcd_is_exception_event(struct utp_upiu_rsp *ucd_rsp_ptr)
{
return ucd_rsp_ptr->header.device_information & 1;
}
/**
* ufshcd_reset_intr_aggr - Reset interrupt aggregation values.
* @hba: per adapter instance
*/
static inline void
ufshcd_reset_intr_aggr(struct ufs_hba *hba)
{
ufshcd_writel(hba, INT_AGGR_ENABLE |
INT_AGGR_COUNTER_AND_TIMER_RESET,
REG_UTP_TRANSFER_REQ_INT_AGG_CONTROL);
}
/**
* ufshcd_config_intr_aggr - Configure interrupt aggregation values.
* @hba: per adapter instance
* @cnt: Interrupt aggregation counter threshold
* @tmout: Interrupt aggregation timeout value
*/
static inline void
ufshcd_config_intr_aggr(struct ufs_hba *hba, u8 cnt, u8 tmout)
{
ufshcd_writel(hba, INT_AGGR_ENABLE | INT_AGGR_PARAM_WRITE |
INT_AGGR_COUNTER_THLD_VAL(cnt) |
INT_AGGR_TIMEOUT_VAL(tmout),
REG_UTP_TRANSFER_REQ_INT_AGG_CONTROL);
}
/**
* ufshcd_disable_intr_aggr - Disables interrupt aggregation.
* @hba: per adapter instance
*/
static inline void ufshcd_disable_intr_aggr(struct ufs_hba *hba)
{
ufshcd_writel(hba, 0, REG_UTP_TRANSFER_REQ_INT_AGG_CONTROL);
}
/**
 * ufshcd_enable_run_stop_reg - Enable run-stop registers.
 * Setting the run-stop registers to 1 indicates to the host
 * controller that it can process requests.
* @hba: per adapter instance
*/
static void ufshcd_enable_run_stop_reg(struct ufs_hba *hba)
{
ufshcd_writel(hba, UTP_TASK_REQ_LIST_RUN_STOP_BIT,
REG_UTP_TASK_REQ_LIST_RUN_STOP);
ufshcd_writel(hba, UTP_TRANSFER_REQ_LIST_RUN_STOP_BIT,
REG_UTP_TRANSFER_REQ_LIST_RUN_STOP);
}
/**
* ufshcd_hba_start - Start controller initialization sequence
* @hba: per adapter instance
*/
static inline void ufshcd_hba_start(struct ufs_hba *hba)
{
u32 val = CONTROLLER_ENABLE;
if (ufshcd_crypto_enable(hba))
val |= CRYPTO_GENERAL_ENABLE;
ufshcd_writel(hba, val, REG_CONTROLLER_ENABLE);
}
/**
* ufshcd_is_hba_active - Get controller state
* @hba: per adapter instance
*
* Return: true if and only if the controller is active.
*/
bool ufshcd_is_hba_active(struct ufs_hba *hba)
{
return ufshcd_readl(hba, REG_CONTROLLER_ENABLE) & CONTROLLER_ENABLE;
}
EXPORT_SYMBOL_GPL(ufshcd_is_hba_active);
u32 ufshcd_get_local_unipro_ver(struct ufs_hba *hba)
{
/* HCI versions 1.0 and 1.1 support UniPro 1.41 */
if (hba->ufs_version <= ufshci_version(1, 1))
return UFS_UNIPRO_VER_1_41;
else
return UFS_UNIPRO_VER_1_6;
}
EXPORT_SYMBOL(ufshcd_get_local_unipro_ver);
static bool ufshcd_is_unipro_pa_params_tuning_req(struct ufs_hba *hba)
{
/*
* If both host and device support UniPro ver1.6 or later, PA layer
* parameters tuning happens during link startup itself.
*
* We can manually tune PA layer parameters if either host or device
* doesn't support UniPro ver 1.6 or later. But to keep manual tuning
* logic simple, we will only do manual tuning if local unipro version
* doesn't support ver1.6 or later.
*/
return ufshcd_get_local_unipro_ver(hba) < UFS_UNIPRO_VER_1_6;
}
/**
* ufshcd_set_clk_freq - set UFS controller clock frequencies
* @hba: per adapter instance
 * @scale_up: If true, set the maximum possible frequency, otherwise set the low frequency
*
* Return: 0 if successful; < 0 upon failure.
*/
static int ufshcd_set_clk_freq(struct ufs_hba *hba, bool scale_up)
{
int ret = 0;
struct ufs_clk_info *clki;
struct list_head *head = &hba->clk_list_head;
if (list_empty(head))
goto out;
list_for_each_entry(clki, head, list) {
if (!IS_ERR_OR_NULL(clki->clk)) {
if (scale_up && clki->max_freq) {
if (clki->curr_freq == clki->max_freq)
continue;
ret = clk_set_rate(clki->clk, clki->max_freq);
if (ret) {
dev_err(hba->dev, "%s: %s clk set rate(%dHz) failed, %d\n",
__func__, clki->name,
clki->max_freq, ret);
break;
}
trace_ufshcd_clk_scaling(dev_name(hba->dev),
"scaled up", clki->name,
clki->curr_freq,
clki->max_freq);
clki->curr_freq = clki->max_freq;
} else if (!scale_up && clki->min_freq) {
if (clki->curr_freq == clki->min_freq)
continue;
ret = clk_set_rate(clki->clk, clki->min_freq);
if (ret) {
dev_err(hba->dev, "%s: %s clk set rate(%dHz) failed, %d\n",
__func__, clki->name,
clki->min_freq, ret);
break;
}
trace_ufshcd_clk_scaling(dev_name(hba->dev),
"scaled down", clki->name,
clki->curr_freq,
clki->min_freq);
clki->curr_freq = clki->min_freq;
}
}
dev_dbg(hba->dev, "%s: clk: %s, rate: %lu\n", __func__,
clki->name, clk_get_rate(clki->clk));
}
out:
return ret;
}
/**
* ufshcd_scale_clks - scale up or scale down UFS controller clocks
* @hba: per adapter instance
* @scale_up: True if scaling up and false if scaling down
*
* Return: 0 if successful; < 0 upon failure.
*/
static int ufshcd_scale_clks(struct ufs_hba *hba, bool scale_up)
{
int ret = 0;
ktime_t start = ktime_get();
ret = ufshcd_vops_clk_scale_notify(hba, scale_up, PRE_CHANGE);
if (ret)
goto out;
ret = ufshcd_set_clk_freq(hba, scale_up);
if (ret)
goto out;
ret = ufshcd_vops_clk_scale_notify(hba, scale_up, POST_CHANGE);
if (ret)
ufshcd_set_clk_freq(hba, !scale_up);
out:
trace_ufshcd_profile_clk_scaling(dev_name(hba->dev),
(scale_up ? "up" : "down"),
ktime_to_us(ktime_sub(ktime_get(), start)), ret);
return ret;
}
/**
* ufshcd_is_devfreq_scaling_required - check if scaling is required or not
* @hba: per adapter instance
* @scale_up: True if scaling up and false if scaling down
*
* Return: true if scaling is required, false otherwise.
*/
static bool ufshcd_is_devfreq_scaling_required(struct ufs_hba *hba,
bool scale_up)
{
struct ufs_clk_info *clki;
struct list_head *head = &hba->clk_list_head;
if (list_empty(head))
return false;
list_for_each_entry(clki, head, list) {
if (!IS_ERR_OR_NULL(clki->clk)) {
if (scale_up && clki->max_freq) {
if (clki->curr_freq == clki->max_freq)
continue;
return true;
} else if (!scale_up && clki->min_freq) {
if (clki->curr_freq == clki->min_freq)
continue;
return true;
}
}
}
return false;
}
/*
* Determine the number of pending commands by counting the bits in the SCSI
* device budget maps. This approach has been selected because a bit is set in
* the budget map before scsi_host_queue_ready() checks the host_self_blocked
* flag. The host_self_blocked flag can be modified by calling
* scsi_block_requests() or scsi_unblock_requests().
*/
static u32 ufshcd_pending_cmds(struct ufs_hba *hba)
{
const struct scsi_device *sdev;
u32 pending = 0;
lockdep_assert_held(hba->host->host_lock);
__shost_for_each_device(sdev, hba->host)
pending += sbitmap_weight(&sdev->budget_map);
return pending;
}
/*
* Wait until all pending SCSI commands and TMFs have finished or the timeout
* has expired.
*
* Return: 0 upon success; -EBUSY upon timeout.
*/
static int ufshcd_wait_for_doorbell_clr(struct ufs_hba *hba,
u64 wait_timeout_us)
{
unsigned long flags;
int ret = 0;
u32 tm_doorbell;
u32 tr_pending;
bool timeout = false, do_last_check = false;
ktime_t start;
ufshcd_hold(hba);
spin_lock_irqsave(hba->host->host_lock, flags);
/*
* Wait for all the outstanding tasks/transfer requests.
* Verify by checking the doorbell registers are clear.
*/
start = ktime_get();
do {
if (hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL) {
ret = -EBUSY;
goto out;
}
tm_doorbell = ufshcd_readl(hba, REG_UTP_TASK_REQ_DOOR_BELL);
tr_pending = ufshcd_pending_cmds(hba);
if (!tm_doorbell && !tr_pending) {
timeout = false;
break;
} else if (do_last_check) {
break;
}
spin_unlock_irqrestore(hba->host->host_lock, flags);
io_schedule_timeout(msecs_to_jiffies(20));
if (ktime_to_us(ktime_sub(ktime_get(), start)) >
wait_timeout_us) {
timeout = true;
/*
 * We might have been scheduled out for a long time, so
 * make sure to check whether the doorbells have been
 * cleared by now.
*/
do_last_check = true;
}
spin_lock_irqsave(hba->host->host_lock, flags);
} while (tm_doorbell || tr_pending);
if (timeout) {
dev_err(hba->dev,
"%s: timedout waiting for doorbell to clear (tm=0x%x, tr=0x%x)\n",
__func__, tm_doorbell, tr_pending);
ret = -EBUSY;
}
out:
spin_unlock_irqrestore(hba->host->host_lock, flags);
ufshcd_release(hba);
return ret;
}
/**
* ufshcd_scale_gear - scale up/down UFS gear
* @hba: per adapter instance
* @scale_up: True for scaling up gear and false for scaling down
*
* Return: 0 for success; -EBUSY if scaling can't happen at this time;
* non-zero for any other errors.
*/
static int ufshcd_scale_gear(struct ufs_hba *hba, bool scale_up)
{
int ret = 0;
struct ufs_pa_layer_attr new_pwr_info;
if (scale_up) {
memcpy(&new_pwr_info, &hba->clk_scaling.saved_pwr_info,
sizeof(struct ufs_pa_layer_attr));
} else {
memcpy(&new_pwr_info, &hba->pwr_info,
sizeof(struct ufs_pa_layer_attr));
if (hba->pwr_info.gear_tx > hba->clk_scaling.min_gear ||
hba->pwr_info.gear_rx > hba->clk_scaling.min_gear) {
/* save the current power mode */
memcpy(&hba->clk_scaling.saved_pwr_info,
&hba->pwr_info,
sizeof(struct ufs_pa_layer_attr));
/* scale down gear */
new_pwr_info.gear_tx = hba->clk_scaling.min_gear;
new_pwr_info.gear_rx = hba->clk_scaling.min_gear;
}
}
/* check if the power mode needs to be changed */
ret = ufshcd_config_pwr_mode(hba, &new_pwr_info);
if (ret)
dev_err(hba->dev, "%s: failed err %d, old gear: (tx %d rx %d), new gear: (tx %d rx %d)",
__func__, ret,
hba->pwr_info.gear_tx, hba->pwr_info.gear_rx,
new_pwr_info.gear_tx, new_pwr_info.gear_rx);
return ret;
}
/*
* Wait until all pending SCSI commands and TMFs have finished or the timeout
* has expired.
*
* Return: 0 upon success; -EBUSY upon timeout.
*/
static int ufshcd_clock_scaling_prepare(struct ufs_hba *hba, u64 timeout_us)
{
int ret = 0;
/*
* make sure that there are no outstanding requests when
* clock scaling is in progress
*/
ufshcd_scsi_block_requests(hba);
mutex_lock(&hba->wb_mutex);
down_write(&hba->clk_scaling_lock);
if (!hba->clk_scaling.is_allowed ||
ufshcd_wait_for_doorbell_clr(hba, timeout_us)) {
ret = -EBUSY;
up_write(&hba->clk_scaling_lock);
mutex_unlock(&hba->wb_mutex);
ufshcd_scsi_unblock_requests(hba);
goto out;
}
/* let's not get into low power until clock scaling is completed */
ufshcd_hold(hba);
out:
return ret;
}
static void ufshcd_clock_scaling_unprepare(struct ufs_hba *hba, int err, bool scale_up)
{
up_write(&hba->clk_scaling_lock);
/* Enable Write Booster if we have scaled up else disable it */
if (ufshcd_enable_wb_if_scaling_up(hba) && !err)
ufshcd_wb_toggle(hba, scale_up);
mutex_unlock(&hba->wb_mutex);
ufshcd_scsi_unblock_requests(hba);
ufshcd_release(hba);
}
/**
* ufshcd_devfreq_scale - scale up/down UFS clocks and gear
* @hba: per adapter instance
 * @scale_up: True for scaling up and false for scaling down
*
* Return: 0 for success; -EBUSY if scaling can't happen at this time; non-zero
* for any other errors.
*/
static int ufshcd_devfreq_scale(struct ufs_hba *hba, bool scale_up)
{
int ret = 0;
ret = ufshcd_clock_scaling_prepare(hba, 1 * USEC_PER_SEC);
if (ret)
return ret;
/* scale down the gear before scaling down clocks */
if (!scale_up) {
ret = ufshcd_scale_gear(hba, false);
if (ret)
goto out_unprepare;
}
ret = ufshcd_scale_clks(hba, scale_up);
if (ret) {
if (!scale_up)
ufshcd_scale_gear(hba, true);
goto out_unprepare;
}
/* scale up the gear after scaling up clocks */
if (scale_up) {
ret = ufshcd_scale_gear(hba, true);
if (ret) {
ufshcd_scale_clks(hba, false);
goto out_unprepare;
}
}
out_unprepare:
ufshcd_clock_scaling_unprepare(hba, ret, scale_up);
return ret;
}
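/*
 * ufshcd_clk_scaling_suspend_work() and ufshcd_clk_scaling_resume_work()
 * suspend or resume the devfreq instance. The suspend work only runs when
 * there are no active requests and scaling is not already suspended; the
 * resume work undoes the suspension.
 */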
static void ufshcd_clk_scaling_suspend_work(struct work_struct *work)
{
struct ufs_hba *hba = container_of(work, struct ufs_hba,
clk_scaling.suspend_work);
unsigned long irq_flags;
spin_lock_irqsave(hba->host->host_lock, irq_flags);
if (hba->clk_scaling.active_reqs || hba->clk_scaling.is_suspended) {
spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
return;
}
hba->clk_scaling.is_suspended = true;
spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
__ufshcd_suspend_clkscaling(hba);
}
static void ufshcd_clk_scaling_resume_work(struct work_struct *work)
{
struct ufs_hba *hba = container_of(work, struct ufs_hba,
clk_scaling.resume_work);
unsigned long irq_flags;
spin_lock_irqsave(hba->host->host_lock, irq_flags);
if (!hba->clk_scaling.is_suspended) {
spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
return;
}
hba->clk_scaling.is_suspended = false;
spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
devfreq_resume_device(hba->devfreq);
}
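/*
 * ufshcd_devfreq_target - devfreq .target callback. Rounds the requested
 * frequency to the closest supported rate, decides whether that means
 * scaling up or down, and calls ufshcd_devfreq_scale() only when a change
 * is actually required. Scaling is skipped while error handling is in
 * progress, and the suspend work is queued when no requests are active.
 */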
static int ufshcd_devfreq_target(struct device *dev,
unsigned long *freq, u32 flags)
{
int ret = 0;
struct ufs_hba *hba = dev_get_drvdata(dev);
ktime_t start;
bool scale_up, sched_clk_scaling_suspend_work = false;
struct list_head *clk_list = &hba->clk_list_head;
struct ufs_clk_info *clki;
unsigned long irq_flags;
if (!ufshcd_is_clkscaling_supported(hba))
return -EINVAL;
clki = list_first_entry(&hba->clk_list_head, struct ufs_clk_info, list);
/* Override with the closest supported frequency */
*freq = (unsigned long) clk_round_rate(clki->clk, *freq);
spin_lock_irqsave(hba->host->host_lock, irq_flags);
if (ufshcd_eh_in_progress(hba)) {
spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
return 0;
}
if (!hba->clk_scaling.active_reqs)
sched_clk_scaling_suspend_work = true;
if (list_empty(clk_list)) {
spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
goto out;
}
/* Decide based on the rounded-off frequency and update */
scale_up = *freq == clki->max_freq;
if (!scale_up)
*freq = clki->min_freq;
/* Update the frequency */
if (!ufshcd_is_devfreq_scaling_required(hba, scale_up)) {
spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
ret = 0;
goto out; /* no state change required */
}
spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
start = ktime_get();
ret = ufshcd_devfreq_scale(hba, scale_up);
trace_ufshcd_profile_clk_scaling(dev_name(hba->dev),
(scale_up ? "up" : "down"),
ktime_to_us(ktime_sub(ktime_get(), start)), ret);
out:
if (sched_clk_scaling_suspend_work)
queue_work(hba->clk_scaling.workq,
&hba->clk_scaling.suspend_work);
return ret;
}
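/*
 * ufshcd_devfreq_get_dev_status - devfreq .get_dev_status callback.
 * Reports the busy and total time of the current monitoring window for
 * the simple-ondemand governor and then starts a new window.
 */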
static int ufshcd_devfreq_get_dev_status(struct device *dev,
struct devfreq_dev_status *stat)
{
struct ufs_hba *hba = dev_get_drvdata(dev);
struct ufs_clk_scaling *scaling = &hba->clk_scaling;
unsigned long flags;
struct list_head *clk_list = &hba->clk_list_head;
struct ufs_clk_info *clki;
ktime_t curr_t;
if (!ufshcd_is_clkscaling_supported(hba))
return -EINVAL;
memset(stat, 0, sizeof(*stat));
spin_lock_irqsave(hba->host->host_lock, flags);
curr_t = ktime_get();
if (!scaling->window_start_t)
goto start_window;
clki = list_first_entry(clk_list, struct ufs_clk_info, list);
/*
 * If the current frequency is 0, the ondemand governor assumes
 * that no initial frequency has been set and always requests the
 * maximum frequency.
*/
stat->current_frequency = clki->curr_freq;
if (scaling->is_busy_started)
scaling->tot_busy_t += ktime_us_delta(curr_t,
scaling->busy_start_t);
stat->total_time = ktime_us_delta(curr_t, scaling->window_start_t);
stat->busy_time = scaling->tot_busy_t;
start_window:
scaling->window_start_t = curr_t;
scaling->tot_busy_t = 0;
if (scaling->active_reqs) {
scaling->busy_start_t = curr_t;
scaling->is_busy_started = true;
} else {
scaling->busy_start_t = 0;
scaling->is_busy_started = false;
}
spin_unlock_irqrestore(hba->host->host_lock, flags);
return 0;
}
static int ufshcd_devfreq_init(struct ufs_hba *hba)
{
struct list_head *clk_list = &hba->clk_list_head;
struct ufs_clk_info *clki;
struct devfreq *devfreq;
int ret;
/* Skip devfreq if we don't have any clocks in the list */
if (list_empty(clk_list))
return 0;
clki = list_first_entry(clk_list, struct ufs_clk_info, list);
dev_pm_opp_add(hba->dev, clki->min_freq, 0);
dev_pm_opp_add(hba->dev, clki->max_freq, 0);
ufshcd_vops_config_scaling_param(hba, &hba->vps->devfreq_profile,
&hba->vps->ondemand_data);
devfreq = devfreq_add_device(hba->dev,
&hba->vps->devfreq_profile,
DEVFREQ_GOV_SIMPLE_ONDEMAND,
&hba->vps->ondemand_data);
if (IS_ERR(devfreq)) {
ret = PTR_ERR(devfreq);
dev_err(hba->dev, "Unable to register with devfreq %d\n", ret);
dev_pm_opp_remove(hba->dev, clki->min_freq);
dev_pm_opp_remove(hba->dev, clki->max_freq);
return ret;
}
hba->devfreq = devfreq;
return 0;
}
static void ufshcd_devfreq_remove(struct ufs_hba *hba)
{
struct list_head *clk_list = &hba->clk_list_head;
struct ufs_clk_info *clki;
if (!hba->devfreq)
return;
devfreq_remove_device(hba->devfreq);
hba->devfreq = NULL;
clki = list_first_entry(clk_list, struct ufs_clk_info, list);
dev_pm_opp_remove(hba->dev, clki->min_freq);
dev_pm_opp_remove(hba->dev, clki->max_freq);
}
static void __ufshcd_suspend_clkscaling(struct ufs_hba *hba)
{
unsigned long flags;
devfreq_suspend_device(hba->devfreq);
spin_lock_irqsave(hba->host->host_lock, flags);
hba->clk_scaling.window_start_t = 0;
spin_unlock_irqrestore(hba->host->host_lock, flags);
}
static void ufshcd_suspend_clkscaling(struct ufs_hba *hba)
{
unsigned long flags;
bool suspend = false;
cancel_work_sync(&hba->clk_scaling.suspend_work);
cancel_work_sync(&hba->clk_scaling.resume_work);
spin_lock_irqsave(hba->host->host_lock, flags);
if (!hba->clk_scaling.is_suspended) {
suspend = true;
hba->clk_scaling.is_suspended = true;
}
spin_unlock_irqrestore(hba->host->host_lock, flags);
if (suspend)
__ufshcd_suspend_clkscaling(hba);
}
static void ufshcd_resume_clkscaling(struct ufs_hba *hba)
{
unsigned long flags;
bool resume = false;
spin_lock_irqsave(hba->host->host_lock, flags);
if (hba->clk_scaling.is_suspended) {
resume = true;
hba->clk_scaling.is_suspended = false;
}
spin_unlock_irqrestore(hba->host->host_lock, flags);
if (resume)
devfreq_resume_device(hba->devfreq);
}
static ssize_t ufshcd_clkscale_enable_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct ufs_hba *hba = dev_get_drvdata(dev);
return sysfs_emit(buf, "%d\n", hba->clk_scaling.is_enabled);
}
static ssize_t ufshcd_clkscale_enable_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t count)
{
struct ufs_hba *hba = dev_get_drvdata(dev);
u32 value;
int err = 0;
if (kstrtou32(buf, 0, &value))
return -EINVAL;
down(&hba->host_sem);
if (!ufshcd_is_user_access_allowed(hba)) {
err = -EBUSY;
goto out;
}
value = !!value;
if (value == hba->clk_scaling.is_enabled)
goto out;
ufshcd_rpm_get_sync(hba);
ufshcd_hold(hba);
hba->clk_scaling.is_enabled = value;
if (value) {
ufshcd_resume_clkscaling(hba);
} else {
ufshcd_suspend_clkscaling(hba);
err = ufshcd_devfreq_scale(hba, true);
if (err)
dev_err(hba->dev, "%s: failed to scale clocks up %d\n",
__func__, err);
}
ufshcd_release(hba);
ufshcd_rpm_put_sync(hba);
out:
up(&hba->host_sem);
return err ? err : count;
}
static void ufshcd_init_clk_scaling_sysfs(struct ufs_hba *hba)
{
hba->clk_scaling.enable_attr.show = ufshcd_clkscale_enable_show;
hba->clk_scaling.enable_attr.store = ufshcd_clkscale_enable_store;
sysfs_attr_init(&hba->clk_scaling.enable_attr.attr);
hba->clk_scaling.enable_attr.attr.name = "clkscale_enable";
hba->clk_scaling.enable_attr.attr.mode = 0644;
if (device_create_file(hba->dev, &hba->clk_scaling.enable_attr))
dev_err(hba->dev, "Failed to create sysfs for clkscale_enable\n");
}
static void ufshcd_remove_clk_scaling_sysfs(struct ufs_hba *hba)
{
if (hba->clk_scaling.enable_attr.attr.name)
device_remove_file(hba->dev, &hba->clk_scaling.enable_attr);
}
static void ufshcd_init_clk_scaling(struct ufs_hba *hba)
{
char wq_name[sizeof("ufs_clkscaling_00")];
if (!ufshcd_is_clkscaling_supported(hba))
return;
if (!hba->clk_scaling.min_gear)
hba->clk_scaling.min_gear = UFS_HS_G1;
INIT_WORK(&hba->clk_scaling.suspend_work,
ufshcd_clk_scaling_suspend_work);
INIT_WORK(&hba->clk_scaling.resume_work,
ufshcd_clk_scaling_resume_work);
snprintf(wq_name, sizeof(wq_name), "ufs_clkscaling_%d",
hba->host->host_no);
hba->clk_scaling.workq = create_singlethread_workqueue(wq_name);
hba->clk_scaling.is_initialized = true;
}
static void ufshcd_exit_clk_scaling(struct ufs_hba *hba)
{
if (!hba->clk_scaling.is_initialized)
return;
ufshcd_remove_clk_scaling_sysfs(hba);
destroy_workqueue(hba->clk_scaling.workq);
ufshcd_devfreq_remove(hba);
hba->clk_scaling.is_initialized = false;
}
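/*
 * ufshcd_ungate_work - work item that turns the clocks back on after a
 * gating period: it puts the host controller supply back into high power
 * mode, re-enables the clocks and the interrupt line, and exits hibern8
 * if the link was left in hibern8 while gating.
 */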
static void ufshcd_ungate_work(struct work_struct *work)
{
int ret;
unsigned long flags;
struct ufs_hba *hba = container_of(work, struct ufs_hba,
clk_gating.ungate_work);
cancel_delayed_work_sync(&hba->clk_gating.gate_work);
spin_lock_irqsave(hba->host->host_lock, flags);
if (hba->clk_gating.state == CLKS_ON) {
spin_unlock_irqrestore(hba->host->host_lock, flags);
return;
}
spin_unlock_irqrestore(hba->host->host_lock, flags);
ufshcd_hba_vreg_set_hpm(hba);
ufshcd_setup_clocks(hba, true);
ufshcd_enable_irq(hba);
/* Exit from hibern8 */
if (ufshcd_can_hibern8_during_gating(hba)) {
/* Prevent gating in this path */
hba->clk_gating.is_suspended = true;
if (ufshcd_is_link_hibern8(hba)) {
ret = ufshcd_uic_hibern8_exit(hba);
if (ret)
dev_err(hba->dev, "%s: hibern8 exit failed %d\n",
__func__, ret);
else
ufshcd_set_link_active(hba);
}
hba->clk_gating.is_suspended = false;
}
}
/**
* ufshcd_hold - Enable clocks that were gated earlier due to ufshcd_release.
* Also, exit from hibern8 mode and set the link as active.
* @hba: per adapter instance
*/
void ufshcd_hold(struct ufs_hba *hba)
{
bool flush_result;
unsigned long flags;
if (!ufshcd_is_clkgating_allowed(hba) ||
!hba->clk_gating.is_initialized)
return;
spin_lock_irqsave(hba->host->host_lock, flags);
hba->clk_gating.active_reqs++;
start:
switch (hba->clk_gating.state) {
case CLKS_ON:
/*
* Wait for the ungate work to complete if in progress.
 * Even though the clocks may be on, the link could still be in
 * hibern8 state if hibern8 is allowed during clock gating.
 * Make sure we exit hibern8 in addition to having the clocks
 * on.
*/
if (ufshcd_can_hibern8_during_gating(hba) &&
ufshcd_is_link_hibern8(hba)) {
spin_unlock_irqrestore(hba->host->host_lock, flags);
flush_result = flush_work(&hba->clk_gating.ungate_work);
if (hba->clk_gating.is_suspended && !flush_result)
return;
spin_lock_irqsave(hba->host->host_lock, flags);
goto start;
}
break;
case REQ_CLKS_OFF:
if (cancel_delayed_work(&hba->clk_gating.gate_work)) {
hba->clk_gating.state = CLKS_ON;
trace_ufshcd_clk_gating(dev_name(hba->dev),
hba->clk_gating.state);
break;
}
/*
* If we are here, it means gating work is either done or
* currently running. Hence, fall through to cancel gating
* work and to enable clocks.
*/
fallthrough;
case CLKS_OFF:
hba->clk_gating.state = REQ_CLKS_ON;
trace_ufshcd_clk_gating(dev_name(hba->dev),
hba->clk_gating.state);
queue_work(hba->clk_gating.clk_gating_workq,
&hba->clk_gating.ungate_work);
/*
* fall through to check if we should wait for this
* work to be done or not.
*/
fallthrough;
case REQ_CLKS_ON:
spin_unlock_irqrestore(hba->host->host_lock, flags);
flush_work(&hba->clk_gating.ungate_work);
/* Make sure state is CLKS_ON before returning */
spin_lock_irqsave(hba->host->host_lock, flags);
goto start;
default:
dev_err(hba->dev, "%s: clk gating is in invalid state %d\n",
__func__, hba->clk_gating.state);
break;
}
spin_unlock_irqrestore(hba->host->host_lock, flags);
}
EXPORT_SYMBOL_GPL(ufshcd_hold);
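/*
 * ufshcd_gate_work - delayed work that actually gates the clocks: if no
 * requests, tasks or UIC commands are outstanding it puts the link into
 * hibern8 (when allowed), disables the interrupt line, turns the clocks
 * off and drops the host controller supply into low power mode.
 */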
static void ufshcd_gate_work(struct work_struct *work)
{
struct ufs_hba *hba = container_of(work, struct ufs_hba,
clk_gating.gate_work.work);
unsigned long flags;
int ret;
spin_lock_irqsave(hba->host->host_lock, flags);
/*
 * If this work was scheduled for cancellation, the gating state
 * would be marked as REQ_CLKS_ON. In that case save time by
 * skipping the gating work and exiting after setting the clock
 * state back to CLKS_ON.
*/
if (hba->clk_gating.is_suspended ||
(hba->clk_gating.state != REQ_CLKS_OFF)) {
hba->clk_gating.state = CLKS_ON;
trace_ufshcd_clk_gating(dev_name(hba->dev),
hba->clk_gating.state);
goto rel_lock;
}
if (hba->clk_gating.active_reqs
|| hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL
|| hba->outstanding_reqs || hba->outstanding_tasks
|| hba->active_uic_cmd || hba->uic_async_done)
goto rel_lock;
spin_unlock_irqrestore(hba->host->host_lock, flags);
/* put the link into hibern8 mode before turning off clocks */
if (ufshcd_can_hibern8_during_gating(hba)) {
ret = ufshcd_uic_hibern8_enter(hba);
if (ret) {
hba->clk_gating.state = CLKS_ON;
dev_err(hba->dev, "%s: hibern8 enter failed %d\n",
__func__, ret);
trace_ufshcd_clk_gating(dev_name(hba->dev),
hba->clk_gating.state);
goto out;
}
ufshcd_set_link_hibern8(hba);
}
ufshcd_disable_irq(hba);
ufshcd_setup_clocks(hba, false);
/* Put the host controller in low power mode if possible */
ufshcd_hba_vreg_set_lpm(hba);
/*
 * If this work was scheduled for cancellation, the gating state
 * would be marked as REQ_CLKS_ON. In that case keep the state as
 * REQ_CLKS_ON, which still implies that the clocks are off and a
 * request to turn them on is pending. This keeps the state machine
 * intact and ultimately prevents the cancel work from being done
 * multiple times when new requests arrive before the current
 * cancel work is finished.
*/
spin_lock_irqsave(hba->host->host_lock, flags);
if (hba->clk_gating.state == REQ_CLKS_OFF) {
hba->clk_gating.state = CLKS_OFF;
trace_ufshcd_clk_gating(dev_name(hba->dev),
hba->clk_gating.state);
}
rel_lock:
spin_unlock_irqrestore(hba->host->host_lock, flags);
out:
return;
}
/* host lock must be held before calling this variant */
static void __ufshcd_release(struct ufs_hba *hba)
{
if (!ufshcd_is_clkgating_allowed(hba))
return;
hba->clk_gating.active_reqs--;
if (hba->clk_gating.active_reqs || hba->clk_gating.is_suspended ||
hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL ||
hba->outstanding_tasks || !hba->clk_gating.is_initialized ||
hba->active_uic_cmd || hba->uic_async_done ||
hba->clk_gating.state == CLKS_OFF)
return;
hba->clk_gating.state = REQ_CLKS_OFF;
trace_ufshcd_clk_gating(dev_name(hba->dev), hba->clk_gating.state);
queue_delayed_work(hba->clk_gating.clk_gating_workq,
&hba->clk_gating.gate_work,
msecs_to_jiffies(hba->clk_gating.delay_ms));
}
void ufshcd_release(struct ufs_hba *hba)
{
unsigned long flags;
spin_lock_irqsave(hba->host->host_lock, flags);
__ufshcd_release(hba);
spin_unlock_irqrestore(hba->host->host_lock, flags);
}
EXPORT_SYMBOL_GPL(ufshcd_release);
static ssize_t ufshcd_clkgate_delay_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct ufs_hba *hba = dev_get_drvdata(dev);
return sysfs_emit(buf, "%lu\n", hba->clk_gating.delay_ms);
}
void ufshcd_clkgate_delay_set(struct device *dev, unsigned long value)
{
struct ufs_hba *hba = dev_get_drvdata(dev);
unsigned long flags;
spin_lock_irqsave(hba->host->host_lock, flags);
hba->clk_gating.delay_ms = value;
spin_unlock_irqrestore(hba->host->host_lock, flags);
}
EXPORT_SYMBOL_GPL(ufshcd_clkgate_delay_set);
static ssize_t ufshcd_clkgate_delay_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t count)
{
unsigned long value;
if (kstrtoul(buf, 0, &value))
return -EINVAL;
ufshcd_clkgate_delay_set(dev, value);
return count;
}
static ssize_t ufshcd_clkgate_enable_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct ufs_hba *hba = dev_get_drvdata(dev);
return sysfs_emit(buf, "%d\n", hba->clk_gating.is_enabled);
}
static ssize_t ufshcd_clkgate_enable_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t count)
{
struct ufs_hba *hba = dev_get_drvdata(dev);
unsigned long flags;
u32 value;
if (kstrtou32(buf, 0, &value))
return -EINVAL;
value = !!value;
spin_lock_irqsave(hba->host->host_lock, flags);
if (value == hba->clk_gating.is_enabled)
goto out;
if (value)
__ufshcd_release(hba);
else
hba->clk_gating.active_reqs++;
hba->clk_gating.is_enabled = value;
out:
spin_unlock_irqrestore(hba->host->host_lock, flags);
return count;
}
static void ufshcd_init_clk_gating_sysfs(struct ufs_hba *hba)
{
hba->clk_gating.delay_attr.show = ufshcd_clkgate_delay_show;
hba->clk_gating.delay_attr.store = ufshcd_clkgate_delay_store;
sysfs_attr_init(&hba->clk_gating.delay_attr.attr);
hba->clk_gating.delay_attr.attr.name = "clkgate_delay_ms";
hba->clk_gating.delay_attr.attr.mode = 0644;
if (device_create_file(hba->dev, &hba->clk_gating.delay_attr))
dev_err(hba->dev, "Failed to create sysfs for clkgate_delay\n");
hba->clk_gating.enable_attr.show = ufshcd_clkgate_enable_show;
hba->clk_gating.enable_attr.store = ufshcd_clkgate_enable_store;
sysfs_attr_init(&hba->clk_gating.enable_attr.attr);
hba->clk_gating.enable_attr.attr.name = "clkgate_enable";
hba->clk_gating.enable_attr.attr.mode = 0644;
if (device_create_file(hba->dev, &hba->clk_gating.enable_attr))
dev_err(hba->dev, "Failed to create sysfs for clkgate_enable\n");
}
static void ufshcd_remove_clk_gating_sysfs(struct ufs_hba *hba)
{
if (hba->clk_gating.delay_attr.attr.name)
device_remove_file(hba->dev, &hba->clk_gating.delay_attr);
if (hba->clk_gating.enable_attr.attr.name)
device_remove_file(hba->dev, &hba->clk_gating.enable_attr);
}
static void ufshcd_init_clk_gating(struct ufs_hba *hba)
{
char wq_name[sizeof("ufs_clk_gating_00")];
if (!ufshcd_is_clkgating_allowed(hba))
return;
hba->clk_gating.state = CLKS_ON;
hba->clk_gating.delay_ms = 150;
INIT_DELAYED_WORK(&hba->clk_gating.gate_work, ufshcd_gate_work);
INIT_WORK(&hba->clk_gating.ungate_work, ufshcd_ungate_work);
snprintf(wq_name, ARRAY_SIZE(wq_name), "ufs_clk_gating_%d",
hba->host->host_no);
hba->clk_gating.clk_gating_workq = alloc_ordered_workqueue(wq_name,
WQ_MEM_RECLAIM | WQ_HIGHPRI);
ufshcd_init_clk_gating_sysfs(hba);
hba->clk_gating.is_enabled = true;
hba->clk_gating.is_initialized = true;
}
static void ufshcd_exit_clk_gating(struct ufs_hba *hba)
{
if (!hba->clk_gating.is_initialized)
return;
ufshcd_remove_clk_gating_sysfs(hba);
/* Ungate the clock if necessary. */
ufshcd_hold(hba);
hba->clk_gating.is_initialized = false;
ufshcd_release(hba);
destroy_workqueue(hba->clk_gating.clk_gating_workq);
}
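/*
 * ufshcd_clk_scaling_start_busy() and ufshcd_clk_scaling_update_busy()
 * track the busy time consumed by I/O for the devfreq governor:
 * start_busy opens a busy interval (and queues the resume work for the
 * first active request), update_busy closes it once the last active
 * request completes.
 */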
static void ufshcd_clk_scaling_start_busy(struct ufs_hba *hba)
{
bool queue_resume_work = false;
ktime_t curr_t = ktime_get();
unsigned long flags;
if (!ufshcd_is_clkscaling_supported(hba))
return;
spin_lock_irqsave(hba->host->host_lock, flags);
if (!hba->clk_scaling.active_reqs++)
queue_resume_work = true;
if (!hba->clk_scaling.is_enabled || hba->pm_op_in_progress) {
spin_unlock_irqrestore(hba->host->host_lock, flags);
return;
}
if (queue_resume_work)
queue_work(hba->clk_scaling.workq,
&hba->clk_scaling.resume_work);
if (!hba->clk_scaling.window_start_t) {
hba->clk_scaling.window_start_t = curr_t;
hba->clk_scaling.tot_busy_t = 0;
hba->clk_scaling.is_busy_started = false;
}
if (!hba->clk_scaling.is_busy_started) {
hba->clk_scaling.busy_start_t = curr_t;
hba->clk_scaling.is_busy_started = true;
}
spin_unlock_irqrestore(hba->host->host_lock, flags);
}
static void ufshcd_clk_scaling_update_busy(struct ufs_hba *hba)
{
struct ufs_clk_scaling *scaling = &hba->clk_scaling;
unsigned long flags;
if (!ufshcd_is_clkscaling_supported(hba))
return;
spin_lock_irqsave(hba->host->host_lock, flags);
hba->clk_scaling.active_reqs--;
if (!scaling->active_reqs && scaling->is_busy_started) {
scaling->tot_busy_t += ktime_to_us(ktime_sub(ktime_get(),
scaling->busy_start_t));
scaling->busy_start_t = 0;
scaling->is_busy_started = false;
}
spin_unlock_irqrestore(hba->host->host_lock, flags);
}
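/*
 * I/O monitor helpers: ufshcd_monitor_opcode2dir() maps a SCSI opcode to
 * a READ/WRITE direction, ufshcd_should_inform_monitor() checks whether a
 * command matches the configured chunk size and was issued after the
 * monitor was enabled, and ufshcd_start_monitor()/ufshcd_update_monitor()
 * maintain the per-direction busy time and latency statistics.
 */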
static inline int ufshcd_monitor_opcode2dir(u8 opcode)
{
if (opcode == READ_6 || opcode == READ_10 || opcode == READ_16)
return READ;
else if (opcode == WRITE_6 || opcode == WRITE_10 || opcode == WRITE_16)
return WRITE;
else
return -EINVAL;
}
static inline bool ufshcd_should_inform_monitor(struct ufs_hba *hba,
struct ufshcd_lrb *lrbp)
{
const struct ufs_hba_monitor *m = &hba->monitor;
return (m->enabled && lrbp && lrbp->cmd &&
(!m->chunk_size || m->chunk_size == lrbp->cmd->sdb.length) &&
ktime_before(hba->monitor.enabled_ts, lrbp->issue_time_stamp));
}
static void ufshcd_start_monitor(struct ufs_hba *hba,
const struct ufshcd_lrb *lrbp)
{
int dir = ufshcd_monitor_opcode2dir(*lrbp->cmd->cmnd);
unsigned long flags;
spin_lock_irqsave(hba->host->host_lock, flags);
if (dir >= 0 && hba->monitor.nr_queued[dir]++ == 0)
hba->monitor.busy_start_ts[dir] = ktime_get();
spin_unlock_irqrestore(hba->host->host_lock, flags);
}
static void ufshcd_update_monitor(struct ufs_hba *hba, const struct ufshcd_lrb *lrbp)
{
int dir = ufshcd_monitor_opcode2dir(*lrbp->cmd->cmnd);
unsigned long flags;
spin_lock_irqsave(hba->host->host_lock, flags);
if (dir >= 0 && hba->monitor.nr_queued[dir] > 0) {
const struct request *req = scsi_cmd_to_rq(lrbp->cmd);
struct ufs_hba_monitor *m = &hba->monitor;
ktime_t now, inc, lat;
now = lrbp->compl_time_stamp;
inc = ktime_sub(now, m->busy_start_ts[dir]);
m->total_busy[dir] = ktime_add(m->total_busy[dir], inc);
m->nr_sec_rw[dir] += blk_rq_sectors(req);
/* Update latencies */
m->nr_req[dir]++;
lat = ktime_sub(now, lrbp->issue_time_stamp);
m->lat_sum[dir] += lat;
if (m->lat_max[dir] < lat || !m->lat_max[dir])
m->lat_max[dir] = lat;
if (m->lat_min[dir] > lat || !m->lat_min[dir])
m->lat_min[dir] = lat;
m->nr_queued[dir]--;
/* Push forward the busy start of monitor */
m->busy_start_ts[dir] = now;
}
spin_unlock_irqrestore(hba->host->host_lock, flags);
}
/**
* ufshcd_send_command - Send SCSI or device management commands
* @hba: per adapter instance
* @task_tag: Task tag of the command
* @hwq: pointer to hardware queue instance
*/
static inline
void ufshcd_send_command(struct ufs_hba *hba, unsigned int task_tag,
struct ufs_hw_queue *hwq)
{
struct ufshcd_lrb *lrbp = &hba->lrb[task_tag];
unsigned long flags;
lrbp->issue_time_stamp = ktime_get();
lrbp->issue_time_stamp_local_clock = local_clock();
lrbp->compl_time_stamp = ktime_set(0, 0);
lrbp->compl_time_stamp_local_clock = 0;
ufshcd_add_command_trace(hba, task_tag, UFS_CMD_SEND);
ufshcd_clk_scaling_start_busy(hba);
if (unlikely(ufshcd_should_inform_monitor(hba, lrbp)))
ufshcd_start_monitor(hba, lrbp);
if (is_mcq_enabled(hba)) {
int utrd_size = sizeof(struct utp_transfer_req_desc);
struct utp_transfer_req_desc *src = lrbp->utr_descriptor_ptr;
struct utp_transfer_req_desc *dest = hwq->sqe_base_addr + hwq->sq_tail_slot;
spin_lock(&hwq->sq_lock);
memcpy(dest, src, utrd_size);
ufshcd_inc_sq_tail(hwq);
spin_unlock(&hwq->sq_lock);
} else {
spin_lock_irqsave(&hba->outstanding_lock, flags);
if (hba->vops && hba->vops->setup_xfer_req)
hba->vops->setup_xfer_req(hba, lrbp->task_tag,
!!lrbp->cmd);
__set_bit(lrbp->task_tag, &hba->outstanding_reqs);
ufshcd_writel(hba, 1 << lrbp->task_tag,
REG_UTP_TRANSFER_REQ_DOOR_BELL);
spin_unlock_irqrestore(&hba->outstanding_lock, flags);
}
}
/**
* ufshcd_copy_sense_data - Copy sense data in case of check condition
* @lrbp: pointer to local reference block
*/
static inline void ufshcd_copy_sense_data(struct ufshcd_lrb *lrbp)
{
u8 *const sense_buffer = lrbp->cmd->sense_buffer;
u16 resp_len;
int len;
resp_len = be16_to_cpu(lrbp->ucd_rsp_ptr->header.data_segment_length);
if (sense_buffer && resp_len) {
int len_to_copy;
len = be16_to_cpu(lrbp->ucd_rsp_ptr->sr.sense_data_len);
len_to_copy = min_t(int, UFS_SENSE_SIZE, len);
memcpy(sense_buffer, lrbp->ucd_rsp_ptr->sr.sense_data,
len_to_copy);
}
}
/**
* ufshcd_copy_query_response() - Copy the Query Response and the data
* descriptor
* @hba: per adapter instance
* @lrbp: pointer to local reference block
*
* Return: 0 upon success; < 0 upon failure.
*/
static
int ufshcd_copy_query_response(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
{
struct ufs_query_res *query_res = &hba->dev_cmd.query.response;
memcpy(&query_res->upiu_res, &lrbp->ucd_rsp_ptr->qr, QUERY_OSF_SIZE);
/* Get the descriptor */
if (hba->dev_cmd.query.descriptor &&
lrbp->ucd_rsp_ptr->qr.opcode == UPIU_QUERY_OPCODE_READ_DESC) {
u8 *descp = (u8 *)lrbp->ucd_rsp_ptr +
GENERAL_UPIU_REQUEST_SIZE;
u16 resp_len;
u16 buf_len;
/* data segment length */
resp_len = be16_to_cpu(lrbp->ucd_rsp_ptr->header
.data_segment_length);
buf_len = be16_to_cpu(
hba->dev_cmd.query.request.upiu_req.length);
if (likely(buf_len >= resp_len)) {
memcpy(hba->dev_cmd.query.descriptor, descp, resp_len);
} else {
dev_warn(hba->dev,
"%s: rsp size %d is bigger than buffer size %d",
__func__, resp_len, buf_len);
return -EINVAL;
}
}
return 0;
}
/**
* ufshcd_hba_capabilities - Read controller capabilities
* @hba: per adapter instance
*
* Return: 0 on success, negative on error.
*/
static inline int ufshcd_hba_capabilities(struct ufs_hba *hba)
{
int err;
hba->capabilities = ufshcd_readl(hba, REG_CONTROLLER_CAPABILITIES);
if (hba->quirks & UFSHCD_QUIRK_BROKEN_64BIT_ADDRESS)
hba->capabilities &= ~MASK_64_ADDRESSING_SUPPORT;
/* nutrs and nutmrs are 0 based values */
hba->nutrs = (hba->capabilities & MASK_TRANSFER_REQUESTS_SLOTS) + 1;
hba->nutmrs =
((hba->capabilities & MASK_TASK_MANAGEMENT_REQUEST_SLOTS) >> 16) + 1;
hba->reserved_slot = hba->nutrs - 1;
/* Read crypto capabilities */
err = ufshcd_hba_init_crypto_capabilities(hba);
if (err) {
dev_err(hba->dev, "crypto setup failed\n");
return err;
}
hba->mcq_sup = FIELD_GET(MASK_MCQ_SUPPORT, hba->capabilities);
if (!hba->mcq_sup)
return 0;
hba->mcq_capabilities = ufshcd_readl(hba, REG_MCQCAP);
hba->ext_iid_sup = FIELD_GET(MASK_EXT_IID_SUPPORT,
hba->mcq_capabilities);
return 0;
}
/**
* ufshcd_ready_for_uic_cmd - Check if controller is ready
* to accept UIC commands
* @hba: per adapter instance
*
* Return: true on success, else false.
*/
static inline bool ufshcd_ready_for_uic_cmd(struct ufs_hba *hba)
{
u32 val;
int ret = read_poll_timeout(ufshcd_readl, val, val & UIC_COMMAND_READY,
500, UIC_CMD_TIMEOUT * 1000, false, hba,
REG_CONTROLLER_STATUS);
	return ret == 0;
}
/**
* ufshcd_get_upmcrs - Get the power mode change request status
* @hba: Pointer to adapter instance
*
* This function gets the UPMCRS field of HCS register
*
* Return: value of UPMCRS field.
*/
static inline u8 ufshcd_get_upmcrs(struct ufs_hba *hba)
{
return (ufshcd_readl(hba, REG_CONTROLLER_STATUS) >> 8) & 0x7;
}
/**
* ufshcd_dispatch_uic_cmd - Dispatch an UIC command to the Unipro layer
* @hba: per adapter instance
* @uic_cmd: UIC command
*/
static inline void
ufshcd_dispatch_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
{
lockdep_assert_held(&hba->uic_cmd_mutex);
WARN_ON(hba->active_uic_cmd);
hba->active_uic_cmd = uic_cmd;
/* Write Args */
ufshcd_writel(hba, uic_cmd->argument1, REG_UIC_COMMAND_ARG_1);
ufshcd_writel(hba, uic_cmd->argument2, REG_UIC_COMMAND_ARG_2);
ufshcd_writel(hba, uic_cmd->argument3, REG_UIC_COMMAND_ARG_3);
ufshcd_add_uic_command_trace(hba, uic_cmd, UFS_CMD_SEND);
/* Write UIC Cmd */
ufshcd_writel(hba, uic_cmd->command & COMMAND_OPCODE_MASK,
REG_UIC_COMMAND);
}
/**
* ufshcd_wait_for_uic_cmd - Wait for completion of an UIC command
* @hba: per adapter instance
* @uic_cmd: UIC command
*
* Return: 0 only if success.
*/
static int
ufshcd_wait_for_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
{
int ret;
unsigned long flags;
lockdep_assert_held(&hba->uic_cmd_mutex);
if (wait_for_completion_timeout(&uic_cmd->done,
msecs_to_jiffies(UIC_CMD_TIMEOUT))) {
ret = uic_cmd->argument2 & MASK_UIC_COMMAND_RESULT;
} else {
ret = -ETIMEDOUT;
dev_err(hba->dev,
"uic cmd 0x%x with arg3 0x%x completion timeout\n",
uic_cmd->command, uic_cmd->argument3);
if (!uic_cmd->cmd_active) {
dev_err(hba->dev, "%s: UIC cmd has been completed, return the result\n",
__func__);
ret = uic_cmd->argument2 & MASK_UIC_COMMAND_RESULT;
}
}
spin_lock_irqsave(hba->host->host_lock, flags);
hba->active_uic_cmd = NULL;
spin_unlock_irqrestore(hba->host->host_lock, flags);
return ret;
}
/**
* __ufshcd_send_uic_cmd - Send UIC commands and retrieve the result
* @hba: per adapter instance
* @uic_cmd: UIC command
* @completion: initialize the completion only if this is set to true
*
* Return: 0 only if success.
*/
static int
__ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd,
bool completion)
{
lockdep_assert_held(&hba->uic_cmd_mutex);
if (!ufshcd_ready_for_uic_cmd(hba)) {
dev_err(hba->dev,
"Controller not ready to accept UIC commands\n");
return -EIO;
}
if (completion)
init_completion(&uic_cmd->done);
uic_cmd->cmd_active = 1;
ufshcd_dispatch_uic_cmd(hba, uic_cmd);
return 0;
}
/**
* ufshcd_send_uic_cmd - Send UIC commands and retrieve the result
* @hba: per adapter instance
* @uic_cmd: UIC command
*
* Return: 0 only if success.
*/
int ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
{
int ret;
if (hba->quirks & UFSHCD_QUIRK_BROKEN_UIC_CMD)
return 0;
ufshcd_hold(hba);
mutex_lock(&hba->uic_cmd_mutex);
ufshcd_add_delay_before_dme_cmd(hba);
ret = __ufshcd_send_uic_cmd(hba, uic_cmd, true);
if (!ret)
ret = ufshcd_wait_for_uic_cmd(hba, uic_cmd);
mutex_unlock(&hba->uic_cmd_mutex);
ufshcd_release(hba);
return ret;
}
/**
 * ufshcd_sgl_to_prdt - SG list to PRDT (Physical Region Description Table, 4DW format)
* @hba: per-adapter instance
* @lrbp: pointer to local reference block
 * @sg_entries: The number of scatter-gather entries actually used
* @sg_list: Pointer to SG list
*/
static void ufshcd_sgl_to_prdt(struct ufs_hba *hba, struct ufshcd_lrb *lrbp, int sg_entries,
struct scatterlist *sg_list)
{
struct ufshcd_sg_entry *prd;
struct scatterlist *sg;
int i;
if (sg_entries) {
if (hba->quirks & UFSHCD_QUIRK_PRDT_BYTE_GRAN)
lrbp->utr_descriptor_ptr->prd_table_length =
cpu_to_le16(sg_entries * ufshcd_sg_entry_size(hba));
else
lrbp->utr_descriptor_ptr->prd_table_length = cpu_to_le16(sg_entries);
prd = lrbp->ucd_prdt_ptr;
for_each_sg(sg_list, sg, sg_entries, i) {
const unsigned int len = sg_dma_len(sg);
/*
* From the UFSHCI spec: "Data Byte Count (DBC): A '0'
* based value that indicates the length, in bytes, of
* the data block. A maximum of length of 256KB may
* exist for any entry. Bits 1:0 of this field shall be
* 11b to indicate Dword granularity. A value of '3'
* indicates 4 bytes, '7' indicates 8 bytes, etc."
*/
WARN_ONCE(len > SZ_256K, "len = %#x\n", len);
prd->size = cpu_to_le32(len - 1);
prd->addr = cpu_to_le64(sg->dma_address);
prd->reserved = 0;
prd = (void *)prd + ufshcd_sg_entry_size(hba);
}
} else {
lrbp->utr_descriptor_ptr->prd_table_length = 0;
}
}
/**
* ufshcd_map_sg - Map scatter-gather list to prdt
* @hba: per adapter instance
* @lrbp: pointer to local reference block
*
* Return: 0 in case of success, non-zero value in case of failure.
*/
static int ufshcd_map_sg(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
{
struct scsi_cmnd *cmd = lrbp->cmd;
int sg_segments = scsi_dma_map(cmd);
if (sg_segments < 0)
return sg_segments;
ufshcd_sgl_to_prdt(hba, lrbp, sg_segments, scsi_sglist(cmd));
return 0;
}
/**
* ufshcd_enable_intr - enable interrupts
* @hba: per adapter instance
* @intrs: interrupt bits
*/
static void ufshcd_enable_intr(struct ufs_hba *hba, u32 intrs)
{
u32 set = ufshcd_readl(hba, REG_INTERRUPT_ENABLE);
if (hba->ufs_version == ufshci_version(1, 0)) {
u32 rw;
rw = set & INTERRUPT_MASK_RW_VER_10;
set = rw | ((set ^ intrs) & intrs);
} else {
set |= intrs;
}
ufshcd_writel(hba, set, REG_INTERRUPT_ENABLE);
}
/**
* ufshcd_disable_intr - disable interrupts
* @hba: per adapter instance
* @intrs: interrupt bits
*/
static void ufshcd_disable_intr(struct ufs_hba *hba, u32 intrs)
{
u32 set = ufshcd_readl(hba, REG_INTERRUPT_ENABLE);
if (hba->ufs_version == ufshci_version(1, 0)) {
u32 rw;
rw = (set & INTERRUPT_MASK_RW_VER_10) &
~(intrs & INTERRUPT_MASK_RW_VER_10);
set = rw | ((set & intrs) & ~INTERRUPT_MASK_RW_VER_10);
} else {
set &= ~intrs;
}
ufshcd_writel(hba, set, REG_INTERRUPT_ENABLE);
}
/**
 * ufshcd_prepare_req_desc_hdr - Fill the UTP transfer request descriptor header
 * according to the request
 * @lrbp: pointer to local reference block
 * @upiu_flags: flags required in the header
 * @cmd_dir: the data direction of the request
 * @ehs_length: Total EHS Length (in 32-byte units) of all Extra Header Segments
*/
static void ufshcd_prepare_req_desc_hdr(struct ufshcd_lrb *lrbp, u8 *upiu_flags,
enum dma_data_direction cmd_dir, int ehs_length)
{
struct utp_transfer_req_desc *req_desc = lrbp->utr_descriptor_ptr;
struct request_desc_header *h = &req_desc->header;
enum utp_data_direction data_direction;
*h = (typeof(*h)){ };
if (cmd_dir == DMA_FROM_DEVICE) {
data_direction = UTP_DEVICE_TO_HOST;
*upiu_flags = UPIU_CMD_FLAGS_READ;
} else if (cmd_dir == DMA_TO_DEVICE) {
data_direction = UTP_HOST_TO_DEVICE;
*upiu_flags = UPIU_CMD_FLAGS_WRITE;
} else {
data_direction = UTP_NO_DATA_TRANSFER;
*upiu_flags = UPIU_CMD_FLAGS_NONE;
}
h->command_type = lrbp->command_type;
h->data_direction = data_direction;
h->ehs_length = ehs_length;
if (lrbp->intr_cmd)
h->interrupt = 1;
/* Prepare crypto related dwords */
ufshcd_prepare_req_desc_hdr_crypto(lrbp, h);
/*
	 * Assign an invalid value to the command status. The controller
	 * updates OCS with the actual command status upon completion.
*/
h->ocs = OCS_INVALID_COMMAND_STATUS;
req_desc->prd_table_length = 0;
}
/**
 * ufshcd_prepare_utp_scsi_cmd_upiu() - fill the utp_transfer_req_desc
 * for SCSI commands
* @lrbp: local reference block pointer
* @upiu_flags: flags
*/
static
void ufshcd_prepare_utp_scsi_cmd_upiu(struct ufshcd_lrb *lrbp, u8 upiu_flags)
{
struct scsi_cmnd *cmd = lrbp->cmd;
struct utp_upiu_req *ucd_req_ptr = lrbp->ucd_req_ptr;
unsigned short cdb_len;
ucd_req_ptr->header = (struct utp_upiu_header){
.transaction_code = UPIU_TRANSACTION_COMMAND,
.flags = upiu_flags,
.lun = lrbp->lun,
.task_tag = lrbp->task_tag,
.command_set_type = UPIU_COMMAND_SET_TYPE_SCSI,
};
ucd_req_ptr->sc.exp_data_transfer_len = cpu_to_be32(cmd->sdb.length);
cdb_len = min_t(unsigned short, cmd->cmd_len, UFS_CDB_SIZE);
memset(ucd_req_ptr->sc.cdb, 0, UFS_CDB_SIZE);
memcpy(ucd_req_ptr->sc.cdb, cmd->cmnd, cdb_len);
memset(lrbp->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp));
}
/**
* ufshcd_prepare_utp_query_req_upiu() - fill the utp_transfer_req_desc for query request
* @hba: UFS hba
* @lrbp: local reference block pointer
* @upiu_flags: flags
*/
static void ufshcd_prepare_utp_query_req_upiu(struct ufs_hba *hba,
struct ufshcd_lrb *lrbp, u8 upiu_flags)
{
struct utp_upiu_req *ucd_req_ptr = lrbp->ucd_req_ptr;
struct ufs_query *query = &hba->dev_cmd.query;
u16 len = be16_to_cpu(query->request.upiu_req.length);
/* Query request header */
ucd_req_ptr->header = (struct utp_upiu_header){
.transaction_code = UPIU_TRANSACTION_QUERY_REQ,
.flags = upiu_flags,
.lun = lrbp->lun,
.task_tag = lrbp->task_tag,
.query_function = query->request.query_func,
		/* Data segment length is only needed for WRITE_DESC */
.data_segment_length =
query->request.upiu_req.opcode ==
UPIU_QUERY_OPCODE_WRITE_DESC ?
cpu_to_be16(len) :
0,
};
/* Copy the Query Request buffer as is */
memcpy(&ucd_req_ptr->qr, &query->request.upiu_req,
QUERY_OSF_SIZE);
/* Copy the Descriptor */
if (query->request.upiu_req.opcode == UPIU_QUERY_OPCODE_WRITE_DESC)
memcpy(ucd_req_ptr + 1, query->descriptor, len);
memset(lrbp->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp));
}
static inline void ufshcd_prepare_utp_nop_upiu(struct ufshcd_lrb *lrbp)
{
struct utp_upiu_req *ucd_req_ptr = lrbp->ucd_req_ptr;
memset(ucd_req_ptr, 0, sizeof(struct utp_upiu_req));
ucd_req_ptr->header = (struct utp_upiu_header){
.transaction_code = UPIU_TRANSACTION_NOP_OUT,
.task_tag = lrbp->task_tag,
};
memset(lrbp->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp));
}
/**
* ufshcd_compose_devman_upiu - UFS Protocol Information Unit(UPIU)
* for Device Management Purposes
* @hba: per adapter instance
* @lrbp: pointer to local reference block
*
* Return: 0 upon success; < 0 upon failure.
*/
static int ufshcd_compose_devman_upiu(struct ufs_hba *hba,
struct ufshcd_lrb *lrbp)
{
u8 upiu_flags;
int ret = 0;
if (hba->ufs_version <= ufshci_version(1, 1))
lrbp->command_type = UTP_CMD_TYPE_DEV_MANAGE;
else
lrbp->command_type = UTP_CMD_TYPE_UFS_STORAGE;
ufshcd_prepare_req_desc_hdr(lrbp, &upiu_flags, DMA_NONE, 0);
if (hba->dev_cmd.type == DEV_CMD_TYPE_QUERY)
ufshcd_prepare_utp_query_req_upiu(hba, lrbp, upiu_flags);
else if (hba->dev_cmd.type == DEV_CMD_TYPE_NOP)
ufshcd_prepare_utp_nop_upiu(lrbp);
else
ret = -EINVAL;
return ret;
}
/**
* ufshcd_comp_scsi_upiu - UFS Protocol Information Unit(UPIU)
* for SCSI Purposes
* @hba: per adapter instance
* @lrbp: pointer to local reference block
*
* Return: 0 upon success; < 0 upon failure.
*/
static int ufshcd_comp_scsi_upiu(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
{
u8 upiu_flags;
int ret = 0;
if (hba->ufs_version <= ufshci_version(1, 1))
lrbp->command_type = UTP_CMD_TYPE_SCSI;
else
lrbp->command_type = UTP_CMD_TYPE_UFS_STORAGE;
if (likely(lrbp->cmd)) {
ufshcd_prepare_req_desc_hdr(lrbp, &upiu_flags, lrbp->cmd->sc_data_direction, 0);
ufshcd_prepare_utp_scsi_cmd_upiu(lrbp, upiu_flags);
} else {
ret = -EINVAL;
}
return ret;
}
/**
* ufshcd_upiu_wlun_to_scsi_wlun - maps UPIU W-LUN id to SCSI W-LUN ID
* @upiu_wlun_id: UPIU W-LUN id
*
* Return: SCSI W-LUN id.
*/
static inline u16 ufshcd_upiu_wlun_to_scsi_wlun(u8 upiu_wlun_id)
{
return (upiu_wlun_id & ~UFS_UPIU_WLUN_ID) | SCSI_W_LUN_BASE;
}
static inline bool is_device_wlun(struct scsi_device *sdev)
{
return sdev->lun ==
ufshcd_upiu_wlun_to_scsi_wlun(UFS_UPIU_UFS_DEVICE_WLUN);
}
/*
* Associate the UFS controller queue with the default and poll HCTX types.
* Initialize the mq_map[] arrays.
*/
static void ufshcd_map_queues(struct Scsi_Host *shost)
{
struct ufs_hba *hba = shost_priv(shost);
int i, queue_offset = 0;
if (!is_mcq_supported(hba)) {
hba->nr_queues[HCTX_TYPE_DEFAULT] = 1;
hba->nr_queues[HCTX_TYPE_READ] = 0;
hba->nr_queues[HCTX_TYPE_POLL] = 1;
hba->nr_hw_queues = 1;
}
for (i = 0; i < shost->nr_maps; i++) {
struct blk_mq_queue_map *map = &shost->tag_set.map[i];
map->nr_queues = hba->nr_queues[i];
if (!map->nr_queues)
continue;
map->queue_offset = queue_offset;
if (i == HCTX_TYPE_POLL && !is_mcq_supported(hba))
map->queue_offset = 0;
blk_mq_map_queues(map);
queue_offset += map->nr_queues;
}
}
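/**
 * ufshcd_init_lrb - initialize a local reference block
 * @hba: per adapter instance
 * @lrb: local reference block to initialize
 * @i: index of the slot the block corresponds to
 *
 * Fills in the CPU and DMA addresses of the UTP transfer request descriptor,
 * command UPIU, response UPIU and PRDT that belong to slot @i.
 */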
static void ufshcd_init_lrb(struct ufs_hba *hba, struct ufshcd_lrb *lrb, int i)
{
struct utp_transfer_cmd_desc *cmd_descp = (void *)hba->ucdl_base_addr +
i * ufshcd_get_ucd_size(hba);
struct utp_transfer_req_desc *utrdlp = hba->utrdl_base_addr;
dma_addr_t cmd_desc_element_addr = hba->ucdl_dma_addr +
i * ufshcd_get_ucd_size(hba);
u16 response_offset = offsetof(struct utp_transfer_cmd_desc,
response_upiu);
u16 prdt_offset = offsetof(struct utp_transfer_cmd_desc, prd_table);
lrb->utr_descriptor_ptr = utrdlp + i;
lrb->utrd_dma_addr = hba->utrdl_dma_addr +
i * sizeof(struct utp_transfer_req_desc);
lrb->ucd_req_ptr = (struct utp_upiu_req *)cmd_descp->command_upiu;
lrb->ucd_req_dma_addr = cmd_desc_element_addr;
lrb->ucd_rsp_ptr = (struct utp_upiu_rsp *)cmd_descp->response_upiu;
lrb->ucd_rsp_dma_addr = cmd_desc_element_addr + response_offset;
lrb->ucd_prdt_ptr = (struct ufshcd_sg_entry *)cmd_descp->prd_table;
lrb->ucd_prdt_dma_addr = cmd_desc_element_addr + prdt_offset;
}
/**
* ufshcd_queuecommand - main entry point for SCSI requests
* @host: SCSI host pointer
* @cmd: command from SCSI Midlayer
*
* Return: 0 for success, non-zero in case of failure.
*/
static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
{
struct ufs_hba *hba = shost_priv(host);
int tag = scsi_cmd_to_rq(cmd)->tag;
struct ufshcd_lrb *lrbp;
int err = 0;
struct ufs_hw_queue *hwq = NULL;
WARN_ONCE(tag < 0 || tag >= hba->nutrs, "Invalid tag %d\n", tag);
switch (hba->ufshcd_state) {
case UFSHCD_STATE_OPERATIONAL:
break;
case UFSHCD_STATE_EH_SCHEDULED_NON_FATAL:
/*
* SCSI error handler can call ->queuecommand() while UFS error
* handler is in progress. Error interrupts could change the
* state from UFSHCD_STATE_RESET to
* UFSHCD_STATE_EH_SCHEDULED_NON_FATAL. Prevent requests
* being issued in that case.
*/
if (ufshcd_eh_in_progress(hba)) {
err = SCSI_MLQUEUE_HOST_BUSY;
goto out;
}
break;
case UFSHCD_STATE_EH_SCHEDULED_FATAL:
/*
* pm_runtime_get_sync() is used at error handling preparation
* stage. If a scsi cmd, e.g. the SSU cmd, is sent from hba's
* PM ops, it can never be finished if we let SCSI layer keep
* retrying it, which gets err handler stuck forever. Neither
* can we let the scsi cmd pass through, because UFS is in bad
* state, the scsi cmd may eventually time out, which will get
* err handler blocked for too long. So, just fail the scsi cmd
* sent from PM ops, err handler can recover PM error anyways.
*/
if (hba->pm_op_in_progress) {
hba->force_reset = true;
set_host_byte(cmd, DID_BAD_TARGET);
scsi_done(cmd);
goto out;
}
fallthrough;
case UFSHCD_STATE_RESET:
err = SCSI_MLQUEUE_HOST_BUSY;
goto out;
case UFSHCD_STATE_ERROR:
set_host_byte(cmd, DID_ERROR);
scsi_done(cmd);
goto out;
}
hba->req_abort_count = 0;
ufshcd_hold(hba);
lrbp = &hba->lrb[tag];
lrbp->cmd = cmd;
lrbp->task_tag = tag;
lrbp->lun = ufshcd_scsi_to_upiu_lun(cmd->device->lun);
lrbp->intr_cmd = !ufshcd_is_intr_aggr_allowed(hba);
ufshcd_prepare_lrbp_crypto(scsi_cmd_to_rq(cmd), lrbp);
lrbp->req_abort_skip = false;
ufshcd_comp_scsi_upiu(hba, lrbp);
err = ufshcd_map_sg(hba, lrbp);
if (err) {
ufshcd_release(hba);
goto out;
}
if (is_mcq_enabled(hba))
hwq = ufshcd_mcq_req_to_hwq(hba, scsi_cmd_to_rq(cmd));
ufshcd_send_command(hba, tag, hwq);
out:
if (ufs_trigger_eh()) {
unsigned long flags;
spin_lock_irqsave(hba->host->host_lock, flags);
ufshcd_schedule_eh_work(hba);
spin_unlock_irqrestore(hba->host->host_lock, flags);
}
return err;
}
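/**
 * ufshcd_compose_dev_cmd - prepare an LRB for a device management command
 * @hba: per adapter instance
 * @lrbp: pointer to local reference block
 * @cmd_type: device management command type (NOP, Query, ...)
 * @tag: task tag to be used for the command
 *
 * Return: 0 upon success; < 0 upon failure.
 */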
static int ufshcd_compose_dev_cmd(struct ufs_hba *hba,
struct ufshcd_lrb *lrbp, enum dev_cmd_type cmd_type, int tag)
{
lrbp->cmd = NULL;
lrbp->task_tag = tag;
lrbp->lun = 0; /* device management cmd is not specific to any LUN */
lrbp->intr_cmd = true; /* No interrupt aggregation */
ufshcd_prepare_lrbp_crypto(NULL, lrbp);
hba->dev_cmd.type = cmd_type;
return ufshcd_compose_devman_upiu(hba, lrbp);
}
/*
* Check with the block layer if the command is inflight
* @cmd: command to check.
*
* Return: true if command is inflight; false if not.
*/
bool ufshcd_cmd_inflight(struct scsi_cmnd *cmd)
{
struct request *rq;
if (!cmd)
return false;
rq = scsi_cmd_to_rq(cmd);
if (!blk_mq_request_started(rq))
return false;
return true;
}
/*
* Clear the pending command in the controller and wait until
* the controller confirms that the command has been cleared.
* @hba: per adapter instance
* @task_tag: The tag number of the command to be cleared.
*/
static int ufshcd_clear_cmd(struct ufs_hba *hba, u32 task_tag)
{
u32 mask = 1U << task_tag;
unsigned long flags;
int err;
if (is_mcq_enabled(hba)) {
/*
		 * MCQ mode. Clean up the MCQ resources similarly to what
		 * ufshcd_utrl_clear() does for SDB mode.
*/
err = ufshcd_mcq_sq_cleanup(hba, task_tag);
if (err) {
dev_err(hba->dev, "%s: failed tag=%d. err=%d\n",
__func__, task_tag, err);
return err;
}
return 0;
}
/* clear outstanding transaction before retry */
spin_lock_irqsave(hba->host->host_lock, flags);
ufshcd_utrl_clear(hba, mask);
spin_unlock_irqrestore(hba->host->host_lock, flags);
/*
* wait for h/w to clear corresponding bit in door-bell.
* max. wait is 1 sec.
*/
return ufshcd_wait_for_register(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL,
mask, ~mask, 1000, 1000);
}
/**
* ufshcd_dev_cmd_completion() - handles device management command responses
* @hba: per adapter instance
* @lrbp: pointer to local reference block
*
* Return: 0 upon success; < 0 upon failure.
*/
static int
ufshcd_dev_cmd_completion(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
{
enum upiu_response_transaction resp;
int err = 0;
hba->ufs_stats.last_hibern8_exit_tstamp = ktime_set(0, 0);
resp = ufshcd_get_req_rsp(lrbp->ucd_rsp_ptr);
switch (resp) {
case UPIU_TRANSACTION_NOP_IN:
if (hba->dev_cmd.type != DEV_CMD_TYPE_NOP) {
err = -EINVAL;
dev_err(hba->dev, "%s: unexpected response %x\n",
__func__, resp);
}
break;
case UPIU_TRANSACTION_QUERY_RSP: {
u8 response = lrbp->ucd_rsp_ptr->header.response;
if (response == 0)
err = ufshcd_copy_query_response(hba, lrbp);
break;
}
case UPIU_TRANSACTION_REJECT_UPIU:
/* TODO: handle Reject UPIU Response */
err = -EPERM;
dev_err(hba->dev, "%s: Reject UPIU not fully implemented\n",
__func__);
break;
case UPIU_TRANSACTION_RESPONSE:
if (hba->dev_cmd.type != DEV_CMD_TYPE_RPMB) {
err = -EINVAL;
dev_err(hba->dev, "%s: unexpected response %x\n", __func__, resp);
}
break;
default:
err = -EINVAL;
dev_err(hba->dev, "%s: Invalid device management cmd response: %x\n",
__func__, resp);
break;
}
return err;
}
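/**
 * ufshcd_wait_for_dev_cmd - wait for a device management command to complete
 * @hba: per adapter instance
 * @lrbp: pointer to local reference block
 * @max_timeout: timeout in milliseconds
 *
 * On timeout the command is cleared in the controller; if the completion
 * handler raced with the clear, the wait is retried.
 *
 * Return: 0 upon success; < 0 upon failure.
 */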
static int ufshcd_wait_for_dev_cmd(struct ufs_hba *hba,
struct ufshcd_lrb *lrbp, int max_timeout)
{
unsigned long time_left = msecs_to_jiffies(max_timeout);
unsigned long flags;
bool pending;
int err;
retry:
time_left = wait_for_completion_timeout(hba->dev_cmd.complete,
time_left);
if (likely(time_left)) {
/*
* The completion handler called complete() and the caller of
* this function still owns the @lrbp tag so the code below does
* not trigger any race conditions.
*/
hba->dev_cmd.complete = NULL;
err = ufshcd_get_tr_ocs(lrbp, NULL);
if (!err)
err = ufshcd_dev_cmd_completion(hba, lrbp);
} else {
err = -ETIMEDOUT;
dev_dbg(hba->dev, "%s: dev_cmd request timedout, tag %d\n",
__func__, lrbp->task_tag);
/* MCQ mode */
if (is_mcq_enabled(hba)) {
err = ufshcd_clear_cmd(hba, lrbp->task_tag);
hba->dev_cmd.complete = NULL;
return err;
}
/* SDB mode */
if (ufshcd_clear_cmd(hba, lrbp->task_tag) == 0) {
/* successfully cleared the command, retry if needed */
err = -EAGAIN;
/*
* Since clearing the command succeeded we also need to
* clear the task tag bit from the outstanding_reqs
* variable.
*/
spin_lock_irqsave(&hba->outstanding_lock, flags);
pending = test_bit(lrbp->task_tag,
&hba->outstanding_reqs);
if (pending) {
hba->dev_cmd.complete = NULL;
__clear_bit(lrbp->task_tag,
&hba->outstanding_reqs);
}
spin_unlock_irqrestore(&hba->outstanding_lock, flags);
if (!pending) {
/*
* The completion handler ran while we tried to
* clear the command.
*/
time_left = 1;
goto retry;
}
} else {
dev_err(hba->dev, "%s: failed to clear tag %d\n",
__func__, lrbp->task_tag);
spin_lock_irqsave(&hba->outstanding_lock, flags);
pending = test_bit(lrbp->task_tag,
&hba->outstanding_reqs);
if (pending)
hba->dev_cmd.complete = NULL;
spin_unlock_irqrestore(&hba->outstanding_lock, flags);
if (!pending) {
/*
* The completion handler ran while we tried to
* clear the command.
*/
time_left = 1;
goto retry;
}
}
}
return err;
}
/**
* ufshcd_exec_dev_cmd - API for sending device management requests
* @hba: UFS hba
* @cmd_type: specifies the type (NOP, Query...)
* @timeout: timeout in milliseconds
*
* Return: 0 upon success; < 0 upon failure.
*
 * NOTE: Since there is only one available tag for device management commands,
 * the caller is expected to hold the hba->dev_cmd.lock mutex.
*/
static int ufshcd_exec_dev_cmd(struct ufs_hba *hba,
enum dev_cmd_type cmd_type, int timeout)
{
DECLARE_COMPLETION_ONSTACK(wait);
const u32 tag = hba->reserved_slot;
struct ufshcd_lrb *lrbp;
int err;
/* Protects use of hba->reserved_slot. */
lockdep_assert_held(&hba->dev_cmd.lock);
down_read(&hba->clk_scaling_lock);
lrbp = &hba->lrb[tag];
lrbp->cmd = NULL;
err = ufshcd_compose_dev_cmd(hba, lrbp, cmd_type, tag);
if (unlikely(err))
goto out;
hba->dev_cmd.complete = &wait;
ufshcd_add_query_upiu_trace(hba, UFS_QUERY_SEND, lrbp->ucd_req_ptr);
ufshcd_send_command(hba, tag, hba->dev_cmd_queue);
err = ufshcd_wait_for_dev_cmd(hba, lrbp, timeout);
ufshcd_add_query_upiu_trace(hba, err ? UFS_QUERY_ERR : UFS_QUERY_COMP,
(struct utp_upiu_req *)lrbp->ucd_rsp_ptr);
out:
up_read(&hba->clk_scaling_lock);
return err;
}
/**
* ufshcd_init_query() - init the query response and request parameters
* @hba: per-adapter instance
* @request: address of the request pointer to be initialized
* @response: address of the response pointer to be initialized
* @opcode: operation to perform
* @idn: flag idn to access
* @index: LU number to access
* @selector: query/flag/descriptor further identification
*/
static inline void ufshcd_init_query(struct ufs_hba *hba,
struct ufs_query_req **request, struct ufs_query_res **response,
enum query_opcode opcode, u8 idn, u8 index, u8 selector)
{
*request = &hba->dev_cmd.query.request;
*response = &hba->dev_cmd.query.response;
memset(*request, 0, sizeof(struct ufs_query_req));
memset(*response, 0, sizeof(struct ufs_query_res));
(*request)->upiu_req.opcode = opcode;
(*request)->upiu_req.idn = idn;
(*request)->upiu_req.index = index;
(*request)->upiu_req.selector = selector;
}
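/**
 * ufshcd_query_flag_retry - ufshcd_query_flag() with retries
 * @hba: per-adapter instance
 * @opcode: flag query to perform
 * @idn: flag idn to access
 * @index: flag index to access
 * @flag_res: the flag value after the query request completes
 *
 * Retries the flag query up to QUERY_REQ_RETRIES times.
 *
 * Return: 0 for success, non-zero in case of failure.
 */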
static int ufshcd_query_flag_retry(struct ufs_hba *hba,
enum query_opcode opcode, enum flag_idn idn, u8 index, bool *flag_res)
{
int ret;
int retries;
for (retries = 0; retries < QUERY_REQ_RETRIES; retries++) {
ret = ufshcd_query_flag(hba, opcode, idn, index, flag_res);
if (ret)
dev_dbg(hba->dev,
"%s: failed with error %d, retries %d\n",
__func__, ret, retries);
else
break;
}
if (ret)
dev_err(hba->dev,
"%s: query flag, opcode %d, idn %d, failed with error %d after %d retries\n",
__func__, opcode, idn, ret, retries);
return ret;
}
/**
* ufshcd_query_flag() - API function for sending flag query requests
* @hba: per-adapter instance
* @opcode: flag query to perform
* @idn: flag idn to access
* @index: flag index to access
* @flag_res: the flag value after the query request completes
*
* Return: 0 for success, non-zero in case of failure.
*/
int ufshcd_query_flag(struct ufs_hba *hba, enum query_opcode opcode,
enum flag_idn idn, u8 index, bool *flag_res)
{
struct ufs_query_req *request = NULL;
struct ufs_query_res *response = NULL;
int err, selector = 0;
int timeout = QUERY_REQ_TIMEOUT;
BUG_ON(!hba);
ufshcd_hold(hba);
mutex_lock(&hba->dev_cmd.lock);
ufshcd_init_query(hba, &request, &response, opcode, idn, index,
selector);
switch (opcode) {
case UPIU_QUERY_OPCODE_SET_FLAG:
case UPIU_QUERY_OPCODE_CLEAR_FLAG:
case UPIU_QUERY_OPCODE_TOGGLE_FLAG:
request->query_func = UPIU_QUERY_FUNC_STANDARD_WRITE_REQUEST;
break;
case UPIU_QUERY_OPCODE_READ_FLAG:
request->query_func = UPIU_QUERY_FUNC_STANDARD_READ_REQUEST;
if (!flag_res) {
/* No dummy reads */
dev_err(hba->dev, "%s: Invalid argument for read request\n",
__func__);
err = -EINVAL;
goto out_unlock;
}
break;
default:
dev_err(hba->dev,
"%s: Expected query flag opcode but got = %d\n",
__func__, opcode);
err = -EINVAL;
goto out_unlock;
}
err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, timeout);
if (err) {
dev_err(hba->dev,
"%s: Sending flag query for idn %d failed, err = %d\n",
__func__, idn, err);
goto out_unlock;
}
if (flag_res)
*flag_res = (be32_to_cpu(response->upiu_res.value) &
MASK_QUERY_UPIU_FLAG_LOC) & 0x1;
out_unlock:
mutex_unlock(&hba->dev_cmd.lock);
ufshcd_release(hba);
return err;
}
/**
* ufshcd_query_attr - API function for sending attribute requests
* @hba: per-adapter instance
* @opcode: attribute opcode
* @idn: attribute idn to access
* @index: index field
* @selector: selector field
* @attr_val: the attribute value after the query request completes
*
* Return: 0 for success, non-zero in case of failure.
*/
int ufshcd_query_attr(struct ufs_hba *hba, enum query_opcode opcode,
enum attr_idn idn, u8 index, u8 selector, u32 *attr_val)
{
struct ufs_query_req *request = NULL;
struct ufs_query_res *response = NULL;
int err;
BUG_ON(!hba);
if (!attr_val) {
dev_err(hba->dev, "%s: attribute value required for opcode 0x%x\n",
__func__, opcode);
return -EINVAL;
}
ufshcd_hold(hba);
mutex_lock(&hba->dev_cmd.lock);
ufshcd_init_query(hba, &request, &response, opcode, idn, index,
selector);
switch (opcode) {
case UPIU_QUERY_OPCODE_WRITE_ATTR:
request->query_func = UPIU_QUERY_FUNC_STANDARD_WRITE_REQUEST;
request->upiu_req.value = cpu_to_be32(*attr_val);
break;
case UPIU_QUERY_OPCODE_READ_ATTR:
request->query_func = UPIU_QUERY_FUNC_STANDARD_READ_REQUEST;
break;
default:
dev_err(hba->dev, "%s: Expected query attr opcode but got = 0x%.2x\n",
__func__, opcode);
err = -EINVAL;
goto out_unlock;
}
err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, QUERY_REQ_TIMEOUT);
if (err) {
dev_err(hba->dev, "%s: opcode 0x%.2x for idn %d failed, index %d, err = %d\n",
__func__, opcode, idn, index, err);
goto out_unlock;
}
*attr_val = be32_to_cpu(response->upiu_res.value);
out_unlock:
mutex_unlock(&hba->dev_cmd.lock);
ufshcd_release(hba);
return err;
}
/**
* ufshcd_query_attr_retry() - API function for sending query
* attribute with retries
* @hba: per-adapter instance
* @opcode: attribute opcode
* @idn: attribute idn to access
* @index: index field
* @selector: selector field
* @attr_val: the attribute value after the query request
* completes
*
* Return: 0 for success, non-zero in case of failure.
*/
int ufshcd_query_attr_retry(struct ufs_hba *hba,
enum query_opcode opcode, enum attr_idn idn, u8 index, u8 selector,
u32 *attr_val)
{
int ret = 0;
u32 retries;
for (retries = QUERY_REQ_RETRIES; retries > 0; retries--) {
ret = ufshcd_query_attr(hba, opcode, idn, index,
selector, attr_val);
if (ret)
dev_dbg(hba->dev, "%s: failed with error %d, retries %d\n",
__func__, ret, retries);
else
break;
}
if (ret)
dev_err(hba->dev,
"%s: query attribute, idn %d, failed with error %d after %d retries\n",
__func__, idn, ret, QUERY_REQ_RETRIES);
return ret;
}
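/**
 * __ufshcd_query_descriptor - send a descriptor read/write query request
 * @hba: per-adapter instance
 * @opcode: UPIU_QUERY_OPCODE_READ_DESC or UPIU_QUERY_OPCODE_WRITE_DESC
 * @idn: descriptor idn to access
 * @index: index field
 * @selector: selector field
 * @desc_buf: buffer holding the descriptor to write or receiving the
 *            descriptor that was read
 * @buf_len: on input the size of @desc_buf, on output the length returned
 *           in the response
 *
 * Return: 0 for success, non-zero in case of failure.
 */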
static int __ufshcd_query_descriptor(struct ufs_hba *hba,
enum query_opcode opcode, enum desc_idn idn, u8 index,
u8 selector, u8 *desc_buf, int *buf_len)
{
struct ufs_query_req *request = NULL;
struct ufs_query_res *response = NULL;
int err;
BUG_ON(!hba);
if (!desc_buf) {
dev_err(hba->dev, "%s: descriptor buffer required for opcode 0x%x\n",
__func__, opcode);
return -EINVAL;
}
if (*buf_len < QUERY_DESC_MIN_SIZE || *buf_len > QUERY_DESC_MAX_SIZE) {
dev_err(hba->dev, "%s: descriptor buffer size (%d) is out of range\n",
__func__, *buf_len);
return -EINVAL;
}
ufshcd_hold(hba);
mutex_lock(&hba->dev_cmd.lock);
ufshcd_init_query(hba, &request, &response, opcode, idn, index,
selector);
hba->dev_cmd.query.descriptor = desc_buf;
request->upiu_req.length = cpu_to_be16(*buf_len);
switch (opcode) {
case UPIU_QUERY_OPCODE_WRITE_DESC:
request->query_func = UPIU_QUERY_FUNC_STANDARD_WRITE_REQUEST;
break;
case UPIU_QUERY_OPCODE_READ_DESC:
request->query_func = UPIU_QUERY_FUNC_STANDARD_READ_REQUEST;
break;
default:
dev_err(hba->dev,
"%s: Expected query descriptor opcode but got = 0x%.2x\n",
__func__, opcode);
err = -EINVAL;
goto out_unlock;
}
err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, QUERY_REQ_TIMEOUT);
if (err) {
dev_err(hba->dev, "%s: opcode 0x%.2x for idn %d failed, index %d, err = %d\n",
__func__, opcode, idn, index, err);
goto out_unlock;
}
*buf_len = be16_to_cpu(response->upiu_res.length);
out_unlock:
hba->dev_cmd.query.descriptor = NULL;
mutex_unlock(&hba->dev_cmd.lock);
ufshcd_release(hba);
return err;
}
/**
* ufshcd_query_descriptor_retry - API function for sending descriptor requests
* @hba: per-adapter instance
* @opcode: attribute opcode
* @idn: attribute idn to access
* @index: index field
* @selector: selector field
* @desc_buf: the buffer that contains the descriptor
* @buf_len: length parameter passed to the device
*
* The buf_len parameter will contain, on return, the length parameter
* received on the response.
*
* Return: 0 for success, non-zero in case of failure.
*/
int ufshcd_query_descriptor_retry(struct ufs_hba *hba,
enum query_opcode opcode,
enum desc_idn idn, u8 index,
u8 selector,
u8 *desc_buf, int *buf_len)
{
int err;
int retries;
for (retries = QUERY_REQ_RETRIES; retries > 0; retries--) {
err = __ufshcd_query_descriptor(hba, opcode, idn, index,
selector, desc_buf, buf_len);
if (!err || err == -EINVAL)
break;
}
return err;
}
/**
* ufshcd_read_desc_param - read the specified descriptor parameter
* @hba: Pointer to adapter instance
* @desc_id: descriptor idn value
* @desc_index: descriptor index
* @param_offset: offset of the parameter to read
* @param_read_buf: pointer to buffer where parameter would be read
* @param_size: sizeof(param_read_buf)
*
* Return: 0 in case of success, non-zero otherwise.
*/
int ufshcd_read_desc_param(struct ufs_hba *hba,
enum desc_idn desc_id,
int desc_index,
u8 param_offset,
u8 *param_read_buf,
u8 param_size)
{
int ret;
u8 *desc_buf;
int buff_len = QUERY_DESC_MAX_SIZE;
bool is_kmalloc = true;
/* Safety check */
if (desc_id >= QUERY_DESC_IDN_MAX || !param_size)
return -EINVAL;
/* Check whether we need temp memory */
if (param_offset != 0 || param_size < buff_len) {
desc_buf = kzalloc(buff_len, GFP_KERNEL);
if (!desc_buf)
return -ENOMEM;
} else {
desc_buf = param_read_buf;
is_kmalloc = false;
}
/* Request for full descriptor */
ret = ufshcd_query_descriptor_retry(hba, UPIU_QUERY_OPCODE_READ_DESC,
desc_id, desc_index, 0,
desc_buf, &buff_len);
if (ret) {
dev_err(hba->dev, "%s: Failed reading descriptor. desc_id %d, desc_index %d, param_offset %d, ret %d\n",
__func__, desc_id, desc_index, param_offset, ret);
goto out;
}
/* Update descriptor length */
buff_len = desc_buf[QUERY_DESC_LENGTH_OFFSET];
if (param_offset >= buff_len) {
dev_err(hba->dev, "%s: Invalid offset 0x%x in descriptor IDN 0x%x, length 0x%x\n",
__func__, param_offset, desc_id, buff_len);
ret = -EINVAL;
goto out;
}
/* Sanity check */
if (desc_buf[QUERY_DESC_DESC_TYPE_OFFSET] != desc_id) {
dev_err(hba->dev, "%s: invalid desc_id %d in descriptor header\n",
__func__, desc_buf[QUERY_DESC_DESC_TYPE_OFFSET]);
ret = -EINVAL;
goto out;
}
if (is_kmalloc) {
/* Make sure we don't copy more data than available */
if (param_offset >= buff_len)
ret = -EINVAL;
else
memcpy(param_read_buf, &desc_buf[param_offset],
min_t(u32, param_size, buff_len - param_offset));
}
out:
if (is_kmalloc)
kfree(desc_buf);
return ret;
}
/**
* struct uc_string_id - unicode string
*
 * @len: size of this descriptor, including this header
* @type: descriptor type
* @uc: unicode string character
*/
struct uc_string_id {
u8 len;
u8 type;
wchar_t uc[];
} __packed;
/* replace non-printable or non-ASCII characters with spaces */
static inline char ufshcd_remove_non_printable(u8 ch)
{
return (ch >= 0x20 && ch <= 0x7e) ? ch : ' ';
}
/**
* ufshcd_read_string_desc - read string descriptor
* @hba: pointer to adapter instance
* @desc_index: descriptor index
* @buf: pointer to buffer where descriptor would be read,
* the caller should free the memory.
* @ascii: if true convert from unicode to ascii characters
* null terminated string.
*
* Return:
* * string size on success.
* * -ENOMEM: on allocation failure
* * -EINVAL: on a wrong parameter
*/
int ufshcd_read_string_desc(struct ufs_hba *hba, u8 desc_index,
u8 **buf, bool ascii)
{
struct uc_string_id *uc_str;
u8 *str;
int ret;
if (!buf)
return -EINVAL;
uc_str = kzalloc(QUERY_DESC_MAX_SIZE, GFP_KERNEL);
if (!uc_str)
return -ENOMEM;
ret = ufshcd_read_desc_param(hba, QUERY_DESC_IDN_STRING, desc_index, 0,
(u8 *)uc_str, QUERY_DESC_MAX_SIZE);
if (ret < 0) {
dev_err(hba->dev, "Reading String Desc failed after %d retries. err = %d\n",
QUERY_REQ_RETRIES, ret);
str = NULL;
goto out;
}
if (uc_str->len <= QUERY_DESC_HDR_SIZE) {
dev_dbg(hba->dev, "String Desc is of zero length\n");
str = NULL;
ret = 0;
goto out;
}
if (ascii) {
ssize_t ascii_len;
int i;
/* remove header and divide by 2 to move from UTF16 to UTF8 */
ascii_len = (uc_str->len - QUERY_DESC_HDR_SIZE) / 2 + 1;
str = kzalloc(ascii_len, GFP_KERNEL);
if (!str) {
ret = -ENOMEM;
goto out;
}
/*
		 * The descriptor contains the string in UTF-16 format;
		 * convert it to UTF-8 so it can be displayed.
*/
ret = utf16s_to_utf8s(uc_str->uc,
uc_str->len - QUERY_DESC_HDR_SIZE,
UTF16_BIG_ENDIAN, str, ascii_len);
/* replace non-printable or non-ASCII characters with spaces */
for (i = 0; i < ret; i++)
str[i] = ufshcd_remove_non_printable(str[i]);
str[ret++] = '\0';
} else {
str = kmemdup(uc_str, uc_str->len, GFP_KERNEL);
if (!str) {
ret = -ENOMEM;
goto out;
}
ret = uc_str->len;
}
out:
*buf = str;
kfree(uc_str);
return ret;
}
/**
* ufshcd_read_unit_desc_param - read the specified unit descriptor parameter
* @hba: Pointer to adapter instance
* @lun: lun id
* @param_offset: offset of the parameter to read
* @param_read_buf: pointer to buffer where parameter would be read
* @param_size: sizeof(param_read_buf)
*
* Return: 0 in case of success, non-zero otherwise.
*/
static inline int ufshcd_read_unit_desc_param(struct ufs_hba *hba,
int lun,
enum unit_desc_param param_offset,
u8 *param_read_buf,
u32 param_size)
{
/*
* Unit descriptors are only available for general purpose LUs (LUN id
* from 0 to 7) and RPMB Well known LU.
*/
if (!ufs_is_valid_unit_desc_lun(&hba->dev_info, lun))
return -EOPNOTSUPP;
return ufshcd_read_desc_param(hba, QUERY_DESC_IDN_UNIT, lun,
param_offset, param_read_buf, param_size);
}
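/**
 * ufshcd_get_ref_clk_gating_wait - read the reference clock gating wait time
 * @hba: per adapter instance
 *
 * For UFS 3.0 and later devices, reads the reference clock gating wait time
 * attribute and caches it in hba->dev_info.clk_gating_wait_us. The default
 * wait time is used if the attribute cannot be read or reports zero.
 *
 * Return: 0 upon success; < 0 upon failure.
 */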
static int ufshcd_get_ref_clk_gating_wait(struct ufs_hba *hba)
{
int err = 0;
u32 gating_wait = UFSHCD_REF_CLK_GATING_WAIT_US;
if (hba->dev_info.wspecversion >= 0x300) {
err = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
QUERY_ATTR_IDN_REF_CLK_GATING_WAIT_TIME, 0, 0,
&gating_wait);
if (err)
dev_err(hba->dev, "Failed reading bRefClkGatingWait. err = %d, use default %uus\n",
err, gating_wait);
if (gating_wait == 0) {
gating_wait = UFSHCD_REF_CLK_GATING_WAIT_US;
dev_err(hba->dev, "Undefined ref clk gating wait time, use default %uus\n",
gating_wait);
}
hba->dev_info.clk_gating_wait_us = gating_wait;
}
return err;
}
/**
* ufshcd_memory_alloc - allocate memory for host memory space data structures
* @hba: per adapter instance
*
* 1. Allocate DMA memory for Command Descriptor array
 * Each command descriptor consists of Command UPIU, Response UPIU and PRDT
* 2. Allocate DMA memory for UTP Transfer Request Descriptor List (UTRDL).
* 3. Allocate DMA memory for UTP Task Management Request Descriptor List
* (UTMRDL)
 * 4. Allocate memory for local reference block (lrb).
*
* Return: 0 for success, non-zero in case of failure.
*/
static int ufshcd_memory_alloc(struct ufs_hba *hba)
{
size_t utmrdl_size, utrdl_size, ucdl_size;
/* Allocate memory for UTP command descriptors */
ucdl_size = ufshcd_get_ucd_size(hba) * hba->nutrs;
hba->ucdl_base_addr = dmam_alloc_coherent(hba->dev,
ucdl_size,
&hba->ucdl_dma_addr,
GFP_KERNEL);
/*
	 * UFSHCI requires the UTP command descriptor to be 128-byte aligned.
*/
if (!hba->ucdl_base_addr ||
WARN_ON(hba->ucdl_dma_addr & (128 - 1))) {
dev_err(hba->dev,
"Command Descriptor Memory allocation failed\n");
goto out;
}
/*
* Allocate memory for UTP Transfer descriptors
* UFSHCI requires 1KB alignment of UTRD
*/
utrdl_size = (sizeof(struct utp_transfer_req_desc) * hba->nutrs);
hba->utrdl_base_addr = dmam_alloc_coherent(hba->dev,
utrdl_size,
&hba->utrdl_dma_addr,
GFP_KERNEL);
if (!hba->utrdl_base_addr ||
WARN_ON(hba->utrdl_dma_addr & (SZ_1K - 1))) {
dev_err(hba->dev,
"Transfer Descriptor Memory allocation failed\n");
goto out;
}
/*
* Skip utmrdl allocation; it may have been
* allocated during first pass and not released during
* MCQ memory allocation.
* See ufshcd_release_sdb_queue() and ufshcd_config_mcq()
*/
if (hba->utmrdl_base_addr)
goto skip_utmrdl;
/*
* Allocate memory for UTP Task Management descriptors
* UFSHCI requires 1KB alignment of UTMRD
*/
utmrdl_size = sizeof(struct utp_task_req_desc) * hba->nutmrs;
hba->utmrdl_base_addr = dmam_alloc_coherent(hba->dev,
utmrdl_size,
&hba->utmrdl_dma_addr,
GFP_KERNEL);
if (!hba->utmrdl_base_addr ||
WARN_ON(hba->utmrdl_dma_addr & (SZ_1K - 1))) {
dev_err(hba->dev,
"Task Management Descriptor Memory allocation failed\n");
goto out;
}
skip_utmrdl:
/* Allocate memory for local reference block */
hba->lrb = devm_kcalloc(hba->dev,
hba->nutrs, sizeof(struct ufshcd_lrb),
GFP_KERNEL);
if (!hba->lrb) {
dev_err(hba->dev, "LRB Memory allocation failed\n");
goto out;
}
return 0;
out:
return -ENOMEM;
}
/**
* ufshcd_host_memory_configure - configure local reference block with
* memory offsets
* @hba: per adapter instance
*
* Configure Host memory space
* 1. Update Corresponding UTRD.UCDBA and UTRD.UCDBAU with UCD DMA
* address.
* 2. Update each UTRD with Response UPIU offset, Response UPIU length
* and PRDT offset.
* 3. Save the corresponding addresses of UTRD, UCD.CMD, UCD.RSP and UCD.PRDT
* into local reference block.
*/
static void ufshcd_host_memory_configure(struct ufs_hba *hba)
{
struct utp_transfer_req_desc *utrdlp;
dma_addr_t cmd_desc_dma_addr;
dma_addr_t cmd_desc_element_addr;
u16 response_offset;
u16 prdt_offset;
int cmd_desc_size;
int i;
utrdlp = hba->utrdl_base_addr;
response_offset =
offsetof(struct utp_transfer_cmd_desc, response_upiu);
prdt_offset =
offsetof(struct utp_transfer_cmd_desc, prd_table);
cmd_desc_size = ufshcd_get_ucd_size(hba);
cmd_desc_dma_addr = hba->ucdl_dma_addr;
for (i = 0; i < hba->nutrs; i++) {
/* Configure UTRD with command descriptor base address */
cmd_desc_element_addr =
(cmd_desc_dma_addr + (cmd_desc_size * i));
utrdlp[i].command_desc_base_addr =
cpu_to_le64(cmd_desc_element_addr);
/* Response upiu and prdt offset should be in double words */
if (hba->quirks & UFSHCD_QUIRK_PRDT_BYTE_GRAN) {
utrdlp[i].response_upiu_offset =
cpu_to_le16(response_offset);
utrdlp[i].prd_table_offset =
cpu_to_le16(prdt_offset);
utrdlp[i].response_upiu_length =
cpu_to_le16(ALIGNED_UPIU_SIZE);
} else {
utrdlp[i].response_upiu_offset =
cpu_to_le16(response_offset >> 2);
utrdlp[i].prd_table_offset =
cpu_to_le16(prdt_offset >> 2);
utrdlp[i].response_upiu_length =
cpu_to_le16(ALIGNED_UPIU_SIZE >> 2);
}
ufshcd_init_lrb(hba, &hba->lrb[i], i);
}
}
/**
* ufshcd_dme_link_startup - Notify Unipro to perform link startup
* @hba: per adapter instance
*
 * The UIC_CMD_DME_LINK_STARTUP command must be issued to the UniPro layer
 * in order to initiate the link startup procedure.
 * Once the UniPro link is up, the device connected to the controller
 * is detected.
*
* Return: 0 on success, non-zero value on failure.
*/
static int ufshcd_dme_link_startup(struct ufs_hba *hba)
{
struct uic_command uic_cmd = {0};
int ret;
uic_cmd.command = UIC_CMD_DME_LINK_STARTUP;
ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
if (ret)
dev_dbg(hba->dev,
"dme-link-startup: error code %d\n", ret);
return ret;
}
/**
* ufshcd_dme_reset - UIC command for DME_RESET
* @hba: per adapter instance
*
 * The DME_RESET command is issued in order to reset the UniPro stack.
 * This function handles cold reset.
*
* Return: 0 on success, non-zero value on failure.
*/
static int ufshcd_dme_reset(struct ufs_hba *hba)
{
struct uic_command uic_cmd = {0};
int ret;
uic_cmd.command = UIC_CMD_DME_RESET;
ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
if (ret)
dev_err(hba->dev,
"dme-reset: error code %d\n", ret);
return ret;
}
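/**
 * ufshcd_dme_configure_adapt - configure the ADAPT type for the TX lanes
 * @hba: per adapter instance
 * @agreed_gear: the negotiated gear
 * @adapt_val: requested PA_TxHSADAPTTYPE value
 *
 * For gears below HS-G4 the requested value is overridden with PA_NO_ADAPT.
 *
 * Return: 0 on success, non-zero value on failure.
 */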
int ufshcd_dme_configure_adapt(struct ufs_hba *hba,
int agreed_gear,
int adapt_val)
{
int ret;
if (agreed_gear < UFS_HS_G4)
adapt_val = PA_NO_ADAPT;
ret = ufshcd_dme_set(hba,
UIC_ARG_MIB(PA_TXHSADAPTTYPE),
adapt_val);
return ret;
}
EXPORT_SYMBOL_GPL(ufshcd_dme_configure_adapt);
/**
* ufshcd_dme_enable - UIC command for DME_ENABLE
* @hba: per adapter instance
*
 * The DME_ENABLE command is issued in order to enable the UniPro stack.
*
* Return: 0 on success, non-zero value on failure.
*/
static int ufshcd_dme_enable(struct ufs_hba *hba)
{
struct uic_command uic_cmd = {0};
int ret;
uic_cmd.command = UIC_CMD_DME_ENABLE;
ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
if (ret)
dev_err(hba->dev,
"dme-enable: error code %d\n", ret);
return ret;
}
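/*
 * For hosts with the UFSHCD_QUIRK_DELAY_BEFORE_DME_CMDS quirk, make sure that
 * at least MIN_DELAY_BEFORE_DME_CMDS_US microseconds elapse between
 * consecutive DME commands.
 */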
static inline void ufshcd_add_delay_before_dme_cmd(struct ufs_hba *hba)
{
#define MIN_DELAY_BEFORE_DME_CMDS_US 1000
unsigned long min_sleep_time_us;
if (!(hba->quirks & UFSHCD_QUIRK_DELAY_BEFORE_DME_CMDS))
return;
/*
	 * last_dme_cmd_tstamp will be 0 only for the first call to
	 * this function.
*/
if (unlikely(!ktime_to_us(hba->last_dme_cmd_tstamp))) {
min_sleep_time_us = MIN_DELAY_BEFORE_DME_CMDS_US;
} else {
unsigned long delta =
(unsigned long) ktime_to_us(
ktime_sub(ktime_get(),
hba->last_dme_cmd_tstamp));
if (delta < MIN_DELAY_BEFORE_DME_CMDS_US)
min_sleep_time_us =
MIN_DELAY_BEFORE_DME_CMDS_US - delta;
else
return; /* no more delay required */
}
/* allow sleep for extra 50us if needed */
usleep_range(min_sleep_time_us, min_sleep_time_us + 50);
}
/**
* ufshcd_dme_set_attr - UIC command for DME_SET, DME_PEER_SET
* @hba: per adapter instance
* @attr_sel: uic command argument1
* @attr_set: attribute set type as uic command argument2
* @mib_val: setting value as uic command argument3
* @peer: indicate whether peer or local
*
* Return: 0 on success, non-zero value on failure.
*/
int ufshcd_dme_set_attr(struct ufs_hba *hba, u32 attr_sel,
u8 attr_set, u32 mib_val, u8 peer)
{
struct uic_command uic_cmd = {0};
static const char *const action[] = {
"dme-set",
"dme-peer-set"
};
const char *set = action[!!peer];
int ret;
int retries = UFS_UIC_COMMAND_RETRIES;
uic_cmd.command = peer ?
UIC_CMD_DME_PEER_SET : UIC_CMD_DME_SET;
uic_cmd.argument1 = attr_sel;
uic_cmd.argument2 = UIC_ARG_ATTR_TYPE(attr_set);
uic_cmd.argument3 = mib_val;
do {
/* for peer attributes we retry upon failure */
ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
if (ret)
dev_dbg(hba->dev, "%s: attr-id 0x%x val 0x%x error code %d\n",
set, UIC_GET_ATTR_ID(attr_sel), mib_val, ret);
} while (ret && peer && --retries);
if (ret)
dev_err(hba->dev, "%s: attr-id 0x%x val 0x%x failed %d retries\n",
set, UIC_GET_ATTR_ID(attr_sel), mib_val,
UFS_UIC_COMMAND_RETRIES - retries);
return ret;
}
EXPORT_SYMBOL_GPL(ufshcd_dme_set_attr);
/**
* ufshcd_dme_get_attr - UIC command for DME_GET, DME_PEER_GET
* @hba: per adapter instance
* @attr_sel: uic command argument1
* @mib_val: the value of the attribute as returned by the UIC command
* @peer: indicate whether peer or local
*
* Return: 0 on success, non-zero value on failure.
*/
int ufshcd_dme_get_attr(struct ufs_hba *hba, u32 attr_sel,
u32 *mib_val, u8 peer)
{
struct uic_command uic_cmd = {0};
static const char *const action[] = {
"dme-get",
"dme-peer-get"
};
const char *get = action[!!peer];
int ret;
int retries = UFS_UIC_COMMAND_RETRIES;
struct ufs_pa_layer_attr orig_pwr_info;
struct ufs_pa_layer_attr temp_pwr_info;
bool pwr_mode_change = false;
if (peer && (hba->quirks & UFSHCD_QUIRK_DME_PEER_ACCESS_AUTO_MODE)) {
orig_pwr_info = hba->pwr_info;
temp_pwr_info = orig_pwr_info;
if (orig_pwr_info.pwr_tx == FAST_MODE ||
orig_pwr_info.pwr_rx == FAST_MODE) {
temp_pwr_info.pwr_tx = FASTAUTO_MODE;
temp_pwr_info.pwr_rx = FASTAUTO_MODE;
pwr_mode_change = true;
} else if (orig_pwr_info.pwr_tx == SLOW_MODE ||
orig_pwr_info.pwr_rx == SLOW_MODE) {
temp_pwr_info.pwr_tx = SLOWAUTO_MODE;
temp_pwr_info.pwr_rx = SLOWAUTO_MODE;
pwr_mode_change = true;
}
if (pwr_mode_change) {
ret = ufshcd_change_power_mode(hba, &temp_pwr_info);
if (ret)
goto out;
}
}
uic_cmd.command = peer ?
UIC_CMD_DME_PEER_GET : UIC_CMD_DME_GET;
uic_cmd.argument1 = attr_sel;
do {
/* for peer attributes we retry upon failure */
ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
if (ret)
dev_dbg(hba->dev, "%s: attr-id 0x%x error code %d\n",
get, UIC_GET_ATTR_ID(attr_sel), ret);
} while (ret && peer && --retries);
if (ret)
dev_err(hba->dev, "%s: attr-id 0x%x failed %d retries\n",
get, UIC_GET_ATTR_ID(attr_sel),
UFS_UIC_COMMAND_RETRIES - retries);
if (mib_val && !ret)
*mib_val = uic_cmd.argument3;
if (peer && (hba->quirks & UFSHCD_QUIRK_DME_PEER_ACCESS_AUTO_MODE)
&& pwr_mode_change)
ufshcd_change_power_mode(hba, &orig_pwr_info);
out:
return ret;
}
EXPORT_SYMBOL_GPL(ufshcd_dme_get_attr);
/**
* ufshcd_uic_pwr_ctrl - executes UIC commands (which affects the link power
* state) and waits for it to take effect.
*
* @hba: per adapter instance
* @cmd: UIC command to execute
*
 * DME operations like DME_SET(PA_PWRMODE), DME_HIBERNATE_ENTER and
 * DME_HIBERNATE_EXIT take some time to take effect on both the host and the
 * device UniPro link, so their final completion is indicated by dedicated
 * status bits in the Interrupt Status register (UPMS, UHES, UHXS) in addition
 * to the normal UIC command completion status (UCCS). This function only
 * returns after the relevant status bits indicate completion.
*
* Return: 0 on success, non-zero value on failure.
*/
static int ufshcd_uic_pwr_ctrl(struct ufs_hba *hba, struct uic_command *cmd)
{
DECLARE_COMPLETION_ONSTACK(uic_async_done);
unsigned long flags;
u8 status;
int ret;
bool reenable_intr = false;
mutex_lock(&hba->uic_cmd_mutex);
ufshcd_add_delay_before_dme_cmd(hba);
spin_lock_irqsave(hba->host->host_lock, flags);
if (ufshcd_is_link_broken(hba)) {
ret = -ENOLINK;
goto out_unlock;
}
hba->uic_async_done = &uic_async_done;
if (ufshcd_readl(hba, REG_INTERRUPT_ENABLE) & UIC_COMMAND_COMPL) {
ufshcd_disable_intr(hba, UIC_COMMAND_COMPL);
/*
* Make sure UIC command completion interrupt is disabled before
* issuing UIC command.
*/
wmb();
reenable_intr = true;
}
spin_unlock_irqrestore(hba->host->host_lock, flags);
ret = __ufshcd_send_uic_cmd(hba, cmd, false);
if (ret) {
dev_err(hba->dev,
"pwr ctrl cmd 0x%x with mode 0x%x uic error %d\n",
cmd->command, cmd->argument3, ret);
goto out;
}
if (!wait_for_completion_timeout(hba->uic_async_done,
msecs_to_jiffies(UIC_CMD_TIMEOUT))) {
dev_err(hba->dev,
"pwr ctrl cmd 0x%x with mode 0x%x completion timeout\n",
cmd->command, cmd->argument3);
if (!cmd->cmd_active) {
dev_err(hba->dev, "%s: Power Mode Change operation has been completed, go check UPMCRS\n",
__func__);
goto check_upmcrs;
}
ret = -ETIMEDOUT;
goto out;
}
check_upmcrs:
status = ufshcd_get_upmcrs(hba);
if (status != PWR_LOCAL) {
dev_err(hba->dev,
"pwr ctrl cmd 0x%x failed, host upmcrs:0x%x\n",
cmd->command, status);
ret = (status != PWR_OK) ? status : -1;
}
out:
if (ret) {
ufshcd_print_host_state(hba);
ufshcd_print_pwr_info(hba);
ufshcd_print_evt_hist(hba);
}
spin_lock_irqsave(hba->host->host_lock, flags);
hba->active_uic_cmd = NULL;
hba->uic_async_done = NULL;
if (reenable_intr)
ufshcd_enable_intr(hba, UIC_COMMAND_COMPL);
if (ret) {
ufshcd_set_link_broken(hba);
ufshcd_schedule_eh_work(hba);
}
out_unlock:
spin_unlock_irqrestore(hba->host->host_lock, flags);
mutex_unlock(&hba->uic_cmd_mutex);
return ret;
}
/**
 * ufshcd_uic_change_pwr_mode - Perform the UIC power mode change
 * using DME_SET primitives.
 * @hba: per adapter instance
 * @mode: power mode value
*
* Return: 0 on success, non-zero value on failure.
*/
int ufshcd_uic_change_pwr_mode(struct ufs_hba *hba, u8 mode)
{
struct uic_command uic_cmd = {0};
int ret;
if (hba->quirks & UFSHCD_QUIRK_BROKEN_PA_RXHSUNTERMCAP) {
ret = ufshcd_dme_set(hba,
UIC_ARG_MIB_SEL(PA_RXHSUNTERMCAP, 0), 1);
if (ret) {
dev_err(hba->dev, "%s: failed to enable PA_RXHSUNTERMCAP ret %d\n",
__func__, ret);
goto out;
}
}
uic_cmd.command = UIC_CMD_DME_SET;
uic_cmd.argument1 = UIC_ARG_MIB(PA_PWRMODE);
uic_cmd.argument3 = mode;
ufshcd_hold(hba);
ret = ufshcd_uic_pwr_ctrl(hba, &uic_cmd);
ufshcd_release(hba);
out:
return ret;
}
EXPORT_SYMBOL_GPL(ufshcd_uic_change_pwr_mode);
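/**
 * ufshcd_link_recovery - recover a broken UFS link
 * @hba: per adapter instance
 *
 * Marks error handling as being in progress, resets the attached device and
 * then resets and restores the host controller.
 *
 * Return: 0 upon success; < 0 upon failure.
 */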
int ufshcd_link_recovery(struct ufs_hba *hba)
{
int ret;
unsigned long flags;
spin_lock_irqsave(hba->host->host_lock, flags);
hba->ufshcd_state = UFSHCD_STATE_RESET;
ufshcd_set_eh_in_progress(hba);
spin_unlock_irqrestore(hba->host->host_lock, flags);
/* Reset the attached device */
ufshcd_device_reset(hba);
ret = ufshcd_host_reset_and_restore(hba);
spin_lock_irqsave(hba->host->host_lock, flags);
if (ret)
hba->ufshcd_state = UFSHCD_STATE_ERROR;
ufshcd_clear_eh_in_progress(hba);
spin_unlock_irqrestore(hba->host->host_lock, flags);
if (ret)
dev_err(hba->dev, "%s: link recovery failed, err %d",
__func__, ret);
return ret;
}
EXPORT_SYMBOL_GPL(ufshcd_link_recovery);
int ufshcd_uic_hibern8_enter(struct ufs_hba *hba)
{
int ret;
struct uic_command uic_cmd = {0};
ktime_t start = ktime_get();
ufshcd_vops_hibern8_notify(hba, UIC_CMD_DME_HIBER_ENTER, PRE_CHANGE);
uic_cmd.command = UIC_CMD_DME_HIBER_ENTER;
ret = ufshcd_uic_pwr_ctrl(hba, &uic_cmd);
trace_ufshcd_profile_hibern8(dev_name(hba->dev), "enter",
ktime_to_us(ktime_sub(ktime_get(), start)), ret);
if (ret)
dev_err(hba->dev, "%s: hibern8 enter failed. ret = %d\n",
__func__, ret);
else
ufshcd_vops_hibern8_notify(hba, UIC_CMD_DME_HIBER_ENTER,
POST_CHANGE);
return ret;
}
EXPORT_SYMBOL_GPL(ufshcd_uic_hibern8_enter);
int ufshcd_uic_hibern8_exit(struct ufs_hba *hba)
{
struct uic_command uic_cmd = {0};
int ret;
ktime_t start = ktime_get();
ufshcd_vops_hibern8_notify(hba, UIC_CMD_DME_HIBER_EXIT, PRE_CHANGE);
uic_cmd.command = UIC_CMD_DME_HIBER_EXIT;
ret = ufshcd_uic_pwr_ctrl(hba, &uic_cmd);
trace_ufshcd_profile_hibern8(dev_name(hba->dev), "exit",
ktime_to_us(ktime_sub(ktime_get(), start)), ret);
if (ret) {
dev_err(hba->dev, "%s: hibern8 exit failed. ret = %d\n",
__func__, ret);
} else {
ufshcd_vops_hibern8_notify(hba, UIC_CMD_DME_HIBER_EXIT,
POST_CHANGE);
hba->ufs_stats.last_hibern8_exit_tstamp = local_clock();
hba->ufs_stats.hibern8_exit_cnt++;
}
return ret;
}
EXPORT_SYMBOL_GPL(ufshcd_uic_hibern8_exit);
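/**
 * ufshcd_auto_hibern8_update - update the Auto-Hibernate idle timer value
 * @hba: per adapter instance
 * @ahit: new AUTO_HIBERN8_IDLE_TIMER register value
 *
 * Caches the new value and, when it changed and the device W-LUN is not
 * runtime suspended, programs it into the controller.
 */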
void ufshcd_auto_hibern8_update(struct ufs_hba *hba, u32 ahit)
{
unsigned long flags;
bool update = false;
if (!ufshcd_is_auto_hibern8_supported(hba))
return;
spin_lock_irqsave(hba->host->host_lock, flags);
if (hba->ahit != ahit) {
hba->ahit = ahit;
update = true;
}
spin_unlock_irqrestore(hba->host->host_lock, flags);
if (update &&
!pm_runtime_suspended(&hba->ufs_device_wlun->sdev_gendev)) {
ufshcd_rpm_get_sync(hba);
ufshcd_hold(hba);
ufshcd_auto_hibern8_enable(hba);
ufshcd_release(hba);
ufshcd_rpm_put_sync(hba);
}
}
EXPORT_SYMBOL_GPL(ufshcd_auto_hibern8_update);
void ufshcd_auto_hibern8_enable(struct ufs_hba *hba)
{
if (!ufshcd_is_auto_hibern8_supported(hba))
return;
ufshcd_writel(hba, hba->ahit, REG_AUTO_HIBERNATE_IDLE_TIMER);
}
/**
* ufshcd_init_pwr_info - setting the POR (power on reset)
* values in hba power info
* @hba: per-adapter instance
*/
static void ufshcd_init_pwr_info(struct ufs_hba *hba)
{
hba->pwr_info.gear_rx = UFS_PWM_G1;
hba->pwr_info.gear_tx = UFS_PWM_G1;
hba->pwr_info.lane_rx = UFS_LANE_1;
hba->pwr_info.lane_tx = UFS_LANE_1;
hba->pwr_info.pwr_rx = SLOWAUTO_MODE;
hba->pwr_info.pwr_tx = SLOWAUTO_MODE;
hba->pwr_info.hs_rate = 0;
}
/**
* ufshcd_get_max_pwr_mode - reads the max power mode negotiated with device
* @hba: per-adapter instance
*
* Return: 0 upon success; < 0 upon failure.
*/
static int ufshcd_get_max_pwr_mode(struct ufs_hba *hba)
{
struct ufs_pa_layer_attr *pwr_info = &hba->max_pwr_info.info;
if (hba->max_pwr_info.is_valid)
return 0;
if (hba->quirks & UFSHCD_QUIRK_HIBERN_FASTAUTO) {
pwr_info->pwr_tx = FASTAUTO_MODE;
pwr_info->pwr_rx = FASTAUTO_MODE;
} else {
pwr_info->pwr_tx = FAST_MODE;
pwr_info->pwr_rx = FAST_MODE;
}
pwr_info->hs_rate = PA_HS_MODE_B;
/* Get the connected lane count */
ufshcd_dme_get(hba, UIC_ARG_MIB(PA_CONNECTEDRXDATALANES),
&pwr_info->lane_rx);
ufshcd_dme_get(hba, UIC_ARG_MIB(PA_CONNECTEDTXDATALANES),
&pwr_info->lane_tx);
if (!pwr_info->lane_rx || !pwr_info->lane_tx) {
dev_err(hba->dev, "%s: invalid connected lanes value. rx=%d, tx=%d\n",
__func__,
pwr_info->lane_rx,
pwr_info->lane_tx);
return -EINVAL;
}
/*
 * First, get the maximum HS gear. A zero value means there is no HS gear
 * capability, in which case fall back to the maximum PWM gear.
*/
ufshcd_dme_get(hba, UIC_ARG_MIB(PA_MAXRXHSGEAR), &pwr_info->gear_rx);
if (!pwr_info->gear_rx) {
ufshcd_dme_get(hba, UIC_ARG_MIB(PA_MAXRXPWMGEAR),
&pwr_info->gear_rx);
if (!pwr_info->gear_rx) {
dev_err(hba->dev, "%s: invalid max pwm rx gear read = %d\n",
__func__, pwr_info->gear_rx);
return -EINVAL;
}
pwr_info->pwr_rx = SLOW_MODE;
}
ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_MAXRXHSGEAR),
&pwr_info->gear_tx);
if (!pwr_info->gear_tx) {
ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_MAXRXPWMGEAR),
&pwr_info->gear_tx);
if (!pwr_info->gear_tx) {
dev_err(hba->dev, "%s: invalid max pwm tx gear read = %d\n",
__func__, pwr_info->gear_tx);
return -EINVAL;
}
pwr_info->pwr_tx = SLOW_MODE;
}
hba->max_pwr_info.is_valid = true;
return 0;
}
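/**
 * ufshcd_change_power_mode - change the UniPro power mode
 * @hba: per adapter instance
 * @pwr_mode: requested power mode settings (gears, lanes, power mode, HS rate)
 *
 * Programs the PA layer attributes for the requested mode and then issues
 * the UIC power mode change. Does nothing if the requested mode matches the
 * current one and no forced change is pending. On success the new settings
 * are cached in hba->pwr_info.
 *
 * Return: 0 upon success; < 0 upon failure.
 */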
static int ufshcd_change_power_mode(struct ufs_hba *hba,
struct ufs_pa_layer_attr *pwr_mode)
{
int ret;
/* if already configured to the requested pwr_mode */
if (!hba->force_pmc &&
pwr_mode->gear_rx == hba->pwr_info.gear_rx &&
pwr_mode->gear_tx == hba->pwr_info.gear_tx &&
pwr_mode->lane_rx == hba->pwr_info.lane_rx &&
pwr_mode->lane_tx == hba->pwr_info.lane_tx &&
pwr_mode->pwr_rx == hba->pwr_info.pwr_rx &&
pwr_mode->pwr_tx == hba->pwr_info.pwr_tx &&
pwr_mode->hs_rate == hba->pwr_info.hs_rate) {
dev_dbg(hba->dev, "%s: power already configured\n", __func__);
return 0;
}
/*
* Configure attributes for power mode change with below.
* - PA_RXGEAR, PA_ACTIVERXDATALANES, PA_RXTERMINATION,
* - PA_TXGEAR, PA_ACTIVETXDATALANES, PA_TXTERMINATION,
* - PA_HSSERIES
*/
ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXGEAR), pwr_mode->gear_rx);
ufshcd_dme_set(hba, UIC_ARG_MIB(PA_ACTIVERXDATALANES),
pwr_mode->lane_rx);
if (pwr_mode->pwr_rx == FASTAUTO_MODE ||
pwr_mode->pwr_rx == FAST_MODE)
ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXTERMINATION), true);
else
ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXTERMINATION), false);
ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXGEAR), pwr_mode->gear_tx);
ufshcd_dme_set(hba, UIC_ARG_MIB(PA_ACTIVETXDATALANES),
pwr_mode->lane_tx);
if (pwr_mode->pwr_tx == FASTAUTO_MODE ||
pwr_mode->pwr_tx == FAST_MODE)
ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXTERMINATION), true);
else
ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXTERMINATION), false);
if (pwr_mode->pwr_rx == FASTAUTO_MODE ||
pwr_mode->pwr_tx == FASTAUTO_MODE ||
pwr_mode->pwr_rx == FAST_MODE ||
pwr_mode->pwr_tx == FAST_MODE)
ufshcd_dme_set(hba, UIC_ARG_MIB(PA_HSSERIES),
pwr_mode->hs_rate);
if (!(hba->quirks & UFSHCD_QUIRK_SKIP_DEF_UNIPRO_TIMEOUT_SETTING)) {
ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA0),
DL_FC0ProtectionTimeOutVal_Default);
ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA1),
DL_TC0ReplayTimeOutVal_Default);
ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA2),
DL_AFC0ReqTimeOutVal_Default);
ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA3),
DL_FC1ProtectionTimeOutVal_Default);
ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA4),
DL_TC1ReplayTimeOutVal_Default);
ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA5),
DL_AFC1ReqTimeOutVal_Default);
ufshcd_dme_set(hba, UIC_ARG_MIB(DME_LocalFC0ProtectionTimeOutVal),
DL_FC0ProtectionTimeOutVal_Default);
ufshcd_dme_set(hba, UIC_ARG_MIB(DME_LocalTC0ReplayTimeOutVal),
DL_TC0ReplayTimeOutVal_Default);
ufshcd_dme_set(hba, UIC_ARG_MIB(DME_LocalAFC0ReqTimeOutVal),
DL_AFC0ReqTimeOutVal_Default);
}
ret = ufshcd_uic_change_pwr_mode(hba, pwr_mode->pwr_rx << 4
| pwr_mode->pwr_tx);
if (ret) {
dev_err(hba->dev,
"%s: power mode change failed %d\n", __func__, ret);
} else {
ufshcd_vops_pwr_change_notify(hba, POST_CHANGE, NULL,
pwr_mode);
memcpy(&hba->pwr_info, pwr_mode,
sizeof(struct ufs_pa_layer_attr));
}
return ret;
}
/**
* ufshcd_config_pwr_mode - configure a new power mode
* @hba: per-adapter instance
* @desired_pwr_mode: desired power configuration
*
* Return: 0 upon success; < 0 upon failure.
*/
int ufshcd_config_pwr_mode(struct ufs_hba *hba,
struct ufs_pa_layer_attr *desired_pwr_mode)
{
struct ufs_pa_layer_attr final_params = { 0 };
int ret;
ret = ufshcd_vops_pwr_change_notify(hba, PRE_CHANGE,
desired_pwr_mode, &final_params);
if (ret)
memcpy(&final_params, desired_pwr_mode, sizeof(final_params));
ret = ufshcd_change_power_mode(hba, &final_params);
return ret;
}
EXPORT_SYMBOL_GPL(ufshcd_config_pwr_mode);
/**
* ufshcd_complete_dev_init() - checks device readiness
* @hba: per-adapter instance
*
* Set fDeviceInit flag and poll until device toggles it.
*
* Return: 0 upon success; < 0 upon failure.
*/
static int ufshcd_complete_dev_init(struct ufs_hba *hba)
{
int err;
bool flag_res = true;
ktime_t timeout;
err = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_SET_FLAG,
QUERY_FLAG_IDN_FDEVICEINIT, 0, NULL);
if (err) {
dev_err(hba->dev,
"%s: setting fDeviceInit flag failed with error %d\n",
__func__, err);
goto out;
}
/* Poll fDeviceInit flag to be cleared */
timeout = ktime_add_ms(ktime_get(), FDEVICEINIT_COMPL_TIMEOUT);
do {
err = ufshcd_query_flag(hba, UPIU_QUERY_OPCODE_READ_FLAG,
QUERY_FLAG_IDN_FDEVICEINIT, 0, &flag_res);
if (!flag_res)
break;
usleep_range(500, 1000);
} while (ktime_before(ktime_get(), timeout));
if (err) {
dev_err(hba->dev,
"%s: reading fDeviceInit flag failed with error %d\n",
__func__, err);
} else if (flag_res) {
dev_err(hba->dev,
"%s: fDeviceInit was not cleared by the device\n",
__func__);
err = -EBUSY;
}
out:
return err;
}
/**
* ufshcd_make_hba_operational - Make UFS controller operational
* @hba: per adapter instance
*
* To bring UFS host controller to operational state,
* 1. Enable required interrupts
* 2. Configure interrupt aggregation
* 3. Program UTRL and UTMRL base address
* 4. Configure run-stop-registers
*
* Return: 0 on success, non-zero value on failure.
*/
int ufshcd_make_hba_operational(struct ufs_hba *hba)
{
int err = 0;
u32 reg;
/* Enable required interrupts */
ufshcd_enable_intr(hba, UFSHCD_ENABLE_INTRS);
/* Configure interrupt aggregation */
if (ufshcd_is_intr_aggr_allowed(hba))
ufshcd_config_intr_aggr(hba, hba->nutrs - 1, INT_AGGR_DEF_TO);
else
ufshcd_disable_intr_aggr(hba);
/* Configure UTRL and UTMRL base address registers */
ufshcd_writel(hba, lower_32_bits(hba->utrdl_dma_addr),
REG_UTP_TRANSFER_REQ_LIST_BASE_L);
ufshcd_writel(hba, upper_32_bits(hba->utrdl_dma_addr),
REG_UTP_TRANSFER_REQ_LIST_BASE_H);
ufshcd_writel(hba, lower_32_bits(hba->utmrdl_dma_addr),
REG_UTP_TASK_REQ_LIST_BASE_L);
ufshcd_writel(hba, upper_32_bits(hba->utmrdl_dma_addr),
REG_UTP_TASK_REQ_LIST_BASE_H);
/*
* Make sure base address and interrupt setup are updated before
* enabling the run/stop registers below.
*/
wmb();
/*
* UCRDY, UTMRLDY and UTRLRDY bits must be 1
*/
reg = ufshcd_readl(hba, REG_CONTROLLER_STATUS);
if (!(ufshcd_get_lists_status(reg))) {
ufshcd_enable_run_stop_reg(hba);
} else {
dev_err(hba->dev,
"Host controller not ready to process requests");
err = -EIO;
}
return err;
}
EXPORT_SYMBOL_GPL(ufshcd_make_hba_operational);
/**
* ufshcd_hba_stop - Send controller to reset state
* @hba: per adapter instance
*/
void ufshcd_hba_stop(struct ufs_hba *hba)
{
unsigned long flags;
int err;
/*
 * Obtain the host lock to prevent the controller from being disabled
* while the UFS interrupt handler is active on another CPU.
*/
spin_lock_irqsave(hba->host->host_lock, flags);
ufshcd_writel(hba, CONTROLLER_DISABLE, REG_CONTROLLER_ENABLE);
spin_unlock_irqrestore(hba->host->host_lock, flags);
err = ufshcd_wait_for_register(hba, REG_CONTROLLER_ENABLE,
CONTROLLER_ENABLE, CONTROLLER_DISABLE,
10, 1);
if (err)
dev_err(hba->dev, "%s: Controller disable failed\n", __func__);
}
EXPORT_SYMBOL_GPL(ufshcd_hba_stop);
/**
* ufshcd_hba_execute_hce - initialize the controller
* @hba: per adapter instance
*
* The controller resets itself and controller firmware initialization
* sequence kicks off. When controller is ready it will set
* the Host Controller Enable bit to 1.
*
* Return: 0 on success, non-zero value on failure.
*/
static int ufshcd_hba_execute_hce(struct ufs_hba *hba)
{
int retry_outer = 3;
int retry_inner;
start:
if (ufshcd_is_hba_active(hba))
/* change controller state to "reset state" */
ufshcd_hba_stop(hba);
/* UniPro link is disabled at this point */
ufshcd_set_link_off(hba);
ufshcd_vops_hce_enable_notify(hba, PRE_CHANGE);
/* start controller initialization sequence */
ufshcd_hba_start(hba);
/*
 * To initialize a UFS host controller, the HCE bit must be set to 1.
 * During initialization the HCE bit value changes from 1->0->1.
 * When the host controller completes the initialization sequence it
 * sets HCE back to 1, and the same bit is read back to check whether
 * initialization has completed. Without this delay, the HCE = 1 value
 * written by the previous instruction might be read back before the
 * controller has had a chance to clear it.
 * This delay can be changed based on the controller.
*/
ufshcd_delay_us(hba->vps->hba_enable_delay_us, 100);
/* wait for the host controller to complete initialization */
retry_inner = 50;
while (!ufshcd_is_hba_active(hba)) {
if (retry_inner) {
retry_inner--;
} else {
dev_err(hba->dev,
"Controller enable failed\n");
if (retry_outer) {
retry_outer--;
goto start;
}
return -EIO;
}
usleep_range(1000, 1100);
}
/* enable UIC related interrupts */
ufshcd_enable_intr(hba, UFSHCD_UIC_MASK);
ufshcd_vops_hce_enable_notify(hba, POST_CHANGE);
return 0;
}
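/**
 * ufshcd_hba_enable - enable the UFS host controller
 * @hba: per adapter instance
 *
 * Either runs the standard HCE enable sequence or, for controllers with
 * the UFSHCI_QUIRK_BROKEN_HCE quirk, uses DME_RESET and DME_ENABLE.
 *
 * Return: 0 on success, non-zero value on failure.
 */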
int ufshcd_hba_enable(struct ufs_hba *hba)
{
int ret;
if (hba->quirks & UFSHCI_QUIRK_BROKEN_HCE) {
ufshcd_set_link_off(hba);
ufshcd_vops_hce_enable_notify(hba, PRE_CHANGE);
/* enable UIC related interrupts */
ufshcd_enable_intr(hba, UFSHCD_UIC_MASK);
ret = ufshcd_dme_reset(hba);
if (ret) {
dev_err(hba->dev, "DME_RESET failed\n");
return ret;
}
ret = ufshcd_dme_enable(hba);
if (ret) {
dev_err(hba->dev, "Enabling DME failed\n");
return ret;
}
ufshcd_vops_hce_enable_notify(hba, POST_CHANGE);
} else {
ret = ufshcd_hba_execute_hce(hba);
}
return ret;
}
EXPORT_SYMBOL_GPL(ufshcd_hba_enable);
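/* Clear TX_LCC_ENABLE on all connected TX lanes of the local or peer end. */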
static int ufshcd_disable_tx_lcc(struct ufs_hba *hba, bool peer)
{
int tx_lanes = 0, i, err = 0;
if (!peer)
ufshcd_dme_get(hba, UIC_ARG_MIB(PA_CONNECTEDTXDATALANES),
&tx_lanes);
else
ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_CONNECTEDTXDATALANES),
&tx_lanes);
for (i = 0; i < tx_lanes; i++) {
if (!peer)
err = ufshcd_dme_set(hba,
UIC_ARG_MIB_SEL(TX_LCC_ENABLE,
UIC_ARG_MPHY_TX_GEN_SEL_INDEX(i)),
0);
else
err = ufshcd_dme_peer_set(hba,
UIC_ARG_MIB_SEL(TX_LCC_ENABLE,
UIC_ARG_MPHY_TX_GEN_SEL_INDEX(i)),
0);
if (err) {
dev_err(hba->dev, "%s: TX LCC Disable failed, peer = %d, lane = %d, err = %d",
__func__, peer, i, err);
break;
}
}
return err;
}
static inline int ufshcd_disable_device_tx_lcc(struct ufs_hba *hba)
{
return ufshcd_disable_tx_lcc(hba, true);
}
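/**
 * ufshcd_update_evt_hist - add an entry to an event history ring buffer
 * @hba: per adapter instance
 * @id: event identifier (one of the UFS_EVT_* values)
 * @val: value to record for the event
 *
 * The entry is timestamped and the variant driver is notified of the event.
 */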
void ufshcd_update_evt_hist(struct ufs_hba *hba, u32 id, u32 val)
{
struct ufs_event_hist *e;
if (id >= UFS_EVT_CNT)
return;
e = &hba->ufs_stats.event[id];
e->val[e->pos] = val;
e->tstamp[e->pos] = local_clock();
e->cnt += 1;
e->pos = (e->pos + 1) % UFS_EVENT_HIST_LENGTH;
ufshcd_vops_event_notify(hba, id, &val);
}
EXPORT_SYMBOL_GPL(ufshcd_update_evt_hist);
/**
* ufshcd_link_startup - Initialize unipro link startup
* @hba: per adapter instance
*
* Return: 0 for success, non-zero in case of failure.
*/
static int ufshcd_link_startup(struct ufs_hba *hba)
{
int ret;
int retries = DME_LINKSTARTUP_RETRIES;
bool link_startup_again = false;
/*
 * If the UFS device isn't active then we will have to issue link startup
 * twice to make sure the device state moves to active.
*/
if (!ufshcd_is_ufs_dev_active(hba))
link_startup_again = true;
link_startup:
do {
ufshcd_vops_link_startup_notify(hba, PRE_CHANGE);
ret = ufshcd_dme_link_startup(hba);
/* check if device is detected by inter-connect layer */
if (!ret && !ufshcd_is_device_present(hba)) {
ufshcd_update_evt_hist(hba,
UFS_EVT_LINK_STARTUP_FAIL,
0);
dev_err(hba->dev, "%s: Device not present\n", __func__);
ret = -ENXIO;
goto out;
}
/*
* DME link lost indication is only received when link is up,
* but we can't be sure if the link is up until link startup
* succeeds. So reset the local Uni-Pro and try again.
*/
if (ret && retries && ufshcd_hba_enable(hba)) {
ufshcd_update_evt_hist(hba,
UFS_EVT_LINK_STARTUP_FAIL,
(u32)ret);
goto out;
}
} while (ret && retries--);
if (ret) {
/* failed to get the link up... retire */
ufshcd_update_evt_hist(hba,
UFS_EVT_LINK_STARTUP_FAIL,
(u32)ret);
goto out;
}
if (link_startup_again) {
link_startup_again = false;
retries = DME_LINKSTARTUP_RETRIES;
goto link_startup;
}
/* Mark that link is up in PWM-G1, 1-lane, SLOW-AUTO mode */
ufshcd_init_pwr_info(hba);
ufshcd_print_pwr_info(hba);
if (hba->quirks & UFSHCD_QUIRK_BROKEN_LCC) {
ret = ufshcd_disable_device_tx_lcc(hba);
if (ret)
goto out;
}
/* Include any host controller configuration via UIC commands */
ret = ufshcd_vops_link_startup_notify(hba, POST_CHANGE);
if (ret)
goto out;
/* Clear UECPA once, in case a LINERESET happened during LINK_STARTUP */
ufshcd_readl(hba, REG_UIC_ERROR_CODE_PHY_ADAPTER_LAYER);
ret = ufshcd_make_hba_operational(hba);
out:
if (ret) {
dev_err(hba->dev, "link startup failed %d\n", ret);
ufshcd_print_host_state(hba);
ufshcd_print_pwr_info(hba);
ufshcd_print_evt_hist(hba);
}
return ret;
}
/**
* ufshcd_verify_dev_init() - Verify device initialization
* @hba: per-adapter instance
*
* Send NOP OUT UPIU and wait for NOP IN response to check whether the
* device Transport Protocol (UTP) layer is ready after a reset.
* If the UTP layer at the device side is not initialized, it may
* not respond with NOP IN UPIU within timeout of %NOP_OUT_TIMEOUT
* and we retry sending NOP OUT for %NOP_OUT_RETRIES iterations.
*
* Return: 0 upon success; < 0 upon failure.
*/
static int ufshcd_verify_dev_init(struct ufs_hba *hba)
{
int err = 0;
int retries;
ufshcd_hold(hba);
mutex_lock(&hba->dev_cmd.lock);
for (retries = NOP_OUT_RETRIES; retries > 0; retries--) {
err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_NOP,
hba->nop_out_timeout);
if (!err || err == -ETIMEDOUT)
break;
dev_dbg(hba->dev, "%s: error %d retrying\n", __func__, err);
}
mutex_unlock(&hba->dev_cmd.lock);
ufshcd_release(hba);
if (err)
dev_err(hba->dev, "%s: NOP OUT failed %d\n", __func__, err);
return err;
}
/**
* ufshcd_setup_links - associate link b/w device wlun and other luns
* @sdev: pointer to SCSI device
* @hba: pointer to ufs hba
*/
static void ufshcd_setup_links(struct ufs_hba *hba, struct scsi_device *sdev)
{
struct device_link *link;
/*
 * The device wlun is the supplier and the rest of the LUNs are consumers.
* This ensures that device wlun suspends after all other luns.
*/
if (hba->ufs_device_wlun) {
link = device_link_add(&sdev->sdev_gendev,
&hba->ufs_device_wlun->sdev_gendev,
DL_FLAG_PM_RUNTIME | DL_FLAG_RPM_ACTIVE);
if (!link) {
dev_err(&sdev->sdev_gendev, "Failed establishing link - %s\n",
dev_name(&hba->ufs_device_wlun->sdev_gendev));
return;
}
hba->luns_avail--;
/* Ignore REPORT_LUN wlun probing */
if (hba->luns_avail == 1) {
ufshcd_rpm_put(hba);
return;
}
} else {
/*
* Device wlun is probed. The assumption is that WLUNs are
* scanned before other LUNs.
*/
hba->luns_avail--;
}
}
/**
* ufshcd_lu_init - Initialize the relevant parameters of the LU
* @hba: per-adapter instance
* @sdev: pointer to SCSI device
*/
static void ufshcd_lu_init(struct ufs_hba *hba, struct scsi_device *sdev)
{
int len = QUERY_DESC_MAX_SIZE;
u8 lun = ufshcd_scsi_to_upiu_lun(sdev->lun);
u8 lun_qdepth = hba->nutrs;
u8 *desc_buf;
int ret;
desc_buf = kzalloc(len, GFP_KERNEL);
if (!desc_buf)
goto set_qdepth;
ret = ufshcd_read_unit_desc_param(hba, lun, 0, desc_buf, len);
if (ret < 0) {
if (ret == -EOPNOTSUPP)
/* If LU doesn't support unit descriptor, its queue depth is set to 1 */
lun_qdepth = 1;
kfree(desc_buf);
goto set_qdepth;
}
if (desc_buf[UNIT_DESC_PARAM_LU_Q_DEPTH]) {
/*
 * In the per-LU queueing architecture bLUQueueDepth is non-zero, so use the
 * smaller of UFSHCI CAP.NUTRS and the UFS LU's bLUQueueDepth.
*/
lun_qdepth = min_t(int, desc_buf[UNIT_DESC_PARAM_LU_Q_DEPTH], hba->nutrs);
}
/*
* According to UFS device specification, the write protection mode is only supported by
* normal LU, not supported by WLUN.
*/
if (hba->dev_info.f_power_on_wp_en && lun < hba->dev_info.max_lu_supported &&
!hba->dev_info.is_lu_power_on_wp &&
desc_buf[UNIT_DESC_PARAM_LU_WR_PROTECT] == UFS_LU_POWER_ON_WP)
hba->dev_info.is_lu_power_on_wp = true;
/* In case of RPMB LU, check if advanced RPMB mode is enabled */
if (desc_buf[UNIT_DESC_PARAM_UNIT_INDEX] == UFS_UPIU_RPMB_WLUN &&
desc_buf[RPMB_UNIT_DESC_PARAM_REGION_EN] & BIT(4))
hba->dev_info.b_advanced_rpmb_en = true;
kfree(desc_buf);
set_qdepth:
/*
* For WLUNs that don't support unit descriptor, queue depth is set to 1. For LUs whose
* bLUQueueDepth == 0, the queue depth is set to a maximum value that host can queue.
*/
dev_dbg(hba->dev, "Set LU %x queue depth %d\n", lun, lun_qdepth);
scsi_change_queue_depth(sdev, lun_qdepth);
}
/**
* ufshcd_slave_alloc - handle initial SCSI device configurations
* @sdev: pointer to SCSI device
*
* Return: success.
*/
static int ufshcd_slave_alloc(struct scsi_device *sdev)
{
struct ufs_hba *hba;
hba = shost_priv(sdev->host);
/* Mode sense(6) is not supported by UFS, so use Mode sense(10) */
sdev->use_10_for_ms = 1;
/* DBD field should be set to 1 in mode sense(10) */
sdev->set_dbd_for_ms = 1;
/* allow SCSI layer to restart the device in case of errors */
sdev->allow_restart = 1;
/* REPORT SUPPORTED OPERATION CODES is not supported */
sdev->no_report_opcodes = 1;
/* WRITE_SAME command is not supported */
sdev->no_write_same = 1;
ufshcd_lu_init(hba, sdev);
ufshcd_setup_links(hba, sdev);
return 0;
}
/**
* ufshcd_change_queue_depth - change queue depth
* @sdev: pointer to SCSI device
* @depth: required depth to set
*
* Change queue depth and make sure the max. limits are not crossed.
*
* Return: new queue depth.
*/
static int ufshcd_change_queue_depth(struct scsi_device *sdev, int depth)
{
return scsi_change_queue_depth(sdev, min(depth, sdev->host->can_queue));
}
/**
* ufshcd_slave_configure - adjust SCSI device configurations
* @sdev: pointer to SCSI device
*
* Return: 0 (success).
*/
static int ufshcd_slave_configure(struct scsi_device *sdev)
{
struct ufs_hba *hba = shost_priv(sdev->host);
struct request_queue *q = sdev->request_queue;
blk_queue_update_dma_pad(q, PRDT_DATA_BYTE_COUNT_PAD - 1);
if (hba->quirks & UFSHCD_QUIRK_4KB_DMA_ALIGNMENT)
blk_queue_update_dma_alignment(q, SZ_4K - 1);
/*
* Block runtime-pm until all consumers are added.
 * Refer to ufshcd_setup_links().
*/
if (is_device_wlun(sdev))
pm_runtime_get_noresume(&sdev->sdev_gendev);
else if (ufshcd_is_rpm_autosuspend_allowed(hba))
sdev->rpm_autosuspend = 1;
/*
* Do not print messages during runtime PM to avoid never-ending cycles
* of messages written back to storage by user space causing runtime
* resume, causing more messages and so on.
*/
sdev->silence_suspend = 1;
ufshcd_crypto_register(hba, q);
return 0;
}
/**
* ufshcd_slave_destroy - remove SCSI device configurations
* @sdev: pointer to SCSI device
*/
static void ufshcd_slave_destroy(struct scsi_device *sdev)
{
struct ufs_hba *hba;
unsigned long flags;
hba = shost_priv(sdev->host);
/* Drop the reference as it won't be needed anymore */
if (ufshcd_scsi_to_upiu_lun(sdev->lun) == UFS_UPIU_UFS_DEVICE_WLUN) {
spin_lock_irqsave(hba->host->host_lock, flags);
hba->ufs_device_wlun = NULL;
spin_unlock_irqrestore(hba->host->host_lock, flags);
} else if (hba->ufs_device_wlun) {
struct device *supplier = NULL;
/* Ensure UFS Device WLUN exists and does not disappear */
spin_lock_irqsave(hba->host->host_lock, flags);
if (hba->ufs_device_wlun) {
supplier = &hba->ufs_device_wlun->sdev_gendev;
get_device(supplier);
}
spin_unlock_irqrestore(hba->host->host_lock, flags);
if (supplier) {
/*
* If a LUN fails to probe (e.g. absent BOOT WLUN), the
* device will not have been registered but can still
* have a device link holding a reference to the device.
*/
device_link_remove(&sdev->sdev_gendev, supplier);
put_device(supplier);
}
}
}
/**
* ufshcd_scsi_cmd_status - Update SCSI command result based on SCSI status
* @lrbp: pointer to local reference block of completed command
* @scsi_status: SCSI command status
*
 * Return: value based on SCSI command status.
*/
static inline int
ufshcd_scsi_cmd_status(struct ufshcd_lrb *lrbp, int scsi_status)
{
int result = 0;
switch (scsi_status) {
case SAM_STAT_CHECK_CONDITION:
ufshcd_copy_sense_data(lrbp);
fallthrough;
case SAM_STAT_GOOD:
result |= DID_OK << 16 | scsi_status;
break;
case SAM_STAT_TASK_SET_FULL:
case SAM_STAT_BUSY:
case SAM_STAT_TASK_ABORTED:
ufshcd_copy_sense_data(lrbp);
result |= scsi_status;
break;
default:
result |= DID_ERROR << 16;
break;
} /* end of switch */
return result;
}
/**
* ufshcd_transfer_rsp_status - Get overall status of the response
* @hba: per adapter instance
* @lrbp: pointer to local reference block of completed command
* @cqe: pointer to the completion queue entry
*
* Return: result of the command to notify SCSI midlayer.
*/
static inline int
ufshcd_transfer_rsp_status(struct ufs_hba *hba, struct ufshcd_lrb *lrbp,
struct cq_entry *cqe)
{
int result = 0;
int scsi_status;
enum utp_ocs ocs;
u8 upiu_flags;
u32 resid;
upiu_flags = lrbp->ucd_rsp_ptr->header.flags;
resid = be32_to_cpu(lrbp->ucd_rsp_ptr->sr.residual_transfer_count);
/*
* Test !overflow instead of underflow to support UFS devices that do
* not set either flag.
*/
if (resid && !(upiu_flags & UPIU_RSP_FLAG_OVERFLOW))
scsi_set_resid(lrbp->cmd, resid);
/* overall command status of utrd */
ocs = ufshcd_get_tr_ocs(lrbp, cqe);
if (hba->quirks & UFSHCD_QUIRK_BROKEN_OCS_FATAL_ERROR) {
if (lrbp->ucd_rsp_ptr->header.response ||
lrbp->ucd_rsp_ptr->header.status)
ocs = OCS_SUCCESS;
}
switch (ocs) {
case OCS_SUCCESS:
hba->ufs_stats.last_hibern8_exit_tstamp = ktime_set(0, 0);
switch (ufshcd_get_req_rsp(lrbp->ucd_rsp_ptr)) {
case UPIU_TRANSACTION_RESPONSE:
/*
* get the result based on SCSI status response
* to notify the SCSI midlayer of the command status
*/
scsi_status = lrbp->ucd_rsp_ptr->header.status;
result = ufshcd_scsi_cmd_status(lrbp, scsi_status);
/*
 * Currently we only support BKOPS exception events, and those can
 * be ignored during power management callbacks: a BKOPS exception
 * event is not expected to be raised in the runtime suspend
 * callback since runtime suspend allows urgent BKOPS anyway.
 * During system suspend we forcefully disable BKOPS, and if urgent
 * BKOPS is needed it will be re-enabled on system resume. A
 * long-term solution could be to abort the system suspend if the
 * UFS device needs urgent BKOPS.
*/
if (!hba->pm_op_in_progress &&
!ufshcd_eh_in_progress(hba) &&
ufshcd_is_exception_event(lrbp->ucd_rsp_ptr))
/* Flushed in suspend */
schedule_work(&hba->eeh_work);
break;
case UPIU_TRANSACTION_REJECT_UPIU:
/* TODO: handle Reject UPIU Response */
result = DID_ERROR << 16;
dev_err(hba->dev,
"Reject UPIU not fully implemented\n");
break;
default:
dev_err(hba->dev,
"Unexpected request response code = %x\n",
ufshcd_get_req_rsp(lrbp->ucd_rsp_ptr));
result = DID_ERROR << 16;
break;
}
break;
case OCS_ABORTED:
result |= DID_ABORT << 16;
break;
case OCS_INVALID_COMMAND_STATUS:
result |= DID_REQUEUE << 16;
break;
case OCS_INVALID_CMD_TABLE_ATTR:
case OCS_INVALID_PRDT_ATTR:
case OCS_MISMATCH_DATA_BUF_SIZE:
case OCS_MISMATCH_RESP_UPIU_SIZE:
case OCS_PEER_COMM_FAILURE:
case OCS_FATAL_ERROR:
case OCS_DEVICE_FATAL_ERROR:
case OCS_INVALID_CRYPTO_CONFIG:
case OCS_GENERAL_CRYPTO_ERROR:
default:
result |= DID_ERROR << 16;
dev_err(hba->dev,
"OCS error from controller = %x for tag %d\n",
ocs, lrbp->task_tag);
ufshcd_print_evt_hist(hba);
ufshcd_print_host_state(hba);
break;
} /* end of switch */
if ((host_byte(result) != DID_OK) &&
(host_byte(result) != DID_REQUEUE) && !hba->silence_err_logs)
ufshcd_print_tr(hba, lrbp->task_tag, true);
return result;
}
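/*
 * Check whether the interrupt indicates an Auto-Hibernate error, i.e. a
 * hibern8 enter/exit error that was not triggered by an explicit UIC
 * hibern8 command issued by the driver.
 */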
static bool ufshcd_is_auto_hibern8_error(struct ufs_hba *hba,
u32 intr_mask)
{
if (!ufshcd_is_auto_hibern8_supported(hba) ||
!ufshcd_is_auto_hibern8_enabled(hba))
return false;
if (!(intr_mask & UFSHCD_UIC_HIBERN8_MASK))
return false;
if (hba->active_uic_cmd &&
(hba->active_uic_cmd->command == UIC_CMD_DME_HIBER_ENTER ||
hba->active_uic_cmd->command == UIC_CMD_DME_HIBER_EXIT))
return false;
return true;
}
/**
* ufshcd_uic_cmd_compl - handle completion of uic command
* @hba: per adapter instance
* @intr_status: interrupt status generated by the controller
*
* Return:
* IRQ_HANDLED - If interrupt is valid
* IRQ_NONE - If invalid interrupt
*/
static irqreturn_t ufshcd_uic_cmd_compl(struct ufs_hba *hba, u32 intr_status)
{
irqreturn_t retval = IRQ_NONE;
spin_lock(hba->host->host_lock);
if (ufshcd_is_auto_hibern8_error(hba, intr_status))
hba->errors |= (UFSHCD_UIC_HIBERN8_MASK & intr_status);
if ((intr_status & UIC_COMMAND_COMPL) && hba->active_uic_cmd) {
hba->active_uic_cmd->argument2 |=
ufshcd_get_uic_cmd_result(hba);
hba->active_uic_cmd->argument3 =
ufshcd_get_dme_attr_val(hba);
if (!hba->uic_async_done)
hba->active_uic_cmd->cmd_active = 0;
complete(&hba->active_uic_cmd->done);
retval = IRQ_HANDLED;
}
if ((intr_status & UFSHCD_UIC_PWR_MASK) && hba->uic_async_done) {
hba->active_uic_cmd->cmd_active = 0;
complete(hba->uic_async_done);
retval = IRQ_HANDLED;
}
if (retval == IRQ_HANDLED)
ufshcd_add_uic_command_trace(hba, hba->active_uic_cmd,
UFS_CMD_COMP);
spin_unlock(hba->host->host_lock);
return retval;
}
/* Release the resources allocated for processing a SCSI command. */
void ufshcd_release_scsi_cmd(struct ufs_hba *hba,
struct ufshcd_lrb *lrbp)
{
struct scsi_cmnd *cmd = lrbp->cmd;
scsi_dma_unmap(cmd);
ufshcd_release(hba);
ufshcd_clk_scaling_update_busy(hba);
}
/**
* ufshcd_compl_one_cqe - handle a completion queue entry
* @hba: per adapter instance
* @task_tag: the task tag of the request to be completed
* @cqe: pointer to the completion queue entry
*/
void ufshcd_compl_one_cqe(struct ufs_hba *hba, int task_tag,
struct cq_entry *cqe)
{
struct ufshcd_lrb *lrbp;
struct scsi_cmnd *cmd;
enum utp_ocs ocs;
lrbp = &hba->lrb[task_tag];
lrbp->compl_time_stamp = ktime_get();
cmd = lrbp->cmd;
if (cmd) {
if (unlikely(ufshcd_should_inform_monitor(hba, lrbp)))
ufshcd_update_monitor(hba, lrbp);
ufshcd_add_command_trace(hba, task_tag, UFS_CMD_COMP);
cmd->result = ufshcd_transfer_rsp_status(hba, lrbp, cqe);
ufshcd_release_scsi_cmd(hba, lrbp);
/* Do not touch lrbp after scsi done */
scsi_done(cmd);
} else if (lrbp->command_type == UTP_CMD_TYPE_DEV_MANAGE ||
lrbp->command_type == UTP_CMD_TYPE_UFS_STORAGE) {
if (hba->dev_cmd.complete) {
if (cqe) {
ocs = le32_to_cpu(cqe->status) & MASK_OCS;
lrbp->utr_descriptor_ptr->header.ocs = ocs;
}
complete(hba->dev_cmd.complete);
ufshcd_clk_scaling_update_busy(hba);
}
}
}
/**
* __ufshcd_transfer_req_compl - handle SCSI and query command completion
* @hba: per adapter instance
* @completed_reqs: bitmask that indicates which requests to complete
*/
static void __ufshcd_transfer_req_compl(struct ufs_hba *hba,
unsigned long completed_reqs)
{
int tag;
for_each_set_bit(tag, &completed_reqs, hba->nutrs)
ufshcd_compl_one_cqe(hba, tag, NULL);
}
/* Any value that is not an existing queue number is fine for this constant. */
enum {
UFSHCD_POLL_FROM_INTERRUPT_CONTEXT = -1
};
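/* Remove requests submitted in polling mode (REQ_POLLED) from @completed_reqs. */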
static void ufshcd_clear_polled(struct ufs_hba *hba,
unsigned long *completed_reqs)
{
int tag;
for_each_set_bit(tag, completed_reqs, hba->nutrs) {
struct scsi_cmnd *cmd = hba->lrb[tag].cmd;
if (!cmd)
continue;
if (scsi_cmd_to_rq(cmd)->cmd_flags & REQ_POLLED)
__clear_bit(tag, completed_reqs);
}
}
/*
* Return: > 0 if one or more commands have been completed or 0 if no
* requests have been completed.
*/
static int ufshcd_poll(struct Scsi_Host *shost, unsigned int queue_num)
{
struct ufs_hba *hba = shost_priv(shost);
unsigned long completed_reqs, flags;
u32 tr_doorbell;
struct ufs_hw_queue *hwq;
if (is_mcq_enabled(hba)) {
hwq = &hba->uhq[queue_num];
return ufshcd_mcq_poll_cqe_lock(hba, hwq);
}
spin_lock_irqsave(&hba->outstanding_lock, flags);
tr_doorbell = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
completed_reqs = ~tr_doorbell & hba->outstanding_reqs;
WARN_ONCE(completed_reqs & ~hba->outstanding_reqs,
"completed: %#lx; outstanding: %#lx\n", completed_reqs,
hba->outstanding_reqs);
if (queue_num == UFSHCD_POLL_FROM_INTERRUPT_CONTEXT) {
/* Do not complete polled requests from interrupt context. */
ufshcd_clear_polled(hba, &completed_reqs);
}
hba->outstanding_reqs &= ~completed_reqs;
spin_unlock_irqrestore(&hba->outstanding_lock, flags);
if (completed_reqs)
__ufshcd_transfer_req_compl(hba, completed_reqs);
return completed_reqs != 0;
}
/**
* ufshcd_mcq_compl_pending_transfer - MCQ mode function. It is
* invoked from the error handler context or ufshcd_host_reset_and_restore()
* to complete the pending transfers and free the resources associated with
* the scsi command.
*
* @hba: per adapter instance
* @force_compl: This flag is set to true when invoked
* from ufshcd_host_reset_and_restore() in which case it requires special
* handling because the host controller has been reset by ufshcd_hba_stop().
*/
static void ufshcd_mcq_compl_pending_transfer(struct ufs_hba *hba,
bool force_compl)
{
struct ufs_hw_queue *hwq;
struct ufshcd_lrb *lrbp;
struct scsi_cmnd *cmd;
unsigned long flags;
u32 hwq_num, utag;
int tag;
for (tag = 0; tag < hba->nutrs; tag++) {
lrbp = &hba->lrb[tag];
cmd = lrbp->cmd;
if (!ufshcd_cmd_inflight(cmd) ||
test_bit(SCMD_STATE_COMPLETE, &cmd->state))
continue;
utag = blk_mq_unique_tag(scsi_cmd_to_rq(cmd));
hwq_num = blk_mq_unique_tag_to_hwq(utag);
hwq = &hba->uhq[hwq_num];
if (force_compl) {
ufshcd_mcq_compl_all_cqes_lock(hba, hwq);
/*
 * For those commands whose CQEs are not present in the CQ,
 * complete them explicitly.
*/
if (cmd && !test_bit(SCMD_STATE_COMPLETE, &cmd->state)) {
spin_lock_irqsave(&hwq->cq_lock, flags);
set_host_byte(cmd, DID_REQUEUE);
ufshcd_release_scsi_cmd(hba, lrbp);
scsi_done(cmd);
spin_unlock_irqrestore(&hwq->cq_lock, flags);
}
} else {
ufshcd_mcq_poll_cqe_lock(hba, hwq);
}
}
}
/**
* ufshcd_transfer_req_compl - handle SCSI and query command completion
* @hba: per adapter instance
*
* Return:
* IRQ_HANDLED - If interrupt is valid
* IRQ_NONE - If invalid interrupt
*/
static irqreturn_t ufshcd_transfer_req_compl(struct ufs_hba *hba)
{
/*
 * Resetting the interrupt aggregation counters first and reading the
 * DOOR_BELL afterward allows us to handle all the completed requests.
 * To prevent starvation of other interrupts, the DB is read only once
 * after the reset. The downside of this approach is the possibility of
 * a false interrupt if the device completes another request after
 * resetting aggregation and before reading the DB.
*/
if (ufshcd_is_intr_aggr_allowed(hba) &&
!(hba->quirks & UFSHCI_QUIRK_SKIP_RESET_INTR_AGGR))
ufshcd_reset_intr_aggr(hba);
if (ufs_fail_completion())
return IRQ_HANDLED;
/*
* Ignore the ufshcd_poll() return value and return IRQ_HANDLED since we
* do not want polling to trigger spurious interrupt complaints.
*/
ufshcd_poll(hba->host, UFSHCD_POLL_FROM_INTERRUPT_CONTEXT);
return IRQ_HANDLED;
}
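/* Write the exception event control mask attribute (QUERY_ATTR_IDN_EE_CONTROL) to the device. */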
int __ufshcd_write_ee_control(struct ufs_hba *hba, u32 ee_ctrl_mask)
{
return ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_WRITE_ATTR,
QUERY_ATTR_IDN_EE_CONTROL, 0, 0,
&ee_ctrl_mask);
}
int ufshcd_write_ee_control(struct ufs_hba *hba)
{
int err;
mutex_lock(&hba->ee_ctrl_mutex);
err = __ufshcd_write_ee_control(hba, hba->ee_ctrl_mask);
mutex_unlock(&hba->ee_ctrl_mutex);
if (err)
dev_err(hba->dev, "%s: failed to write ee control %d\n",
__func__, err);
return err;
}
int ufshcd_update_ee_control(struct ufs_hba *hba, u16 *mask,
const u16 *other_mask, u16 set, u16 clr)
{
u16 new_mask, ee_ctrl_mask;
int err = 0;
mutex_lock(&hba->ee_ctrl_mutex);
new_mask = (*mask & ~clr) | set;
ee_ctrl_mask = new_mask | *other_mask;
if (ee_ctrl_mask != hba->ee_ctrl_mask)
err = __ufshcd_write_ee_control(hba, ee_ctrl_mask);
/* Still need to update 'mask' even if 'ee_ctrl_mask' was unchanged */
if (!err) {
hba->ee_ctrl_mask = ee_ctrl_mask;
*mask = new_mask;
}
mutex_unlock(&hba->ee_ctrl_mutex);
return err;
}
/**
* ufshcd_disable_ee - disable exception event
* @hba: per-adapter instance
* @mask: exception event to disable
*
* Disables exception event in the device so that the EVENT_ALERT
* bit is not set.
*
* Return: zero on success, non-zero error value on failure.
*/
static inline int ufshcd_disable_ee(struct ufs_hba *hba, u16 mask)
{
return ufshcd_update_ee_drv_mask(hba, 0, mask);
}
/**
* ufshcd_enable_ee - enable exception event
* @hba: per-adapter instance
* @mask: exception event to enable
*
* Enable corresponding exception event in the device to allow
* device to alert host in critical scenarios.
*
* Return: zero on success, non-zero error value on failure.
*/
static inline int ufshcd_enable_ee(struct ufs_hba *hba, u16 mask)
{
return ufshcd_update_ee_drv_mask(hba, mask, 0);
}
/**
* ufshcd_enable_auto_bkops - Allow device managed BKOPS
* @hba: per-adapter instance
*
* Allow device to manage background operations on its own. Enabling
* this might lead to inconsistent latencies during normal data transfers
* as the device is allowed to manage its own way of handling background
* operations.
*
* Return: zero on success, non-zero on failure.
*/
static int ufshcd_enable_auto_bkops(struct ufs_hba *hba)
{
int err = 0;
if (hba->auto_bkops_enabled)
goto out;
err = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_SET_FLAG,
QUERY_FLAG_IDN_BKOPS_EN, 0, NULL);
if (err) {
dev_err(hba->dev, "%s: failed to enable bkops %d\n",
__func__, err);
goto out;
}
hba->auto_bkops_enabled = true;
trace_ufshcd_auto_bkops_state(dev_name(hba->dev), "Enabled");
/* No need of URGENT_BKOPS exception from the device */
err = ufshcd_disable_ee(hba, MASK_EE_URGENT_BKOPS);
if (err)
dev_err(hba->dev, "%s: failed to disable exception event %d\n",
__func__, err);
out:
return err;
}
/**
* ufshcd_disable_auto_bkops - block device in doing background operations
* @hba: per-adapter instance
*
* Disabling background operations improves command response latency but
 * has the drawback of the device moving into a critical state where it is
 * not operable. Make sure to call ufshcd_enable_auto_bkops() whenever the
* host is idle so that BKOPS are managed effectively without any negative
* impacts.
*
* Return: zero on success, non-zero on failure.
*/
static int ufshcd_disable_auto_bkops(struct ufs_hba *hba)
{
int err = 0;
if (!hba->auto_bkops_enabled)
goto out;
/*
* If host assisted BKOPs is to be enabled, make sure
* urgent bkops exception is allowed.
*/
err = ufshcd_enable_ee(hba, MASK_EE_URGENT_BKOPS);
if (err) {
dev_err(hba->dev, "%s: failed to enable exception event %d\n",
__func__, err);
goto out;
}
err = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_CLEAR_FLAG,
QUERY_FLAG_IDN_BKOPS_EN, 0, NULL);
if (err) {
dev_err(hba->dev, "%s: failed to disable bkops %d\n",
__func__, err);
ufshcd_disable_ee(hba, MASK_EE_URGENT_BKOPS);
goto out;
}
hba->auto_bkops_enabled = false;
trace_ufshcd_auto_bkops_state(dev_name(hba->dev), "Disabled");
hba->is_urgent_bkops_lvl_checked = false;
out:
return err;
}
/**
* ufshcd_force_reset_auto_bkops - force reset auto bkops state
* @hba: per adapter instance
*
 * After a device reset the device may toggle the BKOPS_EN flag
 * to its default value. The s/w tracking variables should be updated
 * as well. This function changes the auto-bkops state based on
* UFSHCD_CAP_KEEP_AUTO_BKOPS_ENABLED_EXCEPT_SUSPEND.
*/
static void ufshcd_force_reset_auto_bkops(struct ufs_hba *hba)
{
if (ufshcd_keep_autobkops_enabled_except_suspend(hba)) {
hba->auto_bkops_enabled = false;
hba->ee_ctrl_mask |= MASK_EE_URGENT_BKOPS;
ufshcd_enable_auto_bkops(hba);
} else {
hba->auto_bkops_enabled = true;
hba->ee_ctrl_mask &= ~MASK_EE_URGENT_BKOPS;
ufshcd_disable_auto_bkops(hba);
}
hba->urgent_bkops_lvl = BKOPS_STATUS_PERF_IMPACT;
hba->is_urgent_bkops_lvl_checked = false;
}
static inline int ufshcd_get_bkops_status(struct ufs_hba *hba, u32 *status)
{
return ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
QUERY_ATTR_IDN_BKOPS_STATUS, 0, 0, status);
}
/**
* ufshcd_bkops_ctrl - control the auto bkops based on current bkops status
* @hba: per-adapter instance
* @status: bkops_status value
*
* Read the bkops_status from the UFS device and Enable fBackgroundOpsEn
* flag in the device to permit background operations if the device
* bkops_status is greater than or equal to "status" argument passed to
* this function, disable otherwise.
*
* Return: 0 for success, non-zero in case of failure.
*
* NOTE: Caller of this function can check the "hba->auto_bkops_enabled" flag
* to know whether auto bkops is enabled or disabled after this function
* returns control to it.
*/
static int ufshcd_bkops_ctrl(struct ufs_hba *hba,
enum bkops_status status)
{
int err;
u32 curr_status = 0;
err = ufshcd_get_bkops_status(hba, &curr_status);
if (err) {
dev_err(hba->dev, "%s: failed to get BKOPS status %d\n",
__func__, err);
goto out;
} else if (curr_status > BKOPS_STATUS_MAX) {
dev_err(hba->dev, "%s: invalid BKOPS status %d\n",
__func__, curr_status);
err = -EINVAL;
goto out;
}
if (curr_status >= status)
err = ufshcd_enable_auto_bkops(hba);
else
err = ufshcd_disable_auto_bkops(hba);
out:
return err;
}
/**
* ufshcd_urgent_bkops - handle urgent bkops exception event
* @hba: per-adapter instance
*
* Enable fBackgroundOpsEn flag in the device to permit background
* operations.
*
 * If BKOPS is enabled, this function returns 0; it returns a negative
 * error value for any failure.
*
* Return: 0 upon success; < 0 upon failure.
*/
static int ufshcd_urgent_bkops(struct ufs_hba *hba)
{
return ufshcd_bkops_ctrl(hba, hba->urgent_bkops_lvl);
}
static inline int ufshcd_get_ee_status(struct ufs_hba *hba, u32 *status)
{
return ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
QUERY_ATTR_IDN_EE_STATUS, 0, 0, status);
}
static void ufshcd_bkops_exception_event_handler(struct ufs_hba *hba)
{
int err;
u32 curr_status = 0;
if (hba->is_urgent_bkops_lvl_checked)
goto enable_auto_bkops;
err = ufshcd_get_bkops_status(hba, &curr_status);
if (err) {
dev_err(hba->dev, "%s: failed to get BKOPS status %d\n",
__func__, err);
goto out;
}
/*
 * We are seeing that some devices raise the urgent BKOPS exception
 * event even when the BKOPS status doesn't indicate that performance is
 * impacted or critical. Handle such devices by determining their urgent
 * BKOPS status at runtime.
*/
if (curr_status < BKOPS_STATUS_PERF_IMPACT) {
dev_err(hba->dev, "%s: device raised urgent BKOPS exception for bkops status %d\n",
__func__, curr_status);
/* update the current status as the urgent bkops level */
hba->urgent_bkops_lvl = curr_status;
hba->is_urgent_bkops_lvl_checked = true;
}
enable_auto_bkops:
err = ufshcd_enable_auto_bkops(hba);
out:
if (err < 0)
dev_err(hba->dev, "%s: failed to handle urgent bkops %d\n",
__func__, err);
}
static void ufshcd_temp_exception_event_handler(struct ufs_hba *hba, u16 status)
{
u32 value;
if (ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
QUERY_ATTR_IDN_CASE_ROUGH_TEMP, 0, 0, &value))
return;
dev_info(hba->dev, "exception Tcase %d\n", value - 80);
ufs_hwmon_notify_event(hba, status & MASK_EE_URGENT_TEMP);
/*
* A placeholder for the platform vendors to add whatever additional
* steps required
*/
}
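/* Set or clear a WriteBooster-related flag (selected by @idn) for the WB query index. */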
static int __ufshcd_wb_toggle(struct ufs_hba *hba, bool set, enum flag_idn idn)
{
u8 index;
enum query_opcode opcode = set ? UPIU_QUERY_OPCODE_SET_FLAG :
UPIU_QUERY_OPCODE_CLEAR_FLAG;
index = ufshcd_wb_get_query_index(hba);
return ufshcd_query_flag_retry(hba, opcode, idn, index, NULL);
}
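/**
 * ufshcd_wb_toggle - enable or disable WriteBooster
 * @hba: per adapter instance
 * @enable: true to enable WriteBooster, false to disable it
 *
 * Does nothing if WriteBooster is not allowed or is already in the
 * requested state.
 *
 * Return: 0 upon success; < 0 upon failure.
 */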
int ufshcd_wb_toggle(struct ufs_hba *hba, bool enable)
{
int ret;
if (!ufshcd_is_wb_allowed(hba) ||
hba->dev_info.wb_enabled == enable)
return 0;
ret = __ufshcd_wb_toggle(hba, enable, QUERY_FLAG_IDN_WB_EN);
if (ret) {
dev_err(hba->dev, "%s: Write Booster %s failed %d\n",
__func__, enable ? "enabling" : "disabling", ret);
return ret;
}
hba->dev_info.wb_enabled = enable;
dev_dbg(hba->dev, "%s: Write Booster %s\n",
__func__, enable ? "enabled" : "disabled");
return ret;
}
static void ufshcd_wb_toggle_buf_flush_during_h8(struct ufs_hba *hba,
bool enable)
{
int ret;
ret = __ufshcd_wb_toggle(hba, enable,
QUERY_FLAG_IDN_WB_BUFF_FLUSH_DURING_HIBERN8);
if (ret) {
dev_err(hba->dev, "%s: WB-Buf Flush during H8 %s failed %d\n",
__func__, enable ? "enabling" : "disabling", ret);
return;
}
dev_dbg(hba->dev, "%s: WB-Buf Flush during H8 %s\n",
__func__, enable ? "enabled" : "disabled");
}
int ufshcd_wb_toggle_buf_flush(struct ufs_hba *hba, bool enable)
{
int ret;
if (!ufshcd_is_wb_allowed(hba) ||
hba->dev_info.wb_buf_flush_enabled == enable)
return 0;
ret = __ufshcd_wb_toggle(hba, enable, QUERY_FLAG_IDN_WB_BUFF_FLUSH_EN);
if (ret) {
dev_err(hba->dev, "%s: WB-Buf Flush %s failed %d\n",
__func__, enable ? "enabling" : "disabling", ret);
return ret;
}
hba->dev_info.wb_buf_flush_enabled = enable;
dev_dbg(hba->dev, "%s: WB-Buf Flush %s\n",
__func__, enable ? "enabled" : "disabled");
return ret;
}
static bool ufshcd_wb_presrv_usrspc_keep_vcc_on(struct ufs_hba *hba,
u32 avail_buf)
{
u32 cur_buf;
int ret;
u8 index;
index = ufshcd_wb_get_query_index(hba);
ret = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
QUERY_ATTR_IDN_CURR_WB_BUFF_SIZE,
index, 0, &cur_buf);
if (ret) {
dev_err(hba->dev, "%s: dCurWriteBoosterBufferSize read failed %d\n",
__func__, ret);
return false;
}
if (!cur_buf) {
dev_info(hba->dev, "dCurWBBuf: %d WB disabled until free-space is available\n",
cur_buf);
return false;
}
/* Let it continue to flush when available buffer exceeds threshold */
return avail_buf < hba->vps->wb_flush_threshold;
}
static void ufshcd_wb_force_disable(struct ufs_hba *hba)
{
if (ufshcd_is_wb_buf_flush_allowed(hba))
ufshcd_wb_toggle_buf_flush(hba, false);
ufshcd_wb_toggle_buf_flush_during_h8(hba, false);
ufshcd_wb_toggle(hba, false);
hba->caps &= ~UFSHCD_CAP_WB_EN;
dev_info(hba->dev, "%s: WB force disabled\n", __func__);
}
static bool ufshcd_is_wb_buf_lifetime_available(struct ufs_hba *hba)
{
u32 lifetime;
int ret;
u8 index;
index = ufshcd_wb_get_query_index(hba);
ret = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
QUERY_ATTR_IDN_WB_BUFF_LIFE_TIME_EST,
index, 0, &lifetime);
if (ret) {
dev_err(hba->dev,
"%s: bWriteBoosterBufferLifeTimeEst read failed %d\n",
__func__, ret);
return false;
}
if (lifetime == UFS_WB_EXCEED_LIFETIME) {
dev_err(hba->dev, "%s: WB buf lifetime is exhausted 0x%02X\n",
__func__, lifetime);
return false;
}
dev_dbg(hba->dev, "%s: WB buf lifetime is 0x%02X\n",
__func__, lifetime);
return true;
}
static bool ufshcd_wb_need_flush(struct ufs_hba *hba)
{
int ret;
u32 avail_buf;
u8 index;
if (!ufshcd_is_wb_allowed(hba))
return false;
if (!ufshcd_is_wb_buf_lifetime_available(hba)) {
ufshcd_wb_force_disable(hba);
return false;
}
/*
 * The UFS device needs VCC to be ON to flush.
 * With user-space reduction enabled, checking only the available buffer
 * is enough to decide whether to flush; the threshold used here is
 * > 90% full.
 * With user-space preservation enabled, the current buffer must be
 * checked too, because the WB buffer size can shrink as the disk fills
 * up. This information is provided by the current buffer
 * (dCurrentWriteBoosterBufferSize). There is no point in keeping VCC on
 * when the current buffer is empty.
*/
index = ufshcd_wb_get_query_index(hba);
ret = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
QUERY_ATTR_IDN_AVAIL_WB_BUFF_SIZE,
index, 0, &avail_buf);
if (ret) {
dev_warn(hba->dev, "%s: dAvailableWriteBoosterBufferSize read failed %d\n",
__func__, ret);
return false;
}
if (!hba->dev_info.b_presrv_uspc_en)
return avail_buf <= UFS_WB_BUF_REMAIN_PERCENT(10);
return ufshcd_wb_presrv_usrspc_keep_vcc_on(hba, avail_buf);
}
static void ufshcd_rpm_dev_flush_recheck_work(struct work_struct *work)
{
struct ufs_hba *hba = container_of(to_delayed_work(work),
struct ufs_hba,
rpm_dev_flush_recheck_work);
/*
* To prevent unnecessary VCC power drain after device finishes
* WriteBooster buffer flush or Auto BKOPs, force runtime resume
* after a certain delay to recheck the threshold by next runtime
* suspend.
*/
ufshcd_rpm_get_sync(hba);
ufshcd_rpm_put_sync(hba);
}
/**
* ufshcd_exception_event_handler - handle exceptions raised by device
* @work: pointer to work data
*
* Read bExceptionEventStatus attribute from the device and handle the
* exception event accordingly.
*/
static void ufshcd_exception_event_handler(struct work_struct *work)
{
struct ufs_hba *hba;
int err;
u32 status = 0;
hba = container_of(work, struct ufs_hba, eeh_work);
ufshcd_scsi_block_requests(hba);
err = ufshcd_get_ee_status(hba, &status);
if (err) {
dev_err(hba->dev, "%s: failed to get exception status %d\n",
__func__, err);
goto out;
}
trace_ufshcd_exception_event(dev_name(hba->dev), status);
if (status & hba->ee_drv_mask & MASK_EE_URGENT_BKOPS)
ufshcd_bkops_exception_event_handler(hba);
if (status & hba->ee_drv_mask & MASK_EE_URGENT_TEMP)
ufshcd_temp_exception_event_handler(hba, status);
ufs_debugfs_exception_event(hba, status);
out:
ufshcd_scsi_unblock_requests(hba);
}
/* Complete requests that have door-bell cleared */
static void ufshcd_complete_requests(struct ufs_hba *hba, bool force_compl)
{
if (is_mcq_enabled(hba))
ufshcd_mcq_compl_pending_transfer(hba, force_compl);
else
ufshcd_transfer_req_compl(hba);
ufshcd_tmc_handler(hba);
}
/**
 * ufshcd_quirk_dl_nac_errors - check whether error handling is required to
 * recover from DL NAC errors
* @hba: per-adapter instance
*
* Return: true if error handling is required, false otherwise.
*/
static bool ufshcd_quirk_dl_nac_errors(struct ufs_hba *hba)
{
unsigned long flags;
bool err_handling = true;
spin_lock_irqsave(hba->host->host_lock, flags);
/*
 * UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS only works around the
 * device fatal error and/or DL NAC & REPLAY timeout errors.
*/
if (hba->saved_err & (CONTROLLER_FATAL_ERROR | SYSTEM_BUS_FATAL_ERROR))
goto out;
if ((hba->saved_err & DEVICE_FATAL_ERROR) ||
((hba->saved_err & UIC_ERROR) &&
(hba->saved_uic_err & UFSHCD_UIC_DL_TCx_REPLAY_ERROR)))
goto out;
if ((hba->saved_err & UIC_ERROR) &&
(hba->saved_uic_err & UFSHCD_UIC_DL_NAC_RECEIVED_ERROR)) {
int err;
/*
* wait for 50ms to see if we can get any other errors or not.
*/
spin_unlock_irqrestore(hba->host->host_lock, flags);
msleep(50);
spin_lock_irqsave(hba->host->host_lock, flags);
/*
 * Now check whether we have received any severe errors other than
 * the DL NAC error.
*/
if ((hba->saved_err & INT_FATAL_ERRORS) ||
((hba->saved_err & UIC_ERROR) &&
(hba->saved_uic_err & ~UFSHCD_UIC_DL_NAC_RECEIVED_ERROR)))
goto out;
/*
* As DL NAC is the only error received so far, send out NOP
* command to confirm if link is still active or not.
* - If we don't get any response then do error recovery.
* - If we get response then clear the DL NAC error bit.
*/
spin_unlock_irqrestore(hba->host->host_lock, flags);
err = ufshcd_verify_dev_init(hba);
spin_lock_irqsave(hba->host->host_lock, flags);
if (err)
goto out;
/* Link seems to be alive hence ignore the DL NAC errors */
if (hba->saved_uic_err == UFSHCD_UIC_DL_NAC_RECEIVED_ERROR)
hba->saved_err &= ~UIC_ERROR;
/* clear NAC error */
hba->saved_uic_err &= ~UFSHCD_UIC_DL_NAC_RECEIVED_ERROR;
if (!hba->saved_uic_err)
err_handling = false;
}
out:
spin_unlock_irqrestore(hba->host->host_lock, flags);
return err_handling;
}
/* host lock must be held before calling this func */
static inline bool ufshcd_is_saved_err_fatal(struct ufs_hba *hba)
{
return (hba->saved_uic_err & UFSHCD_UIC_DL_PA_INIT_ERROR) ||
(hba->saved_err & (INT_FATAL_ERRORS | UFSHCD_UIC_HIBERN8_MASK));
}
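/*
 * Schedule the error handler work and move the HBA state to one of the
 * EH_SCHEDULED states. Must be called with the host lock held.
 */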
void ufshcd_schedule_eh_work(struct ufs_hba *hba)
{
lockdep_assert_held(hba->host->host_lock);
/* handle fatal errors only when link is not in error state */
if (hba->ufshcd_state != UFSHCD_STATE_ERROR) {
if (hba->force_reset || ufshcd_is_link_broken(hba) ||
ufshcd_is_saved_err_fatal(hba))
hba->ufshcd_state = UFSHCD_STATE_EH_SCHEDULED_FATAL;
else
hba->ufshcd_state = UFSHCD_STATE_EH_SCHEDULED_NON_FATAL;
queue_work(hba->eh_wq, &hba->eh_work);
}
}
static void ufshcd_force_error_recovery(struct ufs_hba *hba)
{
spin_lock_irq(hba->host->host_lock);
hba->force_reset = true;
ufshcd_schedule_eh_work(hba);
spin_unlock_irq(hba->host->host_lock);
}
static void ufshcd_clk_scaling_allow(struct ufs_hba *hba, bool allow)
{
mutex_lock(&hba->wb_mutex);
down_write(&hba->clk_scaling_lock);
hba->clk_scaling.is_allowed = allow;
up_write(&hba->clk_scaling_lock);
mutex_unlock(&hba->wb_mutex);
}
static void ufshcd_clk_scaling_suspend(struct ufs_hba *hba, bool suspend)
{
if (suspend) {
if (hba->clk_scaling.is_enabled)
ufshcd_suspend_clkscaling(hba);
ufshcd_clk_scaling_allow(hba, false);
} else {
ufshcd_clk_scaling_allow(hba, true);
if (hba->clk_scaling.is_enabled)
ufshcd_resume_clkscaling(hba);
}
}
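/*
 * Prepare for error handling: re-enable the HBA resources (regulators, IRQ,
 * clocks) if the device is suspended, block SCSI requests, wait for ongoing
 * ufshcd_queuecommand() calls to finish and cancel the exception event work.
 */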
static void ufshcd_err_handling_prepare(struct ufs_hba *hba)
{
ufshcd_rpm_get_sync(hba);
if (pm_runtime_status_suspended(&hba->ufs_device_wlun->sdev_gendev) ||
hba->is_sys_suspended) {
enum ufs_pm_op pm_op;
/*
 * Don't assume anything about the result of resume: if resume fails,
 * the IRQ and clocks can be OFF, and the power supplies can be OFF or
 * in LPM.
*/
ufshcd_setup_hba_vreg(hba, true);
ufshcd_enable_irq(hba);
ufshcd_setup_vreg(hba, true);
ufshcd_config_vreg_hpm(hba, hba->vreg_info.vccq);
ufshcd_config_vreg_hpm(hba, hba->vreg_info.vccq2);
ufshcd_hold(hba);
if (!ufshcd_is_clkgating_allowed(hba))
ufshcd_setup_clocks(hba, true);
ufshcd_release(hba);
pm_op = hba->is_sys_suspended ? UFS_SYSTEM_PM : UFS_RUNTIME_PM;
ufshcd_vops_resume(hba, pm_op);
} else {
ufshcd_hold(hba);
if (ufshcd_is_clkscaling_supported(hba) &&
hba->clk_scaling.is_enabled)
ufshcd_suspend_clkscaling(hba);
ufshcd_clk_scaling_allow(hba, false);
}
ufshcd_scsi_block_requests(hba);
/* Wait for ongoing ufshcd_queuecommand() calls to finish. */
blk_mq_wait_quiesce_done(&hba->host->tag_set);
cancel_work_sync(&hba->eeh_work);
}
static void ufshcd_err_handling_unprepare(struct ufs_hba *hba)
{
ufshcd_scsi_unblock_requests(hba);
ufshcd_release(hba);
if (ufshcd_is_clkscaling_supported(hba))
ufshcd_clk_scaling_suspend(hba, false);
ufshcd_rpm_put(hba);
}
static inline bool ufshcd_err_handling_should_stop(struct ufs_hba *hba)
{
return (!hba->is_powered || hba->shutting_down ||
!hba->ufs_device_wlun ||
hba->ufshcd_state == UFSHCD_STATE_ERROR ||
(!(hba->saved_err || hba->saved_uic_err || hba->force_reset ||
ufshcd_is_link_broken(hba))));
}
#ifdef CONFIG_PM
static void ufshcd_recover_pm_error(struct ufs_hba *hba)
{
struct Scsi_Host *shost = hba->host;
struct scsi_device *sdev;
struct request_queue *q;
int ret;
hba->is_sys_suspended = false;
/*
 * Set the RPM status of the wlun device to RPM_ACTIVE;
 * this also clears its runtime error.
*/
ret = pm_runtime_set_active(&hba->ufs_device_wlun->sdev_gendev);
/* hba device might have a runtime error otherwise */
if (ret)
ret = pm_runtime_set_active(hba->dev);
/*
* If wlun device had runtime error, we also need to resume those
* consumer scsi devices in case any of them has failed to be
* resumed due to supplier runtime resume failure. This is to unblock
* blk_queue_enter in case there are bios waiting inside it.
*/
if (!ret) {
shost_for_each_device(sdev, shost) {
q = sdev->request_queue;
if (q->dev && (q->rpm_status == RPM_SUSPENDED ||
q->rpm_status == RPM_SUSPENDING))
pm_request_resume(q->dev);
}
}
}
#else
static inline void ufshcd_recover_pm_error(struct ufs_hba *hba)
{
}
#endif
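/*
 * Check whether the power mode reported by PA_PWRMODE still matches the
 * mode stored in hba->pwr_info; if not, a power mode restore is needed.
 */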
static bool ufshcd_is_pwr_mode_restore_needed(struct ufs_hba *hba)
{
struct ufs_pa_layer_attr *pwr_info = &hba->pwr_info;
u32 mode;
ufshcd_dme_get(hba, UIC_ARG_MIB(PA_PWRMODE), &mode);
if (pwr_info->pwr_rx != ((mode >> PWRMODE_RX_OFFSET) & PWRMODE_MASK))
return true;
if (pwr_info->pwr_tx != (mode & PWRMODE_MASK))
return true;
return false;
}
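/*
 * blk_mq_tagset_busy_iter() callback that aborts one outstanding command via
 * ufshcd_try_to_abort_task(); iteration continues only if the abort succeeded.
 */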
static bool ufshcd_abort_one(struct request *rq, void *priv)
{
int *ret = priv;
u32 tag = rq->tag;
struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(rq);
struct scsi_device *sdev = cmd->device;
struct Scsi_Host *shost = sdev->host;
struct ufs_hba *hba = shost_priv(shost);
*ret = ufshcd_try_to_abort_task(hba, tag);
dev_err(hba->dev, "Aborting tag %d / CDB %#02x %s\n", tag,
hba->lrb[tag].cmd ? hba->lrb[tag].cmd->cmnd[0] : -1,
*ret ? "failed" : "succeeded");
return *ret == 0;
}
/**
* ufshcd_abort_all - Abort all pending commands.
* @hba: Host bus adapter pointer.
*
* Return: true if and only if the host controller needs to be reset.
*/
static bool ufshcd_abort_all(struct ufs_hba *hba)
{
int tag, ret = 0;
blk_mq_tagset_busy_iter(&hba->host->tag_set, ufshcd_abort_one, &ret);
if (ret)
goto out;
/* Clear pending task management requests */
for_each_set_bit(tag, &hba->outstanding_tasks, hba->nutmrs) {
ret = ufshcd_clear_tm_cmd(hba, tag);
if (ret)
goto out;
}
out:
/* Complete the requests that are cleared by s/w */
ufshcd_complete_requests(hba, false);
return ret != 0;
}
/**
* ufshcd_err_handler - handle UFS errors that require s/w attention
* @work: pointer to work structure
*/
static void ufshcd_err_handler(struct work_struct *work)
{
int retries = MAX_ERR_HANDLER_RETRIES;
struct ufs_hba *hba;
unsigned long flags;
bool needs_restore;
bool needs_reset;
int pmc_err;
hba = container_of(work, struct ufs_hba, eh_work);
dev_info(hba->dev,
"%s started; HBA state %s; powered %d; shutting down %d; saved_err = %d; saved_uic_err = %d; force_reset = %d%s\n",
__func__, ufshcd_state_name[hba->ufshcd_state],
hba->is_powered, hba->shutting_down, hba->saved_err,
hba->saved_uic_err, hba->force_reset,
ufshcd_is_link_broken(hba) ? "; link is broken" : "");
down(&hba->host_sem);
spin_lock_irqsave(hba->host->host_lock, flags);
if (ufshcd_err_handling_should_stop(hba)) {
if (hba->ufshcd_state != UFSHCD_STATE_ERROR)
hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL;
spin_unlock_irqrestore(hba->host->host_lock, flags);
up(&hba->host_sem);
return;
}
ufshcd_set_eh_in_progress(hba);
spin_unlock_irqrestore(hba->host->host_lock, flags);
ufshcd_err_handling_prepare(hba);
/* Complete requests that have door-bell cleared by h/w */
ufshcd_complete_requests(hba, false);
spin_lock_irqsave(hba->host->host_lock, flags);
again:
needs_restore = false;
needs_reset = false;
if (hba->ufshcd_state != UFSHCD_STATE_ERROR)
hba->ufshcd_state = UFSHCD_STATE_RESET;
/*
* A full reset and restore might have happened after preparation
* is finished, double check whether we should stop.
*/
if (ufshcd_err_handling_should_stop(hba))
goto skip_err_handling;
if (hba->dev_quirks & UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS) {
bool ret;
spin_unlock_irqrestore(hba->host->host_lock, flags);
/* release the lock as ufshcd_quirk_dl_nac_errors() may sleep */
ret = ufshcd_quirk_dl_nac_errors(hba);
spin_lock_irqsave(hba->host->host_lock, flags);
if (!ret && ufshcd_err_handling_should_stop(hba))
goto skip_err_handling;
}
if ((hba->saved_err & (INT_FATAL_ERRORS | UFSHCD_UIC_HIBERN8_MASK)) ||
(hba->saved_uic_err &&
(hba->saved_uic_err != UFSHCD_UIC_PA_GENERIC_ERROR))) {
bool pr_prdt = !!(hba->saved_err & SYSTEM_BUS_FATAL_ERROR);
spin_unlock_irqrestore(hba->host->host_lock, flags);
ufshcd_print_host_state(hba);
ufshcd_print_pwr_info(hba);
ufshcd_print_evt_hist(hba);
ufshcd_print_tmrs(hba, hba->outstanding_tasks);
ufshcd_print_trs_all(hba, pr_prdt);
spin_lock_irqsave(hba->host->host_lock, flags);
}
/*
* if host reset is required then skip clearing the pending
* transfers forcefully because they will get cleared during
* host reset and restore
*/
if (hba->force_reset || ufshcd_is_link_broken(hba) ||
ufshcd_is_saved_err_fatal(hba) ||
((hba->saved_err & UIC_ERROR) &&
(hba->saved_uic_err & (UFSHCD_UIC_DL_NAC_RECEIVED_ERROR |
UFSHCD_UIC_DL_TCx_REPLAY_ERROR)))) {
needs_reset = true;
goto do_reset;
}
/*
* If LINERESET was caught, UFS might have been put to PWM mode,
* check if power mode restore is needed.
*/
if (hba->saved_uic_err & UFSHCD_UIC_PA_GENERIC_ERROR) {
hba->saved_uic_err &= ~UFSHCD_UIC_PA_GENERIC_ERROR;
if (!hba->saved_uic_err)
hba->saved_err &= ~UIC_ERROR;
spin_unlock_irqrestore(hba->host->host_lock, flags);
if (ufshcd_is_pwr_mode_restore_needed(hba))
needs_restore = true;
spin_lock_irqsave(hba->host->host_lock, flags);
if (!hba->saved_err && !needs_restore)
goto skip_err_handling;
}
hba->silence_err_logs = true;
/* release lock as clear command might sleep */
spin_unlock_irqrestore(hba->host->host_lock, flags);
needs_reset = ufshcd_abort_all(hba);
spin_lock_irqsave(hba->host->host_lock, flags);
hba->silence_err_logs = false;
if (needs_reset)
goto do_reset;
/*
 * After all requests and tasks are cleared from the doorbell,
 * it is now safe to restore the power mode.
*/
if (needs_restore) {
spin_unlock_irqrestore(hba->host->host_lock, flags);
/*
* Hold the scaling lock just in case dev cmds
* are sent via bsg and/or sysfs.
*/
down_write(&hba->clk_scaling_lock);
hba->force_pmc = true;
pmc_err = ufshcd_config_pwr_mode(hba, &(hba->pwr_info));
if (pmc_err) {
needs_reset = true;
dev_err(hba->dev, "%s: Failed to restore power mode, err = %d\n",
__func__, pmc_err);
}
hba->force_pmc = false;
ufshcd_print_pwr_info(hba);
up_write(&hba->clk_scaling_lock);
spin_lock_irqsave(hba->host->host_lock, flags);
}
do_reset:
/* Fatal errors need reset */
if (needs_reset) {
int err;
hba->force_reset = false;
spin_unlock_irqrestore(hba->host->host_lock, flags);
err = ufshcd_reset_and_restore(hba);
if (err)
dev_err(hba->dev, "%s: reset and restore failed with err %d\n",
__func__, err);
else
ufshcd_recover_pm_error(hba);
spin_lock_irqsave(hba->host->host_lock, flags);
}
skip_err_handling:
if (!needs_reset) {
if (hba->ufshcd_state == UFSHCD_STATE_RESET)
hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL;
if (hba->saved_err || hba->saved_uic_err)
dev_err_ratelimited(hba->dev, "%s: exit: saved_err 0x%x saved_uic_err 0x%x",
__func__, hba->saved_err, hba->saved_uic_err);
}
/* Exit in an operational state or dead */
if (hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL &&
hba->ufshcd_state != UFSHCD_STATE_ERROR) {
if (--retries)
goto again;
hba->ufshcd_state = UFSHCD_STATE_ERROR;
}
ufshcd_clear_eh_in_progress(hba);
spin_unlock_irqrestore(hba->host->host_lock, flags);
ufshcd_err_handling_unprepare(hba);
up(&hba->host_sem);
dev_info(hba->dev, "%s finished; HBA state %s\n", __func__,
ufshcd_state_name[hba->ufshcd_state]);
}
/**
* ufshcd_update_uic_error - check and set fatal UIC error flags.
* @hba: per-adapter instance
*
* Return:
* IRQ_HANDLED - If interrupt is valid
* IRQ_NONE - If invalid interrupt
*/
static irqreturn_t ufshcd_update_uic_error(struct ufs_hba *hba)
{
u32 reg;
irqreturn_t retval = IRQ_NONE;
/* PHY layer error */
reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_PHY_ADAPTER_LAYER);
if ((reg & UIC_PHY_ADAPTER_LAYER_ERROR) &&
(reg & UIC_PHY_ADAPTER_LAYER_ERROR_CODE_MASK)) {
ufshcd_update_evt_hist(hba, UFS_EVT_PA_ERR, reg);
/*
* To know whether this error is fatal or not, DB timeout
* must be checked but this error is handled separately.
*/
if (reg & UIC_PHY_ADAPTER_LAYER_LANE_ERR_MASK)
dev_dbg(hba->dev, "%s: UIC Lane error reported\n",
__func__);
/* Got a LINERESET indication. */
if (reg & UIC_PHY_ADAPTER_LAYER_GENERIC_ERROR) {
struct uic_command *cmd = NULL;
hba->uic_error |= UFSHCD_UIC_PA_GENERIC_ERROR;
if (hba->uic_async_done && hba->active_uic_cmd)
cmd = hba->active_uic_cmd;
/*
* Ignore the LINERESET during power mode change
* operation via DME_SET command.
*/
if (cmd && (cmd->command == UIC_CMD_DME_SET))
hba->uic_error &= ~UFSHCD_UIC_PA_GENERIC_ERROR;
}
retval |= IRQ_HANDLED;
}
/* PA_INIT_ERROR is fatal and needs UIC reset */
reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_DATA_LINK_LAYER);
if ((reg & UIC_DATA_LINK_LAYER_ERROR) &&
(reg & UIC_DATA_LINK_LAYER_ERROR_CODE_MASK)) {
ufshcd_update_evt_hist(hba, UFS_EVT_DL_ERR, reg);
if (reg & UIC_DATA_LINK_LAYER_ERROR_PA_INIT)
hba->uic_error |= UFSHCD_UIC_DL_PA_INIT_ERROR;
else if (hba->dev_quirks &
UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS) {
if (reg & UIC_DATA_LINK_LAYER_ERROR_NAC_RECEIVED)
hba->uic_error |=
UFSHCD_UIC_DL_NAC_RECEIVED_ERROR;
else if (reg & UIC_DATA_LINK_LAYER_ERROR_TCx_REPLAY_TIMEOUT)
hba->uic_error |= UFSHCD_UIC_DL_TCx_REPLAY_ERROR;
}
retval |= IRQ_HANDLED;
}
/* UIC NL/TL/DME errors need software retry */
reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_NETWORK_LAYER);
if ((reg & UIC_NETWORK_LAYER_ERROR) &&
(reg & UIC_NETWORK_LAYER_ERROR_CODE_MASK)) {
ufshcd_update_evt_hist(hba, UFS_EVT_NL_ERR, reg);
hba->uic_error |= UFSHCD_UIC_NL_ERROR;
retval |= IRQ_HANDLED;
}
reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_TRANSPORT_LAYER);
if ((reg & UIC_TRANSPORT_LAYER_ERROR) &&
(reg & UIC_TRANSPORT_LAYER_ERROR_CODE_MASK)) {
ufshcd_update_evt_hist(hba, UFS_EVT_TL_ERR, reg);
hba->uic_error |= UFSHCD_UIC_TL_ERROR;
retval |= IRQ_HANDLED;
}
reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_DME);
if ((reg & UIC_DME_ERROR) &&
(reg & UIC_DME_ERROR_CODE_MASK)) {
ufshcd_update_evt_hist(hba, UFS_EVT_DME_ERR, reg);
hba->uic_error |= UFSHCD_UIC_DME_ERROR;
retval |= IRQ_HANDLED;
}
dev_dbg(hba->dev, "%s: UIC error flags = 0x%08x\n",
__func__, hba->uic_error);
return retval;
}
/**
* ufshcd_check_errors - Check for errors that need s/w attention
* @hba: per-adapter instance
* @intr_status: interrupt status generated by the controller
*
* Return:
* IRQ_HANDLED - If interrupt is valid
* IRQ_NONE - If invalid interrupt
*/
static irqreturn_t ufshcd_check_errors(struct ufs_hba *hba, u32 intr_status)
{
bool queue_eh_work = false;
irqreturn_t retval = IRQ_NONE;
spin_lock(hba->host->host_lock);
hba->errors |= UFSHCD_ERROR_MASK & intr_status;
if (hba->errors & INT_FATAL_ERRORS) {
ufshcd_update_evt_hist(hba, UFS_EVT_FATAL_ERR,
hba->errors);
queue_eh_work = true;
}
if (hba->errors & UIC_ERROR) {
hba->uic_error = 0;
retval = ufshcd_update_uic_error(hba);
if (hba->uic_error)
queue_eh_work = true;
}
if (hba->errors & UFSHCD_UIC_HIBERN8_MASK) {
dev_err(hba->dev,
"%s: Auto Hibern8 %s failed - status: 0x%08x, upmcrs: 0x%08x\n",
__func__, (hba->errors & UIC_HIBERNATE_ENTER) ?
"Enter" : "Exit",
hba->errors, ufshcd_get_upmcrs(hba));
ufshcd_update_evt_hist(hba, UFS_EVT_AUTO_HIBERN8_ERR,
hba->errors);
ufshcd_set_link_broken(hba);
queue_eh_work = true;
}
if (queue_eh_work) {
/*
* Update the transfer error masks to sticky bits; do this
* irrespective of the current ufshcd_state.
*/
hba->saved_err |= hba->errors;
hba->saved_uic_err |= hba->uic_error;
/* dump controller state before resetting */
if ((hba->saved_err &
(INT_FATAL_ERRORS | UFSHCD_UIC_HIBERN8_MASK)) ||
(hba->saved_uic_err &&
(hba->saved_uic_err != UFSHCD_UIC_PA_GENERIC_ERROR))) {
dev_err(hba->dev, "%s: saved_err 0x%x saved_uic_err 0x%x\n",
__func__, hba->saved_err,
hba->saved_uic_err);
ufshcd_dump_regs(hba, 0, UFSHCI_REG_SPACE_SIZE,
"host_regs: ");
ufshcd_print_pwr_info(hba);
}
ufshcd_schedule_eh_work(hba);
retval |= IRQ_HANDLED;
}
/*
* if (!queue_eh_work) -
* Other errors are either non-fatal ones from which the host
* recovers by itself without s/w intervention, or errors that
* will be handled by the SCSI core layer.
*/
hba->errors = 0;
hba->uic_error = 0;
spin_unlock(hba->host->host_lock);
return retval;
}
/**
* ufshcd_tmc_handler - handle task management function completion
* @hba: per adapter instance
*
* Return:
* IRQ_HANDLED - If interrupt is valid
* IRQ_NONE - If invalid interrupt
*/
static irqreturn_t ufshcd_tmc_handler(struct ufs_hba *hba)
{
unsigned long flags, pending, issued;
irqreturn_t ret = IRQ_NONE;
int tag;
spin_lock_irqsave(hba->host->host_lock, flags);
pending = ufshcd_readl(hba, REG_UTP_TASK_REQ_DOOR_BELL);
issued = hba->outstanding_tasks & ~pending;
for_each_set_bit(tag, &issued, hba->nutmrs) {
struct request *req = hba->tmf_rqs[tag];
struct completion *c = req->end_io_data;
complete(c);
ret = IRQ_HANDLED;
}
spin_unlock_irqrestore(hba->host->host_lock, flags);
return ret;
}
/**
* ufshcd_handle_mcq_cq_events - handle MCQ completion queue events
* @hba: per adapter instance
*
* Return: IRQ_HANDLED if interrupt is handled.
*/
static irqreturn_t ufshcd_handle_mcq_cq_events(struct ufs_hba *hba)
{
struct ufs_hw_queue *hwq;
unsigned long outstanding_cqs;
unsigned int nr_queues;
int i, ret;
u32 events;
ret = ufshcd_vops_get_outstanding_cqs(hba, &outstanding_cqs);
if (ret)
outstanding_cqs = (1U << hba->nr_hw_queues) - 1;
/* Exclude the poll queues */
nr_queues = hba->nr_hw_queues - hba->nr_queues[HCTX_TYPE_POLL];
for_each_set_bit(i, &outstanding_cqs, nr_queues) {
hwq = &hba->uhq[i];
events = ufshcd_mcq_read_cqis(hba, i);
if (events)
ufshcd_mcq_write_cqis(hba, events, i);
if (events & UFSHCD_MCQ_CQIS_TAIL_ENT_PUSH_STS)
ufshcd_mcq_poll_cqe_lock(hba, hwq);
}
return IRQ_HANDLED;
}
/**
* ufshcd_sl_intr - Interrupt service routine
* @hba: per adapter instance
* @intr_status: contains interrupts generated by the controller
*
* Return:
* IRQ_HANDLED - If interrupt is valid
* IRQ_NONE - If invalid interrupt
*/
static irqreturn_t ufshcd_sl_intr(struct ufs_hba *hba, u32 intr_status)
{
irqreturn_t retval = IRQ_NONE;
if (intr_status & UFSHCD_UIC_MASK)
retval |= ufshcd_uic_cmd_compl(hba, intr_status);
if (intr_status & UFSHCD_ERROR_MASK || hba->errors)
retval |= ufshcd_check_errors(hba, intr_status);
if (intr_status & UTP_TASK_REQ_COMPL)
retval |= ufshcd_tmc_handler(hba);
if (intr_status & UTP_TRANSFER_REQ_COMPL)
retval |= ufshcd_transfer_req_compl(hba);
if (intr_status & MCQ_CQ_EVENT_STATUS)
retval |= ufshcd_handle_mcq_cq_events(hba);
return retval;
}
/**
* ufshcd_intr - Main interrupt service routine
* @irq: irq number
* @__hba: pointer to adapter instance
*
* Return:
* IRQ_HANDLED - If interrupt is valid
* IRQ_NONE - If invalid interrupt
*/
static irqreturn_t ufshcd_intr(int irq, void *__hba)
{
u32 intr_status, enabled_intr_status = 0;
irqreturn_t retval = IRQ_NONE;
struct ufs_hba *hba = __hba;
int retries = hba->nutrs;
intr_status = ufshcd_readl(hba, REG_INTERRUPT_STATUS);
hba->ufs_stats.last_intr_status = intr_status;
hba->ufs_stats.last_intr_ts = local_clock();
/*
* There could be a max of hba->nutrs reqs in flight and, in the worst case,
* the reqs get finished one by one after the interrupt status is read.
* Make sure we handle them by checking the interrupt status again in a
* loop until we have processed all of the reqs before returning.
*/
while (intr_status && retries--) {
enabled_intr_status =
intr_status & ufshcd_readl(hba, REG_INTERRUPT_ENABLE);
ufshcd_writel(hba, intr_status, REG_INTERRUPT_STATUS);
if (enabled_intr_status)
retval |= ufshcd_sl_intr(hba, enabled_intr_status);
intr_status = ufshcd_readl(hba, REG_INTERRUPT_STATUS);
}
if (enabled_intr_status && retval == IRQ_NONE &&
(!(enabled_intr_status & UTP_TRANSFER_REQ_COMPL) ||
hba->outstanding_reqs) && !ufshcd_eh_in_progress(hba)) {
dev_err(hba->dev, "%s: Unhandled interrupt 0x%08x (0x%08x, 0x%08x)\n",
__func__,
intr_status,
hba->ufs_stats.last_intr_status,
enabled_intr_status);
ufshcd_dump_regs(hba, 0, UFSHCI_REG_SPACE_SIZE, "host_regs: ");
}
return retval;
}
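/**
 * ufshcd_clear_tm_cmd - clear a pending task management request from the doorbell
 * @hba: per adapter instance
 * @tag: task management request tag to clear
 *
 * Clears the UTMRL slot for @tag and polls the task management doorbell for
 * up to one second until the hardware has cleared the bit.
 *
 * Return: zero on success, non-zero error value on failure.
 */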
static int ufshcd_clear_tm_cmd(struct ufs_hba *hba, int tag)
{
int err = 0;
u32 mask = 1 << tag;
unsigned long flags;
if (!test_bit(tag, &hba->outstanding_tasks))
goto out;
spin_lock_irqsave(hba->host->host_lock, flags);
ufshcd_utmrl_clear(hba, tag);
spin_unlock_irqrestore(hba->host->host_lock, flags);
/* poll for max. 1 sec to clear door bell register by h/w */
err = ufshcd_wait_for_register(hba,
REG_UTP_TASK_REQ_DOOR_BELL,
mask, 0, 1000, 1000);
dev_err(hba->dev, "Clearing task management function with tag %d %s\n",
tag, err ? "failed" : "succeeded");
out:
return err;
}
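/**
 * __ufshcd_issue_tm_cmd - issue a prepared task management request descriptor
 * @hba: per adapter instance
 * @treq: UTP task request descriptor to send
 * @tm_function: task management function opcode
 *
 * Allocates a tag from the TMF queue, rings the task management doorbell and
 * waits up to TM_CMD_TIMEOUT for completion. On success the response
 * descriptor is copied back into @treq.
 *
 * Return: zero on success, -ETIMEDOUT if the command timed out, another
 * non-zero error value on other failures.
 */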
static int __ufshcd_issue_tm_cmd(struct ufs_hba *hba,
struct utp_task_req_desc *treq, u8 tm_function)
{
struct request_queue *q = hba->tmf_queue;
struct Scsi_Host *host = hba->host;
DECLARE_COMPLETION_ONSTACK(wait);
struct request *req;
unsigned long flags;
int task_tag, err;
/*
* blk_mq_alloc_request() is used here only to get a free tag.
*/
req = blk_mq_alloc_request(q, REQ_OP_DRV_OUT, 0);
if (IS_ERR(req))
return PTR_ERR(req);
req->end_io_data = &wait;
ufshcd_hold(hba);
spin_lock_irqsave(host->host_lock, flags);
task_tag = req->tag;
WARN_ONCE(task_tag < 0 || task_tag >= hba->nutmrs, "Invalid tag %d\n",
task_tag);
hba->tmf_rqs[req->tag] = req;
treq->upiu_req.req_header.task_tag = task_tag;
memcpy(hba->utmrdl_base_addr + task_tag, treq, sizeof(*treq));
ufshcd_vops_setup_task_mgmt(hba, task_tag, tm_function);
/* send command to the controller */
__set_bit(task_tag, &hba->outstanding_tasks);
ufshcd_writel(hba, 1 << task_tag, REG_UTP_TASK_REQ_DOOR_BELL);
/* Make sure that doorbell is committed immediately */
wmb();
spin_unlock_irqrestore(host->host_lock, flags);
ufshcd_add_tm_upiu_trace(hba, task_tag, UFS_TM_SEND);
/* wait until the task management command is completed */
err = wait_for_completion_io_timeout(&wait,
msecs_to_jiffies(TM_CMD_TIMEOUT));
if (!err) {
ufshcd_add_tm_upiu_trace(hba, task_tag, UFS_TM_ERR);
dev_err(hba->dev, "%s: task management cmd 0x%.2x timed-out\n",
__func__, tm_function);
if (ufshcd_clear_tm_cmd(hba, task_tag))
dev_WARN(hba->dev, "%s: unable to clear tm cmd (slot %d) after timeout\n",
__func__, task_tag);
err = -ETIMEDOUT;
} else {
err = 0;
memcpy(treq, hba->utmrdl_base_addr + task_tag, sizeof(*treq));
ufshcd_add_tm_upiu_trace(hba, task_tag, UFS_TM_COMP);
}
spin_lock_irqsave(hba->host->host_lock, flags);
hba->tmf_rqs[req->tag] = NULL;
__clear_bit(task_tag, &hba->outstanding_tasks);
spin_unlock_irqrestore(hba->host->host_lock, flags);
ufshcd_release(hba);
blk_mq_free_request(req);
return err;
}
/**
* ufshcd_issue_tm_cmd - issues task management commands to controller
* @hba: per adapter instance
* @lun_id: LUN ID to which TM command is sent
* @task_id: task ID to which the TM command is applicable
* @tm_function: task management function opcode
* @tm_response: task management service response return value
*
* Return: non-zero value on error, zero on success.
*/
static int ufshcd_issue_tm_cmd(struct ufs_hba *hba, int lun_id, int task_id,
u8 tm_function, u8 *tm_response)
{
struct utp_task_req_desc treq = { };
enum utp_ocs ocs_value;
int err;
/* Configure task request descriptor */
treq.header.interrupt = 1;
treq.header.ocs = OCS_INVALID_COMMAND_STATUS;
/* Configure task request UPIU */
treq.upiu_req.req_header.transaction_code = UPIU_TRANSACTION_TASK_REQ;
treq.upiu_req.req_header.lun = lun_id;
treq.upiu_req.req_header.tm_function = tm_function;
/*
* The host shall provide the same value for LUN field in the basic
* header and for Input Parameter.
*/
treq.upiu_req.input_param1 = cpu_to_be32(lun_id);
treq.upiu_req.input_param2 = cpu_to_be32(task_id);
err = __ufshcd_issue_tm_cmd(hba, &treq, tm_function);
if (err == -ETIMEDOUT)
return err;
ocs_value = treq.header.ocs & MASK_OCS;
if (ocs_value != OCS_SUCCESS)
dev_err(hba->dev, "%s: failed, ocs = 0x%x\n",
__func__, ocs_value);
else if (tm_response)
*tm_response = be32_to_cpu(treq.upiu_rsp.output_param1) &
MASK_TM_SERVICE_RESP;
return err;
}
/**
* ufshcd_issue_devman_upiu_cmd - API for sending "utrd" type requests
* @hba: per-adapter instance
* @req_upiu: upiu request
* @rsp_upiu: upiu reply
* @desc_buff: pointer to descriptor buffer, NULL if NA
* @buff_len: descriptor size, 0 if NA
* @cmd_type: specifies the type (NOP, Query...)
* @desc_op: descriptor operation
*
* These types of requests use the UTP Transfer Request Descriptor (UTRD).
* Therefore, they "ride" the device management infrastructure: they use its
* tag and task work queues.
*
* Since there is only one available tag for device management commands,
* the caller is expected to hold the hba->dev_cmd.lock mutex.
*
* Return: 0 upon success; < 0 upon failure.
*/
static int ufshcd_issue_devman_upiu_cmd(struct ufs_hba *hba,
struct utp_upiu_req *req_upiu,
struct utp_upiu_req *rsp_upiu,
u8 *desc_buff, int *buff_len,
enum dev_cmd_type cmd_type,
enum query_opcode desc_op)
{
DECLARE_COMPLETION_ONSTACK(wait);
const u32 tag = hba->reserved_slot;
struct ufshcd_lrb *lrbp;
int err = 0;
u8 upiu_flags;
/* Protects use of hba->reserved_slot. */
lockdep_assert_held(&hba->dev_cmd.lock);
down_read(&hba->clk_scaling_lock);
lrbp = &hba->lrb[tag];
lrbp->cmd = NULL;
lrbp->task_tag = tag;
lrbp->lun = 0;
lrbp->intr_cmd = true;
ufshcd_prepare_lrbp_crypto(NULL, lrbp);
hba->dev_cmd.type = cmd_type;
if (hba->ufs_version <= ufshci_version(1, 1))
lrbp->command_type = UTP_CMD_TYPE_DEV_MANAGE;
else
lrbp->command_type = UTP_CMD_TYPE_UFS_STORAGE;
/* update the task tag in the request upiu */
req_upiu->header.task_tag = tag;
ufshcd_prepare_req_desc_hdr(lrbp, &upiu_flags, DMA_NONE, 0);
/* just copy the upiu request as it is */
memcpy(lrbp->ucd_req_ptr, req_upiu, sizeof(*lrbp->ucd_req_ptr));
if (desc_buff && desc_op == UPIU_QUERY_OPCODE_WRITE_DESC) {
/* The Data Segment Area is optional depending upon the query
* function value. For WRITE DESCRIPTOR, the data segment
* follows right after the TSF.
*/
memcpy(lrbp->ucd_req_ptr + 1, desc_buff, *buff_len);
*buff_len = 0;
}
memset(lrbp->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp));
hba->dev_cmd.complete = &wait;
ufshcd_add_query_upiu_trace(hba, UFS_QUERY_SEND, lrbp->ucd_req_ptr);
ufshcd_send_command(hba, tag, hba->dev_cmd_queue);
/*
* Ignore the return value here - ufshcd_check_query_response is
* bound to fail since dev_cmd.query and dev_cmd.type were left empty.
* Read the response directly, ignoring all errors.
*/
ufshcd_wait_for_dev_cmd(hba, lrbp, QUERY_REQ_TIMEOUT);
/* just copy the upiu response as it is */
memcpy(rsp_upiu, lrbp->ucd_rsp_ptr, sizeof(*rsp_upiu));
if (desc_buff && desc_op == UPIU_QUERY_OPCODE_READ_DESC) {
u8 *descp = (u8 *)lrbp->ucd_rsp_ptr + sizeof(*rsp_upiu);
u16 resp_len = be16_to_cpu(lrbp->ucd_rsp_ptr->header
.data_segment_length);
if (*buff_len >= resp_len) {
memcpy(desc_buff, descp, resp_len);
*buff_len = resp_len;
} else {
dev_warn(hba->dev,
"%s: rsp size %d is bigger than buffer size %d",
__func__, resp_len, *buff_len);
*buff_len = 0;
err = -EINVAL;
}
}
ufshcd_add_query_upiu_trace(hba, err ? UFS_QUERY_ERR : UFS_QUERY_COMP,
(struct utp_upiu_req *)lrbp->ucd_rsp_ptr);
up_read(&hba->clk_scaling_lock);
return err;
}
/**
* ufshcd_exec_raw_upiu_cmd - API function for sending raw upiu commands
* @hba: per-adapter instance
* @req_upiu: upiu request
* @rsp_upiu: upiu reply - only 8 DW as we do not support scsi commands
* @msgcode: message code, one of UPIU Transaction Codes Initiator to Target
* @desc_buff: pointer to descriptor buffer, NULL if NA
* @buff_len: descriptor size, 0 if NA
* @desc_op: descriptor operation
*
* Supports UTP Transfer requests (nop and query), and UTP Task
* Management requests.
* It is up to the caller to fill the UPIU content properly, as it will
* be copied without any further input validation.
*
* Return: 0 upon success; < 0 upon failure.
*/
int ufshcd_exec_raw_upiu_cmd(struct ufs_hba *hba,
struct utp_upiu_req *req_upiu,
struct utp_upiu_req *rsp_upiu,
enum upiu_request_transaction msgcode,
u8 *desc_buff, int *buff_len,
enum query_opcode desc_op)
{
int err;
enum dev_cmd_type cmd_type = DEV_CMD_TYPE_QUERY;
struct utp_task_req_desc treq = { };
enum utp_ocs ocs_value;
u8 tm_f = req_upiu->header.tm_function;
switch (msgcode) {
case UPIU_TRANSACTION_NOP_OUT:
cmd_type = DEV_CMD_TYPE_NOP;
fallthrough;
case UPIU_TRANSACTION_QUERY_REQ:
ufshcd_hold(hba);
mutex_lock(&hba->dev_cmd.lock);
err = ufshcd_issue_devman_upiu_cmd(hba, req_upiu, rsp_upiu,
desc_buff, buff_len,
cmd_type, desc_op);
mutex_unlock(&hba->dev_cmd.lock);
ufshcd_release(hba);
break;
case UPIU_TRANSACTION_TASK_REQ:
treq.header.interrupt = 1;
treq.header.ocs = OCS_INVALID_COMMAND_STATUS;
memcpy(&treq.upiu_req, req_upiu, sizeof(*req_upiu));
err = __ufshcd_issue_tm_cmd(hba, &treq, tm_f);
if (err == -ETIMEDOUT)
break;
ocs_value = treq.header.ocs & MASK_OCS;
if (ocs_value != OCS_SUCCESS) {
dev_err(hba->dev, "%s: failed, ocs = 0x%x\n", __func__,
ocs_value);
break;
}
memcpy(rsp_upiu, &treq.upiu_rsp, sizeof(*rsp_upiu));
break;
default:
err = -EINVAL;
break;
}
return err;
}
/**
* ufshcd_advanced_rpmb_req_handler - handle advanced RPMB request
* @hba: per adapter instance
* @req_upiu: upiu request
* @rsp_upiu: upiu reply
* @req_ehs: EHS field which contains Advanced RPMB Request Message
* @rsp_ehs: EHS field which returns Advanced RPMB Response Message
* @sg_cnt: The number of sg lists actually used
* @sg_list: Pointer to SG list when DATA IN/OUT UPIU is required in ARPMB operation
* @dir: DMA direction
*
* Return: zero on success, non-zero on failure.
*/
int ufshcd_advanced_rpmb_req_handler(struct ufs_hba *hba, struct utp_upiu_req *req_upiu,
struct utp_upiu_req *rsp_upiu, struct ufs_ehs *req_ehs,
struct ufs_ehs *rsp_ehs, int sg_cnt, struct scatterlist *sg_list,
enum dma_data_direction dir)
{
DECLARE_COMPLETION_ONSTACK(wait);
const u32 tag = hba->reserved_slot;
struct ufshcd_lrb *lrbp;
int err = 0;
int result;
u8 upiu_flags;
u8 *ehs_data;
u16 ehs_len;
/* Protects use of hba->reserved_slot. */
ufshcd_hold(hba);
mutex_lock(&hba->dev_cmd.lock);
down_read(&hba->clk_scaling_lock);
lrbp = &hba->lrb[tag];
lrbp->cmd = NULL;
lrbp->task_tag = tag;
lrbp->lun = UFS_UPIU_RPMB_WLUN;
lrbp->intr_cmd = true;
ufshcd_prepare_lrbp_crypto(NULL, lrbp);
hba->dev_cmd.type = DEV_CMD_TYPE_RPMB;
/* Advanced RPMB starts from UFS 4.0, so its command type is UTP_CMD_TYPE_UFS_STORAGE */
lrbp->command_type = UTP_CMD_TYPE_UFS_STORAGE;
/*
* According to the UFSHCI 4.0 specification, page 24: if EHSLUTRDS is 0, the host
* controller takes the EHS length from the CMD UPIU and the SW driver uses the EHS
* Length field in the CMD UPIU. If it is 1, the HW controller takes the EHS length from the UTRD.
*/
if (hba->capabilities & MASK_EHSLUTRD_SUPPORTED)
ufshcd_prepare_req_desc_hdr(lrbp, &upiu_flags, dir, 2);
else
ufshcd_prepare_req_desc_hdr(lrbp, &upiu_flags, dir, 0);
/* update the task tag */
req_upiu->header.task_tag = tag;
/* copy the UPIU(contains CDB) request as it is */
memcpy(lrbp->ucd_req_ptr, req_upiu, sizeof(*lrbp->ucd_req_ptr));
/* Copy EHS, starting with byte32, immediately after the CDB package */
memcpy(lrbp->ucd_req_ptr + 1, req_ehs, sizeof(*req_ehs));
if (dir != DMA_NONE && sg_list)
ufshcd_sgl_to_prdt(hba, lrbp, sg_cnt, sg_list);
memset(lrbp->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp));
hba->dev_cmd.complete = &wait;
ufshcd_send_command(hba, tag, hba->dev_cmd_queue);
err = ufshcd_wait_for_dev_cmd(hba, lrbp, ADVANCED_RPMB_REQ_TIMEOUT);
if (!err) {
/* Just copy the upiu response as it is */
memcpy(rsp_upiu, lrbp->ucd_rsp_ptr, sizeof(*rsp_upiu));
/* Get the response UPIU result */
result = (lrbp->ucd_rsp_ptr->header.response << 8) |
lrbp->ucd_rsp_ptr->header.status;
ehs_len = lrbp->ucd_rsp_ptr->header.ehs_length;
/*
* Since the bLength in EHS indicates the total size of the EHS Header and EHS Data
* in 32 Byte units, the value of the bLength Request/Response for Advanced RPMB
* Message is 02h
*/
if (ehs_len == 2 && rsp_ehs) {
/*
* ucd_rsp_ptr points to a buffer with a length of 512 bytes
* (ALIGNED_UPIU_SIZE = 512), and the EHS data just starts from byte32
*/
ehs_data = (u8 *)lrbp->ucd_rsp_ptr + EHS_OFFSET_IN_RESPONSE;
memcpy(rsp_ehs, ehs_data, ehs_len * 32);
}
}
up_read(&hba->clk_scaling_lock);
mutex_unlock(&hba->dev_cmd.lock);
ufshcd_release(hba);
return err ? : result;
}
/**
* ufshcd_eh_device_reset_handler() - Reset a single logical unit.
* @cmd: SCSI command pointer
*
* Return: SUCCESS or FAILED.
*/
static int ufshcd_eh_device_reset_handler(struct scsi_cmnd *cmd)
{
unsigned long flags, pending_reqs = 0, not_cleared = 0;
struct Scsi_Host *host;
struct ufs_hba *hba;
struct ufs_hw_queue *hwq;
struct ufshcd_lrb *lrbp;
u32 pos, not_cleared_mask = 0;
int err;
u8 resp = 0xF, lun;
host = cmd->device->host;
hba = shost_priv(host);
lun = ufshcd_scsi_to_upiu_lun(cmd->device->lun);
err = ufshcd_issue_tm_cmd(hba, lun, 0, UFS_LOGICAL_RESET, &resp);
if (err || resp != UPIU_TASK_MANAGEMENT_FUNC_COMPL) {
if (!err)
err = resp;
goto out;
}
if (is_mcq_enabled(hba)) {
for (pos = 0; pos < hba->nutrs; pos++) {
lrbp = &hba->lrb[pos];
if (ufshcd_cmd_inflight(lrbp->cmd) &&
lrbp->lun == lun) {
ufshcd_clear_cmd(hba, pos);
hwq = ufshcd_mcq_req_to_hwq(hba, scsi_cmd_to_rq(lrbp->cmd));
ufshcd_mcq_poll_cqe_lock(hba, hwq);
}
}
err = 0;
goto out;
}
/* clear the commands that were pending for corresponding LUN */
spin_lock_irqsave(&hba->outstanding_lock, flags);
for_each_set_bit(pos, &hba->outstanding_reqs, hba->nutrs)
if (hba->lrb[pos].lun == lun)
__set_bit(pos, &pending_reqs);
hba->outstanding_reqs &= ~pending_reqs;
spin_unlock_irqrestore(&hba->outstanding_lock, flags);
for_each_set_bit(pos, &pending_reqs, hba->nutrs) {
if (ufshcd_clear_cmd(hba, pos) < 0) {
spin_lock_irqsave(&hba->outstanding_lock, flags);
not_cleared = 1U << pos &
ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
hba->outstanding_reqs |= not_cleared;
not_cleared_mask |= not_cleared;
spin_unlock_irqrestore(&hba->outstanding_lock, flags);
dev_err(hba->dev, "%s: failed to clear request %d\n",
__func__, pos);
}
}
__ufshcd_transfer_req_compl(hba, pending_reqs & ~not_cleared_mask);
out:
hba->req_abort_count = 0;
ufshcd_update_evt_hist(hba, UFS_EVT_DEV_RESET, (u32)err);
if (!err) {
err = SUCCESS;
} else {
dev_err(hba->dev, "%s: failed with err %d\n", __func__, err);
err = FAILED;
}
return err;
}
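/**
 * ufshcd_set_req_abort_skip - mark requests so that further aborts are skipped
 * @hba: per adapter instance
 * @bitmap: bitmap of request tags whose lrbs should skip subsequent aborts
 */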
static void ufshcd_set_req_abort_skip(struct ufs_hba *hba, unsigned long bitmap)
{
struct ufshcd_lrb *lrbp;
int tag;
for_each_set_bit(tag, &bitmap, hba->nutrs) {
lrbp = &hba->lrb[tag];
lrbp->req_abort_skip = true;
}
}
/**
* ufshcd_try_to_abort_task - abort a specific task
* @hba: Pointer to adapter instance
* @tag: Task tag/index to be aborted
*
* Abort the pending command in the device by sending a UFS_ABORT_TASK task
* management command, and in the host controller by clearing the door-bell
* register. There can be a race between the controller sending the command to
* the device and the abort being issued. To avoid that, first issue
* UFS_QUERY_TASK to check whether the command was really issued and only then
* try to abort it.
*
* Return: zero on success, non-zero on failure.
*/
int ufshcd_try_to_abort_task(struct ufs_hba *hba, int tag)
{
struct ufshcd_lrb *lrbp = &hba->lrb[tag];
int err = 0;
int poll_cnt;
u8 resp = 0xF;
u32 reg;
for (poll_cnt = 100; poll_cnt; poll_cnt--) {
err = ufshcd_issue_tm_cmd(hba, lrbp->lun, lrbp->task_tag,
UFS_QUERY_TASK, &resp);
if (!err && resp == UPIU_TASK_MANAGEMENT_FUNC_SUCCEEDED) {
/* cmd pending in the device */
dev_err(hba->dev, "%s: cmd pending in the device. tag = %d\n",
__func__, tag);
break;
} else if (!err && resp == UPIU_TASK_MANAGEMENT_FUNC_COMPL) {
/*
* cmd not pending in the device, check if it is
* in transition.
*/
dev_err(hba->dev, "%s: cmd at tag %d not pending in the device.\n",
__func__, tag);
if (is_mcq_enabled(hba)) {
/* MCQ mode */
if (ufshcd_cmd_inflight(lrbp->cmd)) {
/* sleep for max. 200us same delay as in SDB mode */
usleep_range(100, 200);
continue;
}
/* command completed already */
dev_err(hba->dev, "%s: cmd at tag=%d is cleared.\n",
__func__, tag);
goto out;
}
/* Single Doorbell Mode */
reg = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
if (reg & (1 << tag)) {
/* sleep for max. 200us to stabilize */
usleep_range(100, 200);
continue;
}
/* command completed already */
dev_err(hba->dev, "%s: cmd at tag %d successfully cleared from DB.\n",
__func__, tag);
goto out;
} else {
dev_err(hba->dev,
"%s: no response from device. tag = %d, err %d\n",
__func__, tag, err);
if (!err)
err = resp; /* service response error */
goto out;
}
}
if (!poll_cnt) {
err = -EBUSY;
goto out;
}
err = ufshcd_issue_tm_cmd(hba, lrbp->lun, lrbp->task_tag,
UFS_ABORT_TASK, &resp);
if (err || resp != UPIU_TASK_MANAGEMENT_FUNC_COMPL) {
if (!err) {
err = resp; /* service response error */
dev_err(hba->dev, "%s: issued. tag = %d, err %d\n",
__func__, tag, err);
}
goto out;
}
err = ufshcd_clear_cmd(hba, tag);
if (err)
dev_err(hba->dev, "%s: Failed clearing cmd at tag %d, err %d\n",
__func__, tag, err);
out:
return err;
}
/**
* ufshcd_abort - scsi host template eh_abort_handler callback
* @cmd: SCSI command pointer
*
* Return: SUCCESS or FAILED.
*/
static int ufshcd_abort(struct scsi_cmnd *cmd)
{
struct Scsi_Host *host = cmd->device->host;
struct ufs_hba *hba = shost_priv(host);
int tag = scsi_cmd_to_rq(cmd)->tag;
struct ufshcd_lrb *lrbp = &hba->lrb[tag];
unsigned long flags;
int err = FAILED;
bool outstanding;
u32 reg;
WARN_ONCE(tag < 0, "Invalid tag %d\n", tag);
ufshcd_hold(hba);
if (!is_mcq_enabled(hba)) {
reg = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
if (!test_bit(tag, &hba->outstanding_reqs)) {
/* If command is already aborted/completed, return FAILED. */
dev_err(hba->dev,
"%s: cmd at tag %d already completed, outstanding=0x%lx, doorbell=0x%x\n",
__func__, tag, hba->outstanding_reqs, reg);
goto release;
}
}
/* Print Transfer Request of aborted task */
dev_info(hba->dev, "%s: Device abort task at tag %d\n", __func__, tag);
/*
* Print detailed info about aborted request.
* As more than one request might get aborted at the same time,
* print full information only for the first aborted request in order
* to reduce repeated printouts. For other aborted requests only print
* basic details.
*/
scsi_print_command(cmd);
if (!hba->req_abort_count) {
ufshcd_update_evt_hist(hba, UFS_EVT_ABORT, tag);
ufshcd_print_evt_hist(hba);
ufshcd_print_host_state(hba);
ufshcd_print_pwr_info(hba);
ufshcd_print_tr(hba, tag, true);
} else {
ufshcd_print_tr(hba, tag, false);
}
hba->req_abort_count++;
if (!is_mcq_enabled(hba) && !(reg & (1 << tag))) {
/* only execute this code in single doorbell mode */
dev_err(hba->dev,
"%s: cmd was completed, but without a notifying intr, tag = %d",
__func__, tag);
__ufshcd_transfer_req_compl(hba, 1UL << tag);
goto release;
}
/*
* Task abort to the device W-LUN is illegal. When this command
* fails due to the spec violation, the next step of SCSI error
* handling will be to send a LU reset which, again, is a spec
* violation. To avoid these unnecessary/illegal steps, first we
* clean up the lrb taken by this cmd and re-set it in
* outstanding_reqs, then queue the eh_work and bail.
*/
if (lrbp->lun == UFS_UPIU_UFS_DEVICE_WLUN) {
ufshcd_update_evt_hist(hba, UFS_EVT_ABORT, lrbp->lun);
spin_lock_irqsave(host->host_lock, flags);
hba->force_reset = true;
ufshcd_schedule_eh_work(hba);
spin_unlock_irqrestore(host->host_lock, flags);
goto release;
}
if (is_mcq_enabled(hba)) {
/* MCQ mode. Branch off to handle abort for mcq mode */
err = ufshcd_mcq_abort(cmd);
goto release;
}
/* Skip task abort in case previous aborts failed and report failure */
if (lrbp->req_abort_skip) {
dev_err(hba->dev, "%s: skipping abort\n", __func__);
ufshcd_set_req_abort_skip(hba, hba->outstanding_reqs);
goto release;
}
err = ufshcd_try_to_abort_task(hba, tag);
if (err) {
dev_err(hba->dev, "%s: failed with err %d\n", __func__, err);
ufshcd_set_req_abort_skip(hba, hba->outstanding_reqs);
err = FAILED;
goto release;
}
/*
* Clear the corresponding bit from outstanding_reqs since the command
* has been aborted successfully.
*/
spin_lock_irqsave(&hba->outstanding_lock, flags);
outstanding = __test_and_clear_bit(tag, &hba->outstanding_reqs);
spin_unlock_irqrestore(&hba->outstanding_lock, flags);
if (outstanding)
ufshcd_release_scsi_cmd(hba, lrbp);
err = SUCCESS;
release:
/* Matches the ufshcd_hold() call at the start of this function. */
ufshcd_release(hba);
return err;
}
/**
* ufshcd_host_reset_and_restore - reset and restore host controller
* @hba: per-adapter instance
*
* Note that host controller reset may issue DME_RESET to
* local and remote (device) Uni-Pro stack and the attributes
* are reset to default state.
*
* Return: zero on success, non-zero on failure.
*/
static int ufshcd_host_reset_and_restore(struct ufs_hba *hba)
{
int err;
/*
* Stop the host controller and complete the requests
* cleared by h/w
*/
ufshcd_hba_stop(hba);
hba->silence_err_logs = true;
ufshcd_complete_requests(hba, true);
hba->silence_err_logs = false;
/* scale up clocks to max frequency before full reinitialization */
ufshcd_scale_clks(hba, true);
err = ufshcd_hba_enable(hba);
/* Establish the link again and restore the device */
if (!err)
err = ufshcd_probe_hba(hba, false);
if (err)
dev_err(hba->dev, "%s: Host init failed %d\n", __func__, err);
ufshcd_update_evt_hist(hba, UFS_EVT_HOST_RESET, (u32)err);
return err;
}
/**
* ufshcd_reset_and_restore - reset and re-initialize host/device
* @hba: per-adapter instance
*
* Reset and recover device, host and re-establish link. This
* is helpful to recover the communication in fatal error conditions.
*
* Return: zero on success, non-zero on failure.
*/
static int ufshcd_reset_and_restore(struct ufs_hba *hba)
{
u32 saved_err = 0;
u32 saved_uic_err = 0;
int err = 0;
unsigned long flags;
int retries = MAX_HOST_RESET_RETRIES;
spin_lock_irqsave(hba->host->host_lock, flags);
do {
/*
* This is a fresh start; cache and clear the saved errors first,
* in case new errors are generated during reset and restore.
*/
saved_err |= hba->saved_err;
saved_uic_err |= hba->saved_uic_err;
hba->saved_err = 0;
hba->saved_uic_err = 0;
hba->force_reset = false;
hba->ufshcd_state = UFSHCD_STATE_RESET;
spin_unlock_irqrestore(hba->host->host_lock, flags);
/* Reset the attached device */
ufshcd_device_reset(hba);
err = ufshcd_host_reset_and_restore(hba);
spin_lock_irqsave(hba->host->host_lock, flags);
if (err)
continue;
/* Do not exit unless operational or dead */
if (hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL &&
hba->ufshcd_state != UFSHCD_STATE_ERROR &&
hba->ufshcd_state != UFSHCD_STATE_EH_SCHEDULED_NON_FATAL)
err = -EAGAIN;
} while (err && --retries);
/*
* Inform the SCSI mid-layer that we did a reset and allow it to
* handle Unit Attention properly.
*/
scsi_report_bus_reset(hba->host, 0);
if (err) {
hba->ufshcd_state = UFSHCD_STATE_ERROR;
hba->saved_err |= saved_err;
hba->saved_uic_err |= saved_uic_err;
}
spin_unlock_irqrestore(hba->host->host_lock, flags);
return err;
}
/**
* ufshcd_eh_host_reset_handler - host reset handler registered to scsi layer
* @cmd: SCSI command pointer
*
* Return: SUCCESS or FAILED.
*/
static int ufshcd_eh_host_reset_handler(struct scsi_cmnd *cmd)
{
int err = SUCCESS;
unsigned long flags;
struct ufs_hba *hba;
hba = shost_priv(cmd->device->host);
spin_lock_irqsave(hba->host->host_lock, flags);
hba->force_reset = true;
ufshcd_schedule_eh_work(hba);
dev_err(hba->dev, "%s: reset in progress - 1\n", __func__);
spin_unlock_irqrestore(hba->host->host_lock, flags);
flush_work(&hba->eh_work);
spin_lock_irqsave(hba->host->host_lock, flags);
if (hba->ufshcd_state == UFSHCD_STATE_ERROR)
err = FAILED;
spin_unlock_irqrestore(hba->host->host_lock, flags);
return err;
}
/**
* ufshcd_get_max_icc_level - calculate the ICC level
* @sup_curr_uA: max. current supported by the regulator
* @start_scan: row at the desc table to start scan from
* @buff: power descriptor buffer
*
* Return: calculated max ICC level for specific regulator.
*/
static u32 ufshcd_get_max_icc_level(int sup_curr_uA, u32 start_scan,
const char *buff)
{
int i;
int curr_uA;
u16 data;
u16 unit;
for (i = start_scan; i >= 0; i--) {
data = get_unaligned_be16(&buff[2 * i]);
unit = (data & ATTR_ICC_LVL_UNIT_MASK) >>
ATTR_ICC_LVL_UNIT_OFFSET;
curr_uA = data & ATTR_ICC_LVL_VALUE_MASK;
switch (unit) {
case UFSHCD_NANO_AMP:
curr_uA = curr_uA / 1000;
break;
case UFSHCD_MILI_AMP:
curr_uA = curr_uA * 1000;
break;
case UFSHCD_AMP:
curr_uA = curr_uA * 1000 * 1000;
break;
case UFSHCD_MICRO_AMP:
default:
break;
}
if (sup_curr_uA >= curr_uA)
break;
}
if (i < 0) {
i = 0;
pr_err("%s: Couldn't find valid icc_level = %d", __func__, i);
}
return (u32)i;
}
/**
* ufshcd_find_max_sup_active_icc_level - calculate the max ICC level
* In case regulators are not initialized we'll return 0
* @hba: per-adapter instance
* @desc_buf: power descriptor buffer to extract ICC levels from.
*
* Return: calculated ICC level.
*/
static u32 ufshcd_find_max_sup_active_icc_level(struct ufs_hba *hba,
const u8 *desc_buf)
{
u32 icc_level = 0;
if (!hba->vreg_info.vcc || !hba->vreg_info.vccq ||
!hba->vreg_info.vccq2) {
/*
* Use dev_dbg to avoid messages during runtime PM; otherwise
* messages written back to storage by user space would cause a
* runtime resume, generating more messages and so on in a
* never-ending cycle.
*/
dev_dbg(hba->dev,
"%s: Regulator capability was not set, actvIccLevel=%d",
__func__, icc_level);
goto out;
}
if (hba->vreg_info.vcc->max_uA)
icc_level = ufshcd_get_max_icc_level(
hba->vreg_info.vcc->max_uA,
POWER_DESC_MAX_ACTV_ICC_LVLS - 1,
&desc_buf[PWR_DESC_ACTIVE_LVLS_VCC_0]);
if (hba->vreg_info.vccq->max_uA)
icc_level = ufshcd_get_max_icc_level(
hba->vreg_info.vccq->max_uA,
icc_level,
&desc_buf[PWR_DESC_ACTIVE_LVLS_VCCQ_0]);
if (hba->vreg_info.vccq2->max_uA)
icc_level = ufshcd_get_max_icc_level(
hba->vreg_info.vccq2->max_uA,
icc_level,
&desc_buf[PWR_DESC_ACTIVE_LVLS_VCCQ2_0]);
out:
return icc_level;
}
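/**
 * ufshcd_set_active_icc_lvl - read the power descriptor and program bActiveICCLevel
 * @hba: per-adapter instance
 *
 * Reads the power descriptor, derives the highest ICC level supported by the
 * available regulators and writes it to the bActiveICCLevel attribute.
 */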
static void ufshcd_set_active_icc_lvl(struct ufs_hba *hba)
{
int ret;
u8 *desc_buf;
u32 icc_level;
desc_buf = kzalloc(QUERY_DESC_MAX_SIZE, GFP_KERNEL);
if (!desc_buf)
return;
ret = ufshcd_read_desc_param(hba, QUERY_DESC_IDN_POWER, 0, 0,
desc_buf, QUERY_DESC_MAX_SIZE);
if (ret) {
dev_err(hba->dev,
"%s: Failed reading power descriptor ret = %d",
__func__, ret);
goto out;
}
icc_level = ufshcd_find_max_sup_active_icc_level(hba, desc_buf);
dev_dbg(hba->dev, "%s: setting icc_level 0x%x", __func__, icc_level);
ret = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_WRITE_ATTR,
QUERY_ATTR_IDN_ACTIVE_ICC_LVL, 0, 0, &icc_level);
if (ret)
dev_err(hba->dev,
"%s: Failed configuring bActiveICCLevel = %d ret = %d",
__func__, icc_level, ret);
out:
kfree(desc_buf);
}
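/**
 * ufshcd_blk_pm_runtime_init - set up block layer runtime PM for a W-LU
 * @sdev: SCSI device instance of the well known logical unit
 *
 * Initializes block layer runtime PM for the request queue and, if the
 * device allows it, configures the runtime autosuspend delay.
 */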
static inline void ufshcd_blk_pm_runtime_init(struct scsi_device *sdev)
{
scsi_autopm_get_device(sdev);
blk_pm_runtime_init(sdev->request_queue, &sdev->sdev_gendev);
if (sdev->rpm_autosuspend)
pm_runtime_set_autosuspend_delay(&sdev->sdev_gendev,
RPM_AUTOSUSPEND_DELAY_MS);
scsi_autopm_put_device(sdev);
}
/**
* ufshcd_scsi_add_wlus - Adds required W-LUs
* @hba: per-adapter instance
*
* UFS device specification requires the UFS devices to support 4 well known
* logical units:
* "REPORT_LUNS" (address: 01h)
* "UFS Device" (address: 50h)
* "RPMB" (address: 44h)
* "BOOT" (address: 30h)
* The UFS device's power management needs to be controlled by the "POWER
* CONDITION" field of the SSU (START STOP UNIT) command. But this "power
* condition" field will take effect only when it is sent to the "UFS device"
* well known logical unit, hence we require the scsi_device instance to
* represent this logical unit in order for the UFS host driver to send the
* SSU command for power management.
*
* We also require the scsi_device instance for "RPMB" (Replay Protected Memory
* Block) LU so user space process can control this LU. User space may also
* want to have access to BOOT LU.
*
* This function adds scsi device instances for each of the well known LUs
* (except the "REPORT LUNS" LU).
*
* Return: zero on success (all required W-LUs are added successfully),
* non-zero error value on failure (if failed to add any of the required W-LU).
*/
static int ufshcd_scsi_add_wlus(struct ufs_hba *hba)
{
int ret = 0;
struct scsi_device *sdev_boot, *sdev_rpmb;
hba->ufs_device_wlun = __scsi_add_device(hba->host, 0, 0,
ufshcd_upiu_wlun_to_scsi_wlun(UFS_UPIU_UFS_DEVICE_WLUN), NULL);
if (IS_ERR(hba->ufs_device_wlun)) {
ret = PTR_ERR(hba->ufs_device_wlun);
hba->ufs_device_wlun = NULL;
goto out;
}
scsi_device_put(hba->ufs_device_wlun);
sdev_rpmb = __scsi_add_device(hba->host, 0, 0,
ufshcd_upiu_wlun_to_scsi_wlun(UFS_UPIU_RPMB_WLUN), NULL);
if (IS_ERR(sdev_rpmb)) {
ret = PTR_ERR(sdev_rpmb);
goto remove_ufs_device_wlun;
}
ufshcd_blk_pm_runtime_init(sdev_rpmb);
scsi_device_put(sdev_rpmb);
sdev_boot = __scsi_add_device(hba->host, 0, 0,
ufshcd_upiu_wlun_to_scsi_wlun(UFS_UPIU_BOOT_WLUN), NULL);
if (IS_ERR(sdev_boot)) {
dev_err(hba->dev, "%s: BOOT WLUN not found\n", __func__);
} else {
ufshcd_blk_pm_runtime_init(sdev_boot);
scsi_device_put(sdev_boot);
}
goto out;
remove_ufs_device_wlun:
scsi_remove_device(hba->ufs_device_wlun);
out:
return ret;
}
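/**
 * ufshcd_wb_probe - probe the device's WriteBooster capability
 * @hba: per-adapter instance
 * @desc_buf: device descriptor buffer
 *
 * Checks the spec version, the extended UFS feature support bits and the WB
 * buffer configuration (shared or dedicated per-LU). Clears UFSHCD_CAP_WB_EN
 * if WriteBooster cannot be used.
 */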
static void ufshcd_wb_probe(struct ufs_hba *hba, const u8 *desc_buf)
{
struct ufs_dev_info *dev_info = &hba->dev_info;
u8 lun;
u32 d_lu_wb_buf_alloc;
u32 ext_ufs_feature;
if (!ufshcd_is_wb_allowed(hba))
return;
/*
* Probe WB only for UFS-2.2 and UFS-3.1 (and later) devices or
* UFS devices with quirk UFS_DEVICE_QUIRK_SUPPORT_EXTENDED_FEATURES
* enabled
*/
if (!(dev_info->wspecversion >= 0x310 ||
dev_info->wspecversion == 0x220 ||
(hba->dev_quirks & UFS_DEVICE_QUIRK_SUPPORT_EXTENDED_FEATURES)))
goto wb_disabled;
ext_ufs_feature = get_unaligned_be32(desc_buf +
DEVICE_DESC_PARAM_EXT_UFS_FEATURE_SUP);
if (!(ext_ufs_feature & UFS_DEV_WRITE_BOOSTER_SUP))
goto wb_disabled;
/*
* WB may be supported but not configured while provisioning. The spec
* says that, in dedicated WB buffer mode, at most one LUN would have a WB
* buffer configured.
*/
dev_info->wb_buffer_type = desc_buf[DEVICE_DESC_PARAM_WB_TYPE];
dev_info->b_presrv_uspc_en =
desc_buf[DEVICE_DESC_PARAM_WB_PRESRV_USRSPC_EN];
if (dev_info->wb_buffer_type == WB_BUF_MODE_SHARED) {
if (!get_unaligned_be32(desc_buf +
DEVICE_DESC_PARAM_WB_SHARED_ALLOC_UNITS))
goto wb_disabled;
} else {
for (lun = 0; lun < UFS_UPIU_MAX_WB_LUN_ID; lun++) {
d_lu_wb_buf_alloc = 0;
ufshcd_read_unit_desc_param(hba,
lun,
UNIT_DESC_PARAM_WB_BUF_ALLOC_UNITS,
(u8 *)&d_lu_wb_buf_alloc,
sizeof(d_lu_wb_buf_alloc));
if (d_lu_wb_buf_alloc) {
dev_info->wb_dedicated_lu = lun;
break;
}
}
if (!d_lu_wb_buf_alloc)
goto wb_disabled;
}
if (!ufshcd_is_wb_buf_lifetime_available(hba))
goto wb_disabled;
return;
wb_disabled:
hba->caps &= ~UFSHCD_CAP_WB_EN;
}
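/**
 * ufshcd_temp_notif_probe - probe device support for temperature notification
 * @hba: per-adapter instance
 * @desc_buf: device descriptor buffer
 *
 * Enables the high/low temperature exception events and registers the hwmon
 * interface when the device advertises temperature notification support.
 */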
static void ufshcd_temp_notif_probe(struct ufs_hba *hba, const u8 *desc_buf)
{
struct ufs_dev_info *dev_info = &hba->dev_info;
u32 ext_ufs_feature;
u8 mask = 0;
if (!(hba->caps & UFSHCD_CAP_TEMP_NOTIF) || dev_info->wspecversion < 0x300)
return;
ext_ufs_feature = get_unaligned_be32(desc_buf + DEVICE_DESC_PARAM_EXT_UFS_FEATURE_SUP);
if (ext_ufs_feature & UFS_DEV_LOW_TEMP_NOTIF)
mask |= MASK_EE_TOO_LOW_TEMP;
if (ext_ufs_feature & UFS_DEV_HIGH_TEMP_NOTIF)
mask |= MASK_EE_TOO_HIGH_TEMP;
if (mask) {
ufshcd_enable_ee(hba, mask);
ufs_hwmon_probe(hba, mask);
}
}
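/**
 * ufshcd_ext_iid_probe - check whether the device supports the EXT_IID feature
 * @hba: per-adapter instance
 * @desc_buf: device descriptor buffer
 *
 * Reads bEXTIIDEn for UFS 4.0 (and later) devices that advertise EXT_IID
 * support and caches the result in the device info.
 */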
static void ufshcd_ext_iid_probe(struct ufs_hba *hba, u8 *desc_buf)
{
struct ufs_dev_info *dev_info = &hba->dev_info;
u32 ext_ufs_feature;
u32 ext_iid_en = 0;
int err;
/* Only UFS-4.0 and above may support EXT_IID */
if (dev_info->wspecversion < 0x400)
goto out;
ext_ufs_feature = get_unaligned_be32(desc_buf +
DEVICE_DESC_PARAM_EXT_UFS_FEATURE_SUP);
if (!(ext_ufs_feature & UFS_DEV_EXT_IID_SUP))
goto out;
err = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
QUERY_ATTR_IDN_EXT_IID_EN, 0, 0, &ext_iid_en);
if (err)
dev_err(hba->dev, "failed reading bEXTIIDEn. err = %d\n", err);
out:
dev_info->b_ext_iid_en = ext_iid_en;
}
void ufshcd_fixup_dev_quirks(struct ufs_hba *hba,
const struct ufs_dev_quirk *fixups)
{
const struct ufs_dev_quirk *f;
struct ufs_dev_info *dev_info = &hba->dev_info;
if (!fixups)
return;
for (f = fixups; f->quirk; f++) {
if ((f->wmanufacturerid == dev_info->wmanufacturerid ||
f->wmanufacturerid == UFS_ANY_VENDOR) &&
((dev_info->model &&
STR_PRFX_EQUAL(f->model, dev_info->model)) ||
!strcmp(f->model, UFS_ANY_MODEL)))
hba->dev_quirks |= f->quirk;
}
}
EXPORT_SYMBOL_GPL(ufshcd_fixup_dev_quirks);
static void ufs_fixup_device_setup(struct ufs_hba *hba)
{
/* fix by general quirk table */
ufshcd_fixup_dev_quirks(hba, ufs_fixups);
/* allow vendors to fix quirks */
ufshcd_vops_fixup_dev_quirks(hba);
}
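/**
 * ufs_get_device_desc - read the device descriptor and cache device parameters
 * @hba: per-adapter instance
 *
 * Reads the device descriptor and the product name string descriptor, caches
 * the manufacturer ID, spec version, queue depth and model name, applies
 * device quirks and probes optional features (WriteBooster, temperature
 * notification, EXT_IID).
 *
 * Return: zero on success, non-zero error value on failure.
 */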
static int ufs_get_device_desc(struct ufs_hba *hba)
{
int err;
u8 model_index;
u8 *desc_buf;
struct ufs_dev_info *dev_info = &hba->dev_info;
desc_buf = kzalloc(QUERY_DESC_MAX_SIZE, GFP_KERNEL);
if (!desc_buf) {
err = -ENOMEM;
goto out;
}
err = ufshcd_read_desc_param(hba, QUERY_DESC_IDN_DEVICE, 0, 0, desc_buf,
QUERY_DESC_MAX_SIZE);
if (err) {
dev_err(hba->dev, "%s: Failed reading Device Desc. err = %d\n",
__func__, err);
goto out;
}
/*
* getting vendor (manufacturerID) and Bank Index in big endian
* format
*/
dev_info->wmanufacturerid = desc_buf[DEVICE_DESC_PARAM_MANF_ID] << 8 |
desc_buf[DEVICE_DESC_PARAM_MANF_ID + 1];
/* getting Specification Version in big endian format */
dev_info->wspecversion = desc_buf[DEVICE_DESC_PARAM_SPEC_VER] << 8 |
desc_buf[DEVICE_DESC_PARAM_SPEC_VER + 1];
dev_info->bqueuedepth = desc_buf[DEVICE_DESC_PARAM_Q_DPTH];
model_index = desc_buf[DEVICE_DESC_PARAM_PRDCT_NAME];
err = ufshcd_read_string_desc(hba, model_index,
&dev_info->model, SD_ASCII_STD);
if (err < 0) {
dev_err(hba->dev, "%s: Failed reading Product Name. err = %d\n",
__func__, err);
goto out;
}
hba->luns_avail = desc_buf[DEVICE_DESC_PARAM_NUM_LU] +
desc_buf[DEVICE_DESC_PARAM_NUM_WLU];
ufs_fixup_device_setup(hba);
ufshcd_wb_probe(hba, desc_buf);
ufshcd_temp_notif_probe(hba, desc_buf);
if (hba->ext_iid_sup)
ufshcd_ext_iid_probe(hba, desc_buf);
/*
* ufshcd_read_string_desc returns the size of the string;
* reset the error value.
*/
err = 0;
out:
kfree(desc_buf);
return err;
}
static void ufs_put_device_desc(struct ufs_hba *hba)
{
struct ufs_dev_info *dev_info = &hba->dev_info;
kfree(dev_info->model);
dev_info->model = NULL;
}
/**
* ufshcd_tune_pa_tactivate - Tunes PA_TActivate of local UniPro
* @hba: per-adapter instance
*
* The PA_TActivate parameter can be tuned manually if the UniPro version is
* less than 1.61. PA_TActivate needs to be greater than or equal to the peer M-PHY's
* RX_MIN_ACTIVATETIME_CAPABILITY attribute. This optimal value can help reduce
* the hibern8 exit latency.
*
* Return: zero on success, non-zero error value on failure.
*/
static int ufshcd_tune_pa_tactivate(struct ufs_hba *hba)
{
int ret = 0;
u32 peer_rx_min_activatetime = 0, tuned_pa_tactivate;
ret = ufshcd_dme_peer_get(hba,
UIC_ARG_MIB_SEL(
RX_MIN_ACTIVATETIME_CAPABILITY,
UIC_ARG_MPHY_RX_GEN_SEL_INDEX(0)),
&peer_rx_min_activatetime);
if (ret)
goto out;
/* make sure proper unit conversion is applied */
tuned_pa_tactivate =
((peer_rx_min_activatetime * RX_MIN_ACTIVATETIME_UNIT_US)
/ PA_TACTIVATE_TIME_UNIT_US);
ret = ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TACTIVATE),
tuned_pa_tactivate);
out:
return ret;
}
/**
* ufshcd_tune_pa_hibern8time - Tunes PA_Hibern8Time of local UniPro
* @hba: per-adapter instance
*
* The PA_Hibern8Time parameter can be tuned manually if the UniPro version is
* less than 1.61. PA_Hibern8Time needs to be the maximum of the local M-PHY's
* TX_HIBERN8TIME_CAPABILITY and the peer M-PHY's RX_HIBERN8TIME_CAPABILITY.
* This optimal value can help reduce the hibern8 exit latency.
*
* Return: zero on success, non-zero error value on failure.
*/
static int ufshcd_tune_pa_hibern8time(struct ufs_hba *hba)
{
int ret = 0;
u32 local_tx_hibern8_time_cap = 0, peer_rx_hibern8_time_cap = 0;
u32 max_hibern8_time, tuned_pa_hibern8time;
ret = ufshcd_dme_get(hba,
UIC_ARG_MIB_SEL(TX_HIBERN8TIME_CAPABILITY,
UIC_ARG_MPHY_TX_GEN_SEL_INDEX(0)),
&local_tx_hibern8_time_cap);
if (ret)
goto out;
ret = ufshcd_dme_peer_get(hba,
UIC_ARG_MIB_SEL(RX_HIBERN8TIME_CAPABILITY,
UIC_ARG_MPHY_RX_GEN_SEL_INDEX(0)),
&peer_rx_hibern8_time_cap);
if (ret)
goto out;
max_hibern8_time = max(local_tx_hibern8_time_cap,
peer_rx_hibern8_time_cap);
/* make sure proper unit conversion is applied */
tuned_pa_hibern8time = ((max_hibern8_time * HIBERN8TIME_UNIT_US)
/ PA_HIBERN8_TIME_UNIT_US);
ret = ufshcd_dme_set(hba, UIC_ARG_MIB(PA_HIBERN8TIME),
tuned_pa_hibern8time);
out:
return ret;
}
/**
* ufshcd_quirk_tune_host_pa_tactivate - Ensures that host PA_TACTIVATE is
* less than device PA_TACTIVATE time.
* @hba: per-adapter instance
*
* Some UFS devices require host PA_TACTIVATE to be lower than device
* PA_TACTIVATE; we need to enable the UFS_DEVICE_QUIRK_HOST_PA_TACTIVATE quirk
* for such devices.
*
* Return: zero on success, non-zero error value on failure.
*/
static int ufshcd_quirk_tune_host_pa_tactivate(struct ufs_hba *hba)
{
int ret = 0;
u32 granularity, peer_granularity;
u32 pa_tactivate, peer_pa_tactivate;
u32 pa_tactivate_us, peer_pa_tactivate_us;
static const u8 gran_to_us_table[] = {1, 4, 8, 16, 32, 100};
ret = ufshcd_dme_get(hba, UIC_ARG_MIB(PA_GRANULARITY),
&granularity);
if (ret)
goto out;
ret = ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_GRANULARITY),
&peer_granularity);
if (ret)
goto out;
if ((granularity < PA_GRANULARITY_MIN_VAL) ||
(granularity > PA_GRANULARITY_MAX_VAL)) {
dev_err(hba->dev, "%s: invalid host PA_GRANULARITY %d",
__func__, granularity);
return -EINVAL;
}
if ((peer_granularity < PA_GRANULARITY_MIN_VAL) ||
(peer_granularity > PA_GRANULARITY_MAX_VAL)) {
dev_err(hba->dev, "%s: invalid device PA_GRANULARITY %d",
__func__, peer_granularity);
return -EINVAL;
}
ret = ufshcd_dme_get(hba, UIC_ARG_MIB(PA_TACTIVATE), &pa_tactivate);
if (ret)
goto out;
ret = ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_TACTIVATE),
&peer_pa_tactivate);
if (ret)
goto out;
pa_tactivate_us = pa_tactivate * gran_to_us_table[granularity - 1];
peer_pa_tactivate_us = peer_pa_tactivate *
gran_to_us_table[peer_granularity - 1];
if (pa_tactivate_us >= peer_pa_tactivate_us) {
u32 new_peer_pa_tactivate;
new_peer_pa_tactivate = pa_tactivate_us /
gran_to_us_table[peer_granularity - 1];
new_peer_pa_tactivate++;
ret = ufshcd_dme_peer_set(hba, UIC_ARG_MIB(PA_TACTIVATE),
new_peer_pa_tactivate);
}
out:
return ret;
}
static void ufshcd_tune_unipro_params(struct ufs_hba *hba)
{
if (ufshcd_is_unipro_pa_params_tuning_req(hba)) {
ufshcd_tune_pa_tactivate(hba);
ufshcd_tune_pa_hibern8time(hba);
}
ufshcd_vops_apply_dev_quirks(hba);
if (hba->dev_quirks & UFS_DEVICE_QUIRK_PA_TACTIVATE)
/* set 1ms timeout for PA_TACTIVATE */
ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TACTIVATE), 10);
if (hba->dev_quirks & UFS_DEVICE_QUIRK_HOST_PA_TACTIVATE)
ufshcd_quirk_tune_host_pa_tactivate(hba);
}
static void ufshcd_clear_dbg_ufs_stats(struct ufs_hba *hba)
{
hba->ufs_stats.hibern8_exit_cnt = 0;
hba->ufs_stats.last_hibern8_exit_tstamp = ktime_set(0, 0);
hba->req_abort_count = 0;
}
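/**
 * ufshcd_device_geo_params_init - read geometry descriptor related parameters
 * @hba: per-adapter instance
 *
 * Reads the geometry descriptor and derives the maximum number of logical
 * units supported by the device.
 *
 * Return: zero on success, non-zero error value on failure.
 */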
static int ufshcd_device_geo_params_init(struct ufs_hba *hba)
{
int err;
u8 *desc_buf;
desc_buf = kzalloc(QUERY_DESC_MAX_SIZE, GFP_KERNEL);
if (!desc_buf) {
err = -ENOMEM;
goto out;
}
err = ufshcd_read_desc_param(hba, QUERY_DESC_IDN_GEOMETRY, 0, 0,
desc_buf, QUERY_DESC_MAX_SIZE);
if (err) {
dev_err(hba->dev, "%s: Failed reading Geometry Desc. err = %d\n",
__func__, err);
goto out;
}
if (desc_buf[GEOMETRY_DESC_PARAM_MAX_NUM_LUN] == 1)
hba->dev_info.max_lu_supported = 32;
else if (desc_buf[GEOMETRY_DESC_PARAM_MAX_NUM_LUN] == 0)
hba->dev_info.max_lu_supported = 8;
out:
kfree(desc_buf);
return err;
}
struct ufs_ref_clk {
unsigned long freq_hz;
enum ufs_ref_clk_freq val;
};
static const struct ufs_ref_clk ufs_ref_clk_freqs[] = {
{19200000, REF_CLK_FREQ_19_2_MHZ},
{26000000, REF_CLK_FREQ_26_MHZ},
{38400000, REF_CLK_FREQ_38_4_MHZ},
{52000000, REF_CLK_FREQ_52_MHZ},
{0, REF_CLK_FREQ_INVAL},
};
static enum ufs_ref_clk_freq
ufs_get_bref_clk_from_hz(unsigned long freq)
{
int i;
for (i = 0; ufs_ref_clk_freqs[i].freq_hz; i++)
if (ufs_ref_clk_freqs[i].freq_hz == freq)
return ufs_ref_clk_freqs[i].val;
return REF_CLK_FREQ_INVAL;
}
void ufshcd_parse_dev_ref_clk_freq(struct ufs_hba *hba, struct clk *refclk)
{
unsigned long freq;
freq = clk_get_rate(refclk);
hba->dev_ref_clk_freq =
ufs_get_bref_clk_from_hz(freq);
if (hba->dev_ref_clk_freq == REF_CLK_FREQ_INVAL)
dev_err(hba->dev,
"invalid ref_clk setting = %ld\n", freq);
}
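/**
 * ufshcd_set_dev_ref_clk - program the device's bRefClkFreq attribute
 * @hba: per-adapter instance
 *
 * Reads bRefClkFreq from the device and, if it differs from the reference
 * clock frequency detected on the host side, writes the host value back to
 * the device.
 *
 * Return: zero on success, non-zero error value on failure.
 */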
static int ufshcd_set_dev_ref_clk(struct ufs_hba *hba)
{
int err;
u32 ref_clk;
u32 freq = hba->dev_ref_clk_freq;
err = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
QUERY_ATTR_IDN_REF_CLK_FREQ, 0, 0, &ref_clk);
if (err) {
dev_err(hba->dev, "failed reading bRefClkFreq. err = %d\n",
err);
goto out;
}
if (ref_clk == freq)
goto out; /* nothing to update */
err = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_WRITE_ATTR,
QUERY_ATTR_IDN_REF_CLK_FREQ, 0, 0, &freq);
if (err) {
dev_err(hba->dev, "bRefClkFreq setting to %lu Hz failed\n",
ufs_ref_clk_freqs[freq].freq_hz);
goto out;
}
dev_dbg(hba->dev, "bRefClkFreq setting to %lu Hz succeeded\n",
ufs_ref_clk_freqs[freq].freq_hz);
out:
return err;
}
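/**
 * ufshcd_device_params_init - initialize UFS device related parameters
 * @hba: per-adapter instance
 *
 * Initializes geometry descriptor parameters, reads the device descriptor and
 * applies quirks, queries the power-on write protect flag and probes the
 * maximum power mode supported by both host and device.
 *
 * Return: zero on success, non-zero error value on failure.
 */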
static int ufshcd_device_params_init(struct ufs_hba *hba)
{
bool flag;
int ret;
/* Init UFS geometry descriptor related parameters */
ret = ufshcd_device_geo_params_init(hba);
if (ret)
goto out;
/* Check and apply UFS device quirks */
ret = ufs_get_device_desc(hba);
if (ret) {
dev_err(hba->dev, "%s: Failed getting device info. err = %d\n",
__func__, ret);
goto out;
}
ufshcd_get_ref_clk_gating_wait(hba);
if (!ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_READ_FLAG,
QUERY_FLAG_IDN_PWR_ON_WPE, 0, &flag))
hba->dev_info.f_power_on_wp_en = flag;
/* Probe maximum power mode co-supported by both UFS host and device */
if (ufshcd_get_max_pwr_mode(hba))
dev_err(hba->dev,
"%s: Failed getting max supported power mode\n",
__func__);
out:
return ret;
}
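/**
 * ufshcd_set_timestamp_attr - write the current time to the device timestamp attribute
 * @hba: per-adapter instance
 *
 * For UFS 4.0 (and later) devices, writes the current wall-clock time in
 * nanoseconds to the timestamp attribute (QUERY_ATTR_IDN_TIMESTAMP) via a
 * standard write query request.
 */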
static void ufshcd_set_timestamp_attr(struct ufs_hba *hba)
{
int err;
struct ufs_query_req *request = NULL;
struct ufs_query_res *response = NULL;
struct ufs_dev_info *dev_info = &hba->dev_info;
struct utp_upiu_query_v4_0 *upiu_data;
if (dev_info->wspecversion < 0x400)
return;
ufshcd_hold(hba);
mutex_lock(&hba->dev_cmd.lock);
ufshcd_init_query(hba, &request, &response,
UPIU_QUERY_OPCODE_WRITE_ATTR,
QUERY_ATTR_IDN_TIMESTAMP, 0, 0);
request->query_func = UPIU_QUERY_FUNC_STANDARD_WRITE_REQUEST;
upiu_data = (struct utp_upiu_query_v4_0 *)&request->upiu_req;
put_unaligned_be64(ktime_get_real_ns(), &upiu_data->osf3);
err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, QUERY_REQ_TIMEOUT);
if (err)
dev_err(hba->dev, "%s: failed to set timestamp %d\n",
__func__, err);
mutex_unlock(&hba->dev_cmd.lock);
ufshcd_release(hba);
}
/**
* ufshcd_add_lus - probe and add UFS logical units
* @hba: per-adapter instance
*
* Return: 0 upon success; < 0 upon failure.
*/
static int ufshcd_add_lus(struct ufs_hba *hba)
{
int ret;
/* Add required well known logical units to scsi mid layer */
ret = ufshcd_scsi_add_wlus(hba);
if (ret)
goto out;
/* Initialize devfreq after UFS device is detected */
if (ufshcd_is_clkscaling_supported(hba)) {
memcpy(&hba->clk_scaling.saved_pwr_info,
&hba->pwr_info,
sizeof(struct ufs_pa_layer_attr));
hba->clk_scaling.is_allowed = true;
ret = ufshcd_devfreq_init(hba);
if (ret)
goto out;
hba->clk_scaling.is_enabled = true;
ufshcd_init_clk_scaling_sysfs(hba);
}
ufs_bsg_probe(hba);
scsi_scan_host(hba->host);
pm_runtime_put_sync(hba->dev);
out:
return ret;
}
/* SDB - Single Doorbell */
static void ufshcd_release_sdb_queue(struct ufs_hba *hba, int nutrs)
{
size_t ucdl_size, utrdl_size;
ucdl_size = ufshcd_get_ucd_size(hba) * nutrs;
dmam_free_coherent(hba->dev, ucdl_size, hba->ucdl_base_addr,
hba->ucdl_dma_addr);
utrdl_size = sizeof(struct utp_transfer_req_desc) * nutrs;
dmam_free_coherent(hba->dev, utrdl_size, hba->utrdl_base_addr,
hba->utrdl_dma_addr);
devm_kfree(hba->dev, hba->lrb);
}
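/**
 * ufshcd_alloc_mcq - allocate resources needed for multi circular queue (MCQ) mode
 * @hba: per-adapter instance
 *
 * Decides the MCQ queue depth, re-allocates the host memory if the number of
 * supported tags changed compared to single doorbell mode, and allocates the
 * MCQ specific memory. Restores the previous queue depth on failure.
 *
 * Return: zero on success, non-zero error value on failure.
 */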
static int ufshcd_alloc_mcq(struct ufs_hba *hba)
{
int ret;
int old_nutrs = hba->nutrs;
ret = ufshcd_mcq_decide_queue_depth(hba);
if (ret < 0)
return ret;
hba->nutrs = ret;
ret = ufshcd_mcq_init(hba);
if (ret)
goto err;
/*
* The memory previously allocated for nutrs may not be enough in MCQ mode:
* the number of supported tags in MCQ mode may be larger than in SDB mode.
*/
if (hba->nutrs != old_nutrs) {
ufshcd_release_sdb_queue(hba, old_nutrs);
ret = ufshcd_memory_alloc(hba);
if (ret)
goto err;
ufshcd_host_memory_configure(hba);
}
ret = ufshcd_mcq_memory_alloc(hba);
if (ret)
goto err;
return 0;
err:
hba->nutrs = old_nutrs;
return ret;
}
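/**
 * ufshcd_config_mcq - configure the host controller for MCQ operation
 * @hba: per-adapter instance
 *
 * Configures ESI if available, enables the MCQ interrupts, makes the hardware
 * queues operational and switches the controller into MCQ mode.
 */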
static void ufshcd_config_mcq(struct ufs_hba *hba)
{
int ret;
u32 intrs;
ret = ufshcd_mcq_vops_config_esi(hba);
dev_info(hba->dev, "ESI %sconfigured\n", ret ? "is not " : "");
intrs = UFSHCD_ENABLE_MCQ_INTRS;
if (hba->quirks & UFSHCD_QUIRK_MCQ_BROKEN_INTR)
intrs &= ~MCQ_CQ_EVENT_STATUS;
ufshcd_enable_intr(hba, intrs);
ufshcd_mcq_make_queues_operational(hba);
ufshcd_mcq_config_mac(hba, hba->nutrs);
hba->host->can_queue = hba->nutrs - UFSHCD_NUM_RESERVED;
hba->reserved_slot = hba->nutrs - UFSHCD_NUM_RESERVED;
/* Select MCQ mode */
ufshcd_writel(hba, ufshcd_readl(hba, REG_UFS_MEM_CFG) | 0x1,
REG_UFS_MEM_CFG);
hba->mcq_enabled = true;
dev_info(hba->dev, "MCQ configured, nr_queues=%d, io_queues=%d, read_queue=%d, poll_queues=%d, queue_depth=%d\n",
hba->nr_hw_queues, hba->nr_queues[HCTX_TYPE_DEFAULT],
hba->nr_queues[HCTX_TYPE_READ], hba->nr_queues[HCTX_TYPE_POLL],
hba->nutrs);
}
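/**
 * ufshcd_device_init - bring up the link and initialize the UFS device
 * @hba: per-adapter instance
 * @init_dev_params: whether device descriptor related parameters should be (re)read
 *
 * Performs link startup, NOP OUT verification and device initialization,
 * optionally reads the device parameters and sets up MCQ mode, tunes UniPro
 * parameters and switches to the maximum supported power mode.
 *
 * Return: 0 upon success; a non-zero error value upon failure.
 */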
static int ufshcd_device_init(struct ufs_hba *hba, bool init_dev_params)
{
int ret;
struct Scsi_Host *host = hba->host;
hba->ufshcd_state = UFSHCD_STATE_RESET;
ret = ufshcd_link_startup(hba);
if (ret)
return ret;
if (hba->quirks & UFSHCD_QUIRK_SKIP_PH_CONFIGURATION)
return ret;
/* Debug counters initialization */
ufshcd_clear_dbg_ufs_stats(hba);
/* UniPro link is active now */
ufshcd_set_link_active(hba);
/* Reconfigure MCQ upon reset */
if (is_mcq_enabled(hba) && !init_dev_params)
ufshcd_config_mcq(hba);
/* Verify device initialization by sending NOP OUT UPIU */
ret = ufshcd_verify_dev_init(hba);
if (ret)
return ret;
/* Initiate UFS initialization and wait until completion */
ret = ufshcd_complete_dev_init(hba);
if (ret)
return ret;
/*
* Initialize UFS device parameters used by the driver; these
* parameters are associated with UFS descriptors.
*/
if (init_dev_params) {
ret = ufshcd_device_params_init(hba);
if (ret)
return ret;
if (is_mcq_supported(hba) && !hba->scsi_host_added) {
ret = ufshcd_alloc_mcq(hba);
if (!ret) {
ufshcd_config_mcq(hba);
} else {
/* Continue with SDB mode */
use_mcq_mode = false;
dev_err(hba->dev, "MCQ mode is disabled, err=%d\n",
ret);
}
ret = scsi_add_host(host, hba->dev);
if (ret) {
dev_err(hba->dev, "scsi_add_host failed\n");
return ret;
}
hba->scsi_host_added = true;
} else if (is_mcq_supported(hba)) {
/* UFSHCD_QUIRK_REINIT_AFTER_MAX_GEAR_SWITCH is set */
ufshcd_config_mcq(hba);
}
}
ufshcd_tune_unipro_params(hba);
/* UFS device is also active now */
ufshcd_set_ufs_dev_active(hba);
ufshcd_force_reset_auto_bkops(hba);
ufshcd_set_timestamp_attr(hba);
/* Gear up to HS gear if supported */
if (hba->max_pwr_info.is_valid) {
/*
* Set the right value to bRefClkFreq before attempting to
* switch to HS gears.
*/
if (hba->dev_ref_clk_freq != REF_CLK_FREQ_INVAL)
ufshcd_set_dev_ref_clk(hba);
ret = ufshcd_config_pwr_mode(hba, &hba->max_pwr_info.info);
if (ret) {
dev_err(hba->dev, "%s: Failed setting power mode, err = %d\n",
__func__, ret);
return ret;
}
}
return 0;
}
/**
* ufshcd_probe_hba - probe hba to detect device and initialize it
* @hba: per-adapter instance
* @init_dev_params: whether or not to call ufshcd_device_params_init().
*
* Execute link-startup and verify device initialization
*
* Return: 0 upon success; < 0 upon failure.
*/
static int ufshcd_probe_hba(struct ufs_hba *hba, bool init_dev_params)
{
ktime_t start = ktime_get();
unsigned long flags;
int ret;
ret = ufshcd_device_init(hba, init_dev_params);
if (ret)
goto out;
if (hba->quirks & UFSHCD_QUIRK_REINIT_AFTER_MAX_GEAR_SWITCH) {
/* Reset the device and controller before doing reinit */
ufshcd_device_reset(hba);
ufshcd_hba_stop(hba);
ufshcd_vops_reinit_notify(hba);
ret = ufshcd_hba_enable(hba);
if (ret) {
dev_err(hba->dev, "Host controller enable failed\n");
ufshcd_print_evt_hist(hba);
ufshcd_print_host_state(hba);
goto out;
}
/* Reinit the device */
ret = ufshcd_device_init(hba, init_dev_params);
if (ret)
goto out;
}
ufshcd_print_pwr_info(hba);
/*
* bActiveICCLevel is volatile for UFS device (as per latest v2.1 spec)
* and for removable UFS card as well, hence always set the parameter.
* Note: Error handler may issue the device reset hence resetting
* bActiveICCLevel as well so it is always safe to set this here.
*/
ufshcd_set_active_icc_lvl(hba);
/* Enable UFS Write Booster if supported */
ufshcd_configure_wb(hba);
if (hba->ee_usr_mask)
ufshcd_write_ee_control(hba);
/* Enable Auto-Hibernate if configured */
ufshcd_auto_hibern8_enable(hba);
out:
spin_lock_irqsave(hba->host->host_lock, flags);
if (ret)
hba->ufshcd_state = UFSHCD_STATE_ERROR;
else if (hba->ufshcd_state == UFSHCD_STATE_RESET)
hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL;
spin_unlock_irqrestore(hba->host->host_lock, flags);
trace_ufshcd_init(dev_name(hba->dev), ret,
ktime_to_us(ktime_sub(ktime_get(), start)),
hba->curr_dev_pwr_mode, hba->uic_link_state);
return ret;
}
/**
* ufshcd_async_scan - asynchronous execution for probing hba
* @data: data pointer to pass to this function
* @cookie: cookie data
*/
static void ufshcd_async_scan(void *data, async_cookie_t cookie)
{
struct ufs_hba *hba = (struct ufs_hba *)data;
int ret;
down(&hba->host_sem);
/* Initialize hba, detect and initialize UFS device */
ret = ufshcd_probe_hba(hba, true);
up(&hba->host_sem);
if (ret)
goto out;
/* Probe and add UFS logical units */
ret = ufshcd_add_lus(hba);
out:
/*
* If we failed to initialize the device or the device is not
* present, turn off the power/clocks etc.
*/
if (ret) {
pm_runtime_put_sync(hba->dev);
ufshcd_hba_exit(hba);
}
}
static enum scsi_timeout_action ufshcd_eh_timed_out(struct scsi_cmnd *scmd)
{
struct ufs_hba *hba = shost_priv(scmd->device->host);
if (!hba->system_suspending) {
/* Activate the error handler in the SCSI core. */
return SCSI_EH_NOT_HANDLED;
}
/*
* If we get here we know that no TMFs are outstanding and also that
* the only pending command is a START STOP UNIT command. Handle the
* timeout of that command directly to prevent a deadlock between
* ufshcd_set_dev_pwr_mode() and ufshcd_err_handler().
*/
ufshcd_link_recovery(hba);
dev_info(hba->dev, "%s() finished; outstanding_tasks = %#lx.\n",
__func__, hba->outstanding_tasks);
return hba->outstanding_reqs ? SCSI_EH_RESET_TIMER : SCSI_EH_DONE;
}
static const struct attribute_group *ufshcd_driver_groups[] = {
&ufs_sysfs_unit_descriptor_group,
&ufs_sysfs_lun_attributes_group,
NULL,
};
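/*
 * Default variant parameters (hba->vps): host enable delay, WriteBooster
 * flush threshold and the devfreq/ondemand settings used for clock scaling.
 */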
static struct ufs_hba_variant_params ufs_hba_vps = {
.hba_enable_delay_us = 1000,
.wb_flush_threshold = UFS_WB_BUF_REMAIN_PERCENT(40),
.devfreq_profile.polling_ms = 100,
.devfreq_profile.target = ufshcd_devfreq_target,
.devfreq_profile.get_dev_status = ufshcd_devfreq_get_dev_status,
.ondemand_data.upthreshold = 70,
.ondemand_data.downdifferential = 5,
};
static const struct scsi_host_template ufshcd_driver_template = {
.module = THIS_MODULE,
.name = UFSHCD,
.proc_name = UFSHCD,
.map_queues = ufshcd_map_queues,
.queuecommand = ufshcd_queuecommand,
.mq_poll = ufshcd_poll,
.slave_alloc = ufshcd_slave_alloc,
.slave_configure = ufshcd_slave_configure,
.slave_destroy = ufshcd_slave_destroy,
.change_queue_depth = ufshcd_change_queue_depth,
.eh_abort_handler = ufshcd_abort,
.eh_device_reset_handler = ufshcd_eh_device_reset_handler,
.eh_host_reset_handler = ufshcd_eh_host_reset_handler,
.eh_timed_out = ufshcd_eh_timed_out,
.this_id = -1,
.sg_tablesize = SG_ALL,
.cmd_per_lun = UFSHCD_CMD_PER_LUN,
.can_queue = UFSHCD_CAN_QUEUE,
.max_segment_size = PRDT_DATA_BYTE_COUNT_MAX,
.max_sectors = SZ_1M / SECTOR_SIZE,
.max_host_blocked = 1,
.track_queue_depth = 1,
.skip_settle_delay = 1,
.sdev_groups = ufshcd_driver_groups,
.rpm_autosuspend_delay = RPM_AUTOSUSPEND_DELAY_MS,
};
static int ufshcd_config_vreg_load(struct device *dev, struct ufs_vreg *vreg,
int ua)
{
int ret;
if (!vreg)
return 0;
/*
* "set_load" operation shall be required on those regulators
* which specifically configured current limitation. Otherwise
* zero max_uA may cause unexpected behavior when regulator is
* enabled or set as high power mode.
*/
if (!vreg->max_uA)
return 0;
ret = regulator_set_load(vreg->reg, ua);
if (ret < 0) {
dev_err(dev, "%s: %s set load (ua=%d) failed, err=%d\n",
__func__, vreg->name, ua, ret);
}
return ret;
}
static inline int ufshcd_config_vreg_lpm(struct ufs_hba *hba,
struct ufs_vreg *vreg)
{
return ufshcd_config_vreg_load(hba->dev, vreg, UFS_VREG_LPM_LOAD_UA);
}
static inline int ufshcd_config_vreg_hpm(struct ufs_hba *hba,
struct ufs_vreg *vreg)
{
if (!vreg)
return 0;
return ufshcd_config_vreg_load(hba->dev, vreg, vreg->max_uA);
}
static int ufshcd_config_vreg(struct device *dev,
struct ufs_vreg *vreg, bool on)
{
if (regulator_count_voltages(vreg->reg) <= 0)
return 0;
return ufshcd_config_vreg_load(dev, vreg, on ? vreg->max_uA : 0);
}
static int ufshcd_enable_vreg(struct device *dev, struct ufs_vreg *vreg)
{
int ret = 0;
if (!vreg || vreg->enabled)
goto out;
ret = ufshcd_config_vreg(dev, vreg, true);
if (!ret)
ret = regulator_enable(vreg->reg);
if (!ret)
vreg->enabled = true;
else
dev_err(dev, "%s: %s enable failed, err=%d\n",
__func__, vreg->name, ret);
out:
return ret;
}
static int ufshcd_disable_vreg(struct device *dev, struct ufs_vreg *vreg)
{
int ret = 0;
if (!vreg || !vreg->enabled || vreg->always_on)
goto out;
ret = regulator_disable(vreg->reg);
if (!ret) {
/* ignore errors on applying disable config */
ufshcd_config_vreg(dev, vreg, false);
vreg->enabled = false;
} else {
dev_err(dev, "%s: %s disable failed, err=%d\n",
__func__, vreg->name, ret);
}
out:
return ret;
}
static int ufshcd_setup_vreg(struct ufs_hba *hba, bool on)
{
int ret = 0;
struct device *dev = hba->dev;
struct ufs_vreg_info *info = &hba->vreg_info;
ret = ufshcd_toggle_vreg(dev, info->vcc, on);
if (ret)
goto out;
ret = ufshcd_toggle_vreg(dev, info->vccq, on);
if (ret)
goto out;
ret = ufshcd_toggle_vreg(dev, info->vccq2, on);
out:
if (ret) {
ufshcd_toggle_vreg(dev, info->vccq2, false);
ufshcd_toggle_vreg(dev, info->vccq, false);
ufshcd_toggle_vreg(dev, info->vcc, false);
}
return ret;
}
static int ufshcd_setup_hba_vreg(struct ufs_hba *hba, bool on)
{
struct ufs_vreg_info *info = &hba->vreg_info;
return ufshcd_toggle_vreg(hba->dev, info->vdd_hba, on);
}
int ufshcd_get_vreg(struct device *dev, struct ufs_vreg *vreg)
{
int ret = 0;
if (!vreg)
goto out;
vreg->reg = devm_regulator_get(dev, vreg->name);
if (IS_ERR(vreg->reg)) {
ret = PTR_ERR(vreg->reg);
dev_err(dev, "%s: %s get failed, err=%d\n",
__func__, vreg->name, ret);
}
out:
return ret;
}
EXPORT_SYMBOL_GPL(ufshcd_get_vreg);
static int ufshcd_init_vreg(struct ufs_hba *hba)
{
int ret = 0;
struct device *dev = hba->dev;
struct ufs_vreg_info *info = &hba->vreg_info;
ret = ufshcd_get_vreg(dev, info->vcc);
if (ret)
goto out;
ret = ufshcd_get_vreg(dev, info->vccq);
if (!ret)
ret = ufshcd_get_vreg(dev, info->vccq2);
out:
return ret;
}
static int ufshcd_init_hba_vreg(struct ufs_hba *hba)
{
struct ufs_vreg_info *info = &hba->vreg_info;
return ufshcd_get_vreg(hba->dev, info->vdd_hba);
}
static int ufshcd_setup_clocks(struct ufs_hba *hba, bool on)
{
int ret = 0;
struct ufs_clk_info *clki;
struct list_head *head = &hba->clk_list_head;
unsigned long flags;
ktime_t start = ktime_get();
bool clk_state_changed = false;
if (list_empty(head))
goto out;
ret = ufshcd_vops_setup_clocks(hba, on, PRE_CHANGE);
if (ret)
return ret;
list_for_each_entry(clki, head, list) {
if (!IS_ERR_OR_NULL(clki->clk)) {
/*
* Don't disable clocks which are needed
* to keep the link active.
*/
if (ufshcd_is_link_active(hba) &&
clki->keep_link_active)
continue;
clk_state_changed = on ^ clki->enabled;
if (on && !clki->enabled) {
ret = clk_prepare_enable(clki->clk);
if (ret) {
dev_err(hba->dev, "%s: %s prepare enable failed, %d\n",
__func__, clki->name, ret);
goto out;
}
} else if (!on && clki->enabled) {
clk_disable_unprepare(clki->clk);
}
clki->enabled = on;
dev_dbg(hba->dev, "%s: clk: %s %sabled\n", __func__,
clki->name, on ? "en" : "dis");
}
}
ret = ufshcd_vops_setup_clocks(hba, on, POST_CHANGE);
if (ret)
return ret;
out:
if (ret) {
list_for_each_entry(clki, head, list) {
if (!IS_ERR_OR_NULL(clki->clk) && clki->enabled)
clk_disable_unprepare(clki->clk);
}
} else if (!ret && on) {
spin_lock_irqsave(hba->host->host_lock, flags);
hba->clk_gating.state = CLKS_ON;
trace_ufshcd_clk_gating(dev_name(hba->dev),
hba->clk_gating.state);
spin_unlock_irqrestore(hba->host->host_lock, flags);
}
if (clk_state_changed)
trace_ufshcd_profile_clk_gating(dev_name(hba->dev),
(on ? "on" : "off"),
ktime_to_us(ktime_sub(ktime_get(), start)), ret);
return ret;
}
static enum ufs_ref_clk_freq ufshcd_parse_ref_clk_property(struct ufs_hba *hba)
{
u32 freq;
int ret = device_property_read_u32(hba->dev, "ref-clk-freq", &freq);
if (ret) {
dev_dbg(hba->dev, "Cannot query 'ref-clk-freq' property = %d", ret);
return REF_CLK_FREQ_INVAL;
}
return ufs_get_bref_clk_from_hz(freq);
}
static int ufshcd_init_clocks(struct ufs_hba *hba)
{
int ret = 0;
struct ufs_clk_info *clki;
struct device *dev = hba->dev;
struct list_head *head = &hba->clk_list_head;
if (list_empty(head))
goto out;
list_for_each_entry(clki, head, list) {
if (!clki->name)
continue;
clki->clk = devm_clk_get(dev, clki->name);
if (IS_ERR(clki->clk)) {
ret = PTR_ERR(clki->clk);
dev_err(dev, "%s: %s clk get failed, %d\n",
__func__, clki->name, ret);
goto out;
}
/*
* Parse device ref clk freq as per device tree "ref_clk".
* Default dev_ref_clk_freq is set to REF_CLK_FREQ_INVAL
* in ufshcd_alloc_host().
*/
if (!strcmp(clki->name, "ref_clk"))
ufshcd_parse_dev_ref_clk_freq(hba, clki->clk);
if (clki->max_freq) {
ret = clk_set_rate(clki->clk, clki->max_freq);
if (ret) {
dev_err(hba->dev, "%s: %s clk set rate(%dHz) failed, %d\n",
__func__, clki->name,
clki->max_freq, ret);
goto out;
}
clki->curr_freq = clki->max_freq;
}
dev_dbg(dev, "%s: clk: %s, rate: %lu\n", __func__,
clki->name, clk_get_rate(clki->clk));
}
out:
return ret;
}
static int ufshcd_variant_hba_init(struct ufs_hba *hba)
{
int err = 0;
if (!hba->vops)
goto out;
err = ufshcd_vops_init(hba);
if (err)
dev_err_probe(hba->dev, err,
"%s: variant %s init failed with err %d\n",
__func__, ufshcd_get_var_name(hba), err);
out:
return err;
}
static void ufshcd_variant_hba_exit(struct ufs_hba *hba)
{
if (!hba->vops)
return;
ufshcd_vops_exit(hba);
}
static int ufshcd_hba_init(struct ufs_hba *hba)
{
int err;
/*
* Handle host controller power separately from the UFS device power
* rails, as this makes it easier to control host controller power
* collapse, which is different from UFS device power collapse.
* Also, enable the host controller power before going ahead with the
* rest of the initialization here.
*/
err = ufshcd_init_hba_vreg(hba);
if (err)
goto out;
err = ufshcd_setup_hba_vreg(hba, true);
if (err)
goto out;
err = ufshcd_init_clocks(hba);
if (err)
goto out_disable_hba_vreg;
if (hba->dev_ref_clk_freq == REF_CLK_FREQ_INVAL)
hba->dev_ref_clk_freq = ufshcd_parse_ref_clk_property(hba);
err = ufshcd_setup_clocks(hba, true);
if (err)
goto out_disable_hba_vreg;
err = ufshcd_init_vreg(hba);
if (err)
goto out_disable_clks;
err = ufshcd_setup_vreg(hba, true);
if (err)
goto out_disable_clks;
err = ufshcd_variant_hba_init(hba);
if (err)
goto out_disable_vreg;
ufs_debugfs_hba_init(hba);
hba->is_powered = true;
goto out;
out_disable_vreg:
ufshcd_setup_vreg(hba, false);
out_disable_clks:
ufshcd_setup_clocks(hba, false);
out_disable_hba_vreg:
ufshcd_setup_hba_vreg(hba, false);
out:
return err;
}
static void ufshcd_hba_exit(struct ufs_hba *hba)
{
if (hba->is_powered) {
ufshcd_exit_clk_scaling(hba);
ufshcd_exit_clk_gating(hba);
if (hba->eh_wq)
destroy_workqueue(hba->eh_wq);
ufs_debugfs_hba_exit(hba);
ufshcd_variant_hba_exit(hba);
ufshcd_setup_vreg(hba, false);
ufshcd_setup_clocks(hba, false);
ufshcd_setup_hba_vreg(hba, false);
hba->is_powered = false;
ufs_put_device_desc(hba);
}
}
static int ufshcd_execute_start_stop(struct scsi_device *sdev,
enum ufs_dev_pwr_mode pwr_mode,
struct scsi_sense_hdr *sshdr)
{
const unsigned char cdb[6] = { START_STOP, 0, 0, 0, pwr_mode << 4, 0 };
const struct scsi_exec_args args = {
.sshdr = sshdr,
.req_flags = BLK_MQ_REQ_PM,
.scmd_flags = SCMD_FAIL_IF_RECOVERING,
};
return scsi_execute_cmd(sdev, cdb, REQ_OP_DRV_IN, /*buffer=*/NULL,
/*bufflen=*/0, /*timeout=*/10 * HZ, /*retries=*/0,
&args);
}
/**
* ufshcd_set_dev_pwr_mode - sends START STOP UNIT command to set device
* power mode
* @hba: per adapter instance
* @pwr_mode: device power mode to set
*
* Return: 0 if requested power mode is set successfully;
* < 0 if failed to set the requested power mode.
*/
static int ufshcd_set_dev_pwr_mode(struct ufs_hba *hba,
enum ufs_dev_pwr_mode pwr_mode)
{
struct scsi_sense_hdr sshdr;
struct scsi_device *sdp;
unsigned long flags;
int ret, retries;
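/*
 * The START STOP UNIT command must go to the UFS device well-known LUN,
 * so take a reference to it while it is online.
 */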
spin_lock_irqsave(hba->host->host_lock, flags);
sdp = hba->ufs_device_wlun;
if (sdp && scsi_device_online(sdp))
ret = scsi_device_get(sdp);
else
ret = -ENODEV;
spin_unlock_irqrestore(hba->host->host_lock, flags);
if (ret)
return ret;
/*
* If scsi commands fail, the scsi mid-layer schedules scsi error-
* handling, which would wait for host to be resumed. Since we know
* we are functional while we are here, skip host resume in error
* handling context.
*/
hba->host->eh_noresume = 1;
/*
* This function is generally called from the power management callbacks,
* hence set the RQF_PM flag so that it doesn't resume already-suspended
* children.
*/
for (retries = 3; retries > 0; --retries) {
ret = ufshcd_execute_start_stop(sdp, pwr_mode, &sshdr);
/*
* scsi_execute_cmd() only returns a negative value if the request
* queue is dying.
*/
if (ret <= 0)
break;
}
if (ret) {
sdev_printk(KERN_WARNING, sdp,
"START_STOP failed for power mode: %d, result %x\n",
pwr_mode, ret);
if (ret > 0) {
if (scsi_sense_valid(&sshdr))
scsi_print_sense_hdr(sdp, NULL, &sshdr);
ret = -EIO;
}
} else {
hba->curr_dev_pwr_mode = pwr_mode;
}
scsi_device_put(sdp);
hba->host->eh_noresume = 0;
return ret;
}
static int ufshcd_link_state_transition(struct ufs_hba *hba,
enum uic_link_state req_link_state,
bool check_for_bkops)
{
int ret = 0;
if (req_link_state == hba->uic_link_state)
return 0;
if (req_link_state == UIC_LINK_HIBERN8_STATE) {
ret = ufshcd_uic_hibern8_enter(hba);
if (!ret) {
ufshcd_set_link_hibern8(hba);
} else {
dev_err(hba->dev, "%s: hibern8 enter failed %d\n",
__func__, ret);
goto out;
}
}
/*
* If autobkops is enabled, link can't be turned off because
* turning off the link would also turn off the device, except in the
* case of DeepSleep where the device is expected to remain powered.
*/
else if ((req_link_state == UIC_LINK_OFF_STATE) &&
(!check_for_bkops || !hba->auto_bkops_enabled)) {
/*
* Let's make sure that the link is in low power mode. We do this
* currently by putting the link in Hibern8. Another way to put the
* link in low power mode is to send a DME end point reset to the
* device and then a DME reset to the local UniPro, but putting the
* link in Hibern8 is much faster.
*
* Note also that putting the link in Hibern8 is a requirement
* for entering DeepSleep.
*/
ret = ufshcd_uic_hibern8_enter(hba);
if (ret) {
dev_err(hba->dev, "%s: hibern8 enter failed %d\n",
__func__, ret);
goto out;
}
/*
* Change controller state to "reset state" which
* should also put the link in off/reset state
*/
ufshcd_hba_stop(hba);
/*
* TODO: Check if we need any delay to make sure that
* controller is reset
*/
ufshcd_set_link_off(hba);
}
out:
return ret;
}
static void ufshcd_vreg_set_lpm(struct ufs_hba *hba)
{
bool vcc_off = false;
/*
* It seems some UFS devices may keep drawing more than sleep current
* (at least for 500us) from UFS rails (especially from the VCCQ rail).
* To avoid this situation, add 2ms delay before putting these UFS
* rails in LPM mode.
*/
if (!ufshcd_is_link_active(hba) &&
hba->dev_quirks & UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM)
usleep_range(2000, 2100);
/*
* If the UFS device is in UFS_Sleep state, turn off the VCC rail to
* save some power.
*
* If UFS device and link is in OFF state, all power supplies (VCC,
* VCCQ, VCCQ2) can be turned off if power on write protect is not
* required. If UFS link is inactive (Hibern8 or OFF state) and device
* is in sleep state, put VCCQ & VCCQ2 rails in LPM mode.
*
* Ignore the error returned by ufshcd_toggle_vreg() as device is anyway
* in low power state which would save some power.
*
* If Write Booster is enabled and the device needs to flush the WB
* buffer OR if bkops status is urgent for WB, keep Vcc on.
*/
if (ufshcd_is_ufs_dev_poweroff(hba) && ufshcd_is_link_off(hba) &&
!hba->dev_info.is_lu_power_on_wp) {
ufshcd_setup_vreg(hba, false);
vcc_off = true;
} else if (!ufshcd_is_ufs_dev_active(hba)) {
ufshcd_toggle_vreg(hba->dev, hba->vreg_info.vcc, false);
vcc_off = true;
if (ufshcd_is_link_hibern8(hba) || ufshcd_is_link_off(hba)) {
ufshcd_config_vreg_lpm(hba, hba->vreg_info.vccq);
ufshcd_config_vreg_lpm(hba, hba->vreg_info.vccq2);
}
}
/*
* Some UFS devices require delay after VCC power rail is turned-off.
*/
if (vcc_off && hba->vreg_info.vcc &&
hba->dev_quirks & UFS_DEVICE_QUIRK_DELAY_AFTER_LPM)
usleep_range(5000, 5100);
}
#ifdef CONFIG_PM
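/*
 * Counterpart of ufshcd_vreg_set_lpm(): bring the regulators back to their
 * high power mode settings on resume, mirroring the conditions that were
 * used when they were put into low power mode.
 */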
static int ufshcd_vreg_set_hpm(struct ufs_hba *hba)
{
int ret = 0;
if (ufshcd_is_ufs_dev_poweroff(hba) && ufshcd_is_link_off(hba) &&
!hba->dev_info.is_lu_power_on_wp) {
ret = ufshcd_setup_vreg(hba, true);
} else if (!ufshcd_is_ufs_dev_active(hba)) {
if (!ufshcd_is_link_active(hba)) {
ret = ufshcd_config_vreg_hpm(hba, hba->vreg_info.vccq);
if (ret)
goto vcc_disable;
ret = ufshcd_config_vreg_hpm(hba, hba->vreg_info.vccq2);
if (ret)
goto vccq_lpm;
}
ret = ufshcd_toggle_vreg(hba->dev, hba->vreg_info.vcc, true);
}
goto out;
vccq_lpm:
ufshcd_config_vreg_lpm(hba, hba->vreg_info.vccq);
vcc_disable:
ufshcd_toggle_vreg(hba->dev, hba->vreg_info.vcc, false);
out:
return ret;
}
#endif /* CONFIG_PM */
static void ufshcd_hba_vreg_set_lpm(struct ufs_hba *hba)
{
if (ufshcd_is_link_off(hba) || ufshcd_can_aggressive_pc(hba))
ufshcd_setup_hba_vreg(hba, false);
}
static void ufshcd_hba_vreg_set_hpm(struct ufs_hba *hba)
{
if (ufshcd_is_link_off(hba) || ufshcd_can_aggressive_pc(hba))
ufshcd_setup_hba_vreg(hba, true);
}
static int __ufshcd_wl_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op)
{
int ret = 0;
bool check_for_bkops;
enum ufs_pm_level pm_lvl;
enum ufs_dev_pwr_mode req_dev_pwr_mode;
enum uic_link_state req_link_state;
hba->pm_op_in_progress = true;
if (pm_op != UFS_SHUTDOWN_PM) {
pm_lvl = pm_op == UFS_RUNTIME_PM ?
hba->rpm_lvl : hba->spm_lvl;
req_dev_pwr_mode = ufs_get_pm_lvl_to_dev_pwr_mode(pm_lvl);
req_link_state = ufs_get_pm_lvl_to_link_pwr_state(pm_lvl);
} else {
req_dev_pwr_mode = UFS_POWERDOWN_PWR_MODE;
req_link_state = UIC_LINK_OFF_STATE;
}
/*
* If we can't transition into any of the low power modes
* just gate the clocks.
*/
ufshcd_hold(hba);
hba->clk_gating.is_suspended = true;
if (ufshcd_is_clkscaling_supported(hba))
ufshcd_clk_scaling_suspend(hba, true);
if (req_dev_pwr_mode == UFS_ACTIVE_PWR_MODE &&
req_link_state == UIC_LINK_ACTIVE_STATE) {
goto vops_suspend;
}
if ((req_dev_pwr_mode == hba->curr_dev_pwr_mode) &&
(req_link_state == hba->uic_link_state))
goto enable_scaling;
/* UFS device & link must be active before we enter in this function */
if (!ufshcd_is_ufs_dev_active(hba) || !ufshcd_is_link_active(hba)) {
ret = -EINVAL;
goto enable_scaling;
}
if (pm_op == UFS_RUNTIME_PM) {
if (ufshcd_can_autobkops_during_suspend(hba)) {
/*
* The device is idle with no requests in the queue,
* allow background operations if bkops status shows
* that performance might be impacted.
*/
ret = ufshcd_urgent_bkops(hba);
if (ret) {
/*
* Returning an error in the suspend flow would hang
* I/O. Trigger the error handler and abort the
* suspend so that error recovery can run.
*/
ufshcd_force_error_recovery(hba);
ret = -EBUSY;
goto enable_scaling;
}
} else {
/* make sure that auto bkops is disabled */
ufshcd_disable_auto_bkops(hba);
}
/*
* If device needs to do BKOP or WB buffer flush during
* Hibern8, keep device power mode as "active power mode"
* and VCC supply.
*/
hba->dev_info.b_rpm_dev_flush_capable =
hba->auto_bkops_enabled ||
(((req_link_state == UIC_LINK_HIBERN8_STATE) ||
((req_link_state == UIC_LINK_ACTIVE_STATE) &&
ufshcd_is_auto_hibern8_enabled(hba))) &&
ufshcd_wb_need_flush(hba));
}
flush_work(&hba->eeh_work);
ret = ufshcd_vops_suspend(hba, pm_op, PRE_CHANGE);
if (ret)
goto enable_scaling;
if (req_dev_pwr_mode != hba->curr_dev_pwr_mode) {
if (pm_op != UFS_RUNTIME_PM)
/* ensure that bkops is disabled */
ufshcd_disable_auto_bkops(hba);
if (!hba->dev_info.b_rpm_dev_flush_capable) {
ret = ufshcd_set_dev_pwr_mode(hba, req_dev_pwr_mode);
if (ret && pm_op != UFS_SHUTDOWN_PM) {
/*
* Returning an error in the suspend flow would hang
* I/O. Trigger the error handler and abort the
* suspend so that error recovery can run.
*/
ufshcd_force_error_recovery(hba);
ret = -EBUSY;
}
if (ret)
goto enable_scaling;
}
}
/*
* In the case of DeepSleep, the device is expected to remain powered
* with the link off, so do not check for bkops.
*/
check_for_bkops = !ufshcd_is_ufs_dev_deepsleep(hba);
ret = ufshcd_link_state_transition(hba, req_link_state, check_for_bkops);
if (ret && pm_op != UFS_SHUTDOWN_PM) {
/*
* Returning an error in the suspend flow would hang I/O.
* Trigger the error handler and abort the suspend so that
* error recovery can run.
*/
ufshcd_force_error_recovery(hba);
ret = -EBUSY;
}
if (ret)
goto set_dev_active;
vops_suspend:
/*
* Call vendor specific suspend callback. As these callbacks may access
* vendor specific host controller register space, call them while the
* host clocks are still ON.
*/
ret = ufshcd_vops_suspend(hba, pm_op, POST_CHANGE);
if (ret)
goto set_link_active;
goto out;
set_link_active:
/*
* Device hardware reset is required to exit DeepSleep. Also, for
* DeepSleep, the link is off so host reset and restore will be done
* further below.
*/
if (ufshcd_is_ufs_dev_deepsleep(hba)) {
ufshcd_device_reset(hba);
WARN_ON(!ufshcd_is_link_off(hba));
}
if (ufshcd_is_link_hibern8(hba) && !ufshcd_uic_hibern8_exit(hba))
ufshcd_set_link_active(hba);
else if (ufshcd_is_link_off(hba))
ufshcd_host_reset_and_restore(hba);
set_dev_active:
/* Can also get here needing to exit DeepSleep */
if (ufshcd_is_ufs_dev_deepsleep(hba)) {
ufshcd_device_reset(hba);
ufshcd_host_reset_and_restore(hba);
}
if (!ufshcd_set_dev_pwr_mode(hba, UFS_ACTIVE_PWR_MODE))
ufshcd_disable_auto_bkops(hba);
enable_scaling:
if (ufshcd_is_clkscaling_supported(hba))
ufshcd_clk_scaling_suspend(hba, false);
hba->dev_info.b_rpm_dev_flush_capable = false;
out:
if (hba->dev_info.b_rpm_dev_flush_capable) {
schedule_delayed_work(&hba->rpm_dev_flush_recheck_work,
msecs_to_jiffies(RPM_DEV_FLUSH_RECHECK_WORK_DELAY_MS));
}
if (ret) {
ufshcd_update_evt_hist(hba, UFS_EVT_WL_SUSP_ERR, (u32)ret);
hba->clk_gating.is_suspended = false;
ufshcd_release(hba);
}
hba->pm_op_in_progress = false;
return ret;
}
#ifdef CONFIG_PM
static int __ufshcd_wl_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op)
{
int ret;
enum uic_link_state old_link_state = hba->uic_link_state;
hba->pm_op_in_progress = true;
/*
* Call vendor specific resume callback. As these callbacks may access
* vendor specific host controller register space call them when the
* host clocks are ON.
*/
ret = ufshcd_vops_resume(hba, pm_op);
if (ret)
goto out;
/* For DeepSleep, the only supported option is to have the link off */
WARN_ON(ufshcd_is_ufs_dev_deepsleep(hba) && !ufshcd_is_link_off(hba));
if (ufshcd_is_link_hibern8(hba)) {
ret = ufshcd_uic_hibern8_exit(hba);
if (!ret) {
ufshcd_set_link_active(hba);
} else {
dev_err(hba->dev, "%s: hibern8 exit failed %d\n",
__func__, ret);
goto vendor_suspend;
}
} else if (ufshcd_is_link_off(hba)) {
/*
* A full initialization of the host and the device is
* required since the link was put to off during suspend.
* Note, in the case of DeepSleep, the device will exit
* DeepSleep due to device reset.
*/
ret = ufshcd_reset_and_restore(hba);
/*
* ufshcd_reset_and_restore() should have already
* set the link state as active
*/
if (ret || !ufshcd_is_link_active(hba))
goto vendor_suspend;
}
if (!ufshcd_is_ufs_dev_active(hba)) {
ret = ufshcd_set_dev_pwr_mode(hba, UFS_ACTIVE_PWR_MODE);
if (ret)
goto set_old_link_state;
ufshcd_set_timestamp_attr(hba);
}
if (ufshcd_keep_autobkops_enabled_except_suspend(hba))
ufshcd_enable_auto_bkops(hba);
else
/*
* If BKOPs operations are urgently needed at this moment then
* keep auto-bkops enabled or else disable it.
*/
ufshcd_urgent_bkops(hba);
if (hba->ee_usr_mask)
ufshcd_write_ee_control(hba);
if (ufshcd_is_clkscaling_supported(hba))
ufshcd_clk_scaling_suspend(hba, false);
if (hba->dev_info.b_rpm_dev_flush_capable) {
hba->dev_info.b_rpm_dev_flush_capable = false;
cancel_delayed_work(&hba->rpm_dev_flush_recheck_work);
}
/* Enable Auto-Hibernate if configured */
ufshcd_auto_hibern8_enable(hba);
goto out;
set_old_link_state:
ufshcd_link_state_transition(hba, old_link_state, 0);
vendor_suspend:
ufshcd_vops_suspend(hba, pm_op, PRE_CHANGE);
ufshcd_vops_suspend(hba, pm_op, POST_CHANGE);
out:
if (ret)
ufshcd_update_evt_hist(hba, UFS_EVT_WL_RES_ERR, (u32)ret);
hba->clk_gating.is_suspended = false;
ufshcd_release(hba);
hba->pm_op_in_progress = false;
return ret;
}
static int ufshcd_wl_runtime_suspend(struct device *dev)
{
struct scsi_device *sdev = to_scsi_device(dev);
struct ufs_hba *hba;
int ret;
ktime_t start = ktime_get();
hba = shost_priv(sdev->host);
ret = __ufshcd_wl_suspend(hba, UFS_RUNTIME_PM);
if (ret)
dev_err(&sdev->sdev_gendev, "%s failed: %d\n", __func__, ret);
trace_ufshcd_wl_runtime_suspend(dev_name(dev), ret,
ktime_to_us(ktime_sub(ktime_get(), start)),
hba->curr_dev_pwr_mode, hba->uic_link_state);
return ret;
}
static int ufshcd_wl_runtime_resume(struct device *dev)
{
struct scsi_device *sdev = to_scsi_device(dev);
struct ufs_hba *hba;
int ret = 0;
ktime_t start = ktime_get();
hba = shost_priv(sdev->host);
ret = __ufshcd_wl_resume(hba, UFS_RUNTIME_PM);
if (ret)
dev_err(&sdev->sdev_gendev, "%s failed: %d\n", __func__, ret);
trace_ufshcd_wl_runtime_resume(dev_name(dev), ret,
ktime_to_us(ktime_sub(ktime_get(), start)),
hba->curr_dev_pwr_mode, hba->uic_link_state);
return ret;
}
#endif
#ifdef CONFIG_PM_SLEEP
static int ufshcd_wl_suspend(struct device *dev)
{
struct scsi_device *sdev = to_scsi_device(dev);
struct ufs_hba *hba;
int ret = 0;
ktime_t start = ktime_get();
hba = shost_priv(sdev->host);
down(&hba->host_sem);
hba->system_suspending = true;
if (pm_runtime_suspended(dev))
goto out;
ret = __ufshcd_wl_suspend(hba, UFS_SYSTEM_PM);
if (ret) {
dev_err(&sdev->sdev_gendev, "%s failed: %d\n", __func__, ret);
up(&hba->host_sem);
}
out:
if (!ret)
hba->is_sys_suspended = true;
trace_ufshcd_wl_suspend(dev_name(dev), ret,
ktime_to_us(ktime_sub(ktime_get(), start)),
hba->curr_dev_pwr_mode, hba->uic_link_state);
return ret;
}
static int ufshcd_wl_resume(struct device *dev)
{
struct scsi_device *sdev = to_scsi_device(dev);
struct ufs_hba *hba;
int ret = 0;
ktime_t start = ktime_get();
hba = shost_priv(sdev->host);
if (pm_runtime_suspended(dev))
goto out;
ret = __ufshcd_wl_resume(hba, UFS_SYSTEM_PM);
if (ret)
dev_err(&sdev->sdev_gendev, "%s failed: %d\n", __func__, ret);
out:
trace_ufshcd_wl_resume(dev_name(dev), ret,
ktime_to_us(ktime_sub(ktime_get(), start)),
hba->curr_dev_pwr_mode, hba->uic_link_state);
if (!ret)
hba->is_sys_suspended = false;
hba->system_suspending = false;
up(&hba->host_sem);
return ret;
}
#endif
/**
* ufshcd_suspend - helper function for suspend operations
* @hba: per adapter instance
*
* This function disables irqs, turns off the clocks and puts the
* regulators (vreg and hba-vreg) into LPM mode.
*
* Return: 0 upon success; < 0 upon failure.
*/
static int ufshcd_suspend(struct ufs_hba *hba)
{
int ret;
if (!hba->is_powered)
return 0;
/*
* Disable the host irq as no host controller transactions are
* expected until resume.
*/
ufshcd_disable_irq(hba);
ret = ufshcd_setup_clocks(hba, false);
if (ret) {
ufshcd_enable_irq(hba);
return ret;
}
if (ufshcd_is_clkgating_allowed(hba)) {
hba->clk_gating.state = CLKS_OFF;
trace_ufshcd_clk_gating(dev_name(hba->dev),
hba->clk_gating.state);
}
ufshcd_vreg_set_lpm(hba);
/* Put the host controller in low power mode if possible */
ufshcd_hba_vreg_set_lpm(hba);
return ret;
}
#ifdef CONFIG_PM
/**
* ufshcd_resume - helper function for resume operations
* @hba: per adapter instance
*
* This function basically turns on the regulators, clocks and
* irqs of the hba.
*
* Return: 0 for success and non-zero for failure.
*/
static int ufshcd_resume(struct ufs_hba *hba)
{
int ret;
if (!hba->is_powered)
return 0;
ufshcd_hba_vreg_set_hpm(hba);
ret = ufshcd_vreg_set_hpm(hba);
if (ret)
goto out;
/* Make sure clocks are enabled before accessing controller */
ret = ufshcd_setup_clocks(hba, true);
if (ret)
goto disable_vreg;
/* enable the host irq as host controller would be active soon */
ufshcd_enable_irq(hba);
goto out;
disable_vreg:
ufshcd_vreg_set_lpm(hba);
out:
if (ret)
ufshcd_update_evt_hist(hba, UFS_EVT_RESUME_ERR, (u32)ret);
return ret;
}
#endif /* CONFIG_PM */
#ifdef CONFIG_PM_SLEEP
/**
* ufshcd_system_suspend - system suspend callback
* @dev: Device associated with the UFS controller.
*
* Executed before putting the system into a sleep state in which the contents
* of main memory are preserved.
*
* Return: 0 for success and non-zero for failure.
*/
int ufshcd_system_suspend(struct device *dev)
{
struct ufs_hba *hba = dev_get_drvdata(dev);
int ret = 0;
ktime_t start = ktime_get();
if (pm_runtime_suspended(hba->dev))
goto out;
ret = ufshcd_suspend(hba);
out:
trace_ufshcd_system_suspend(dev_name(hba->dev), ret,
ktime_to_us(ktime_sub(ktime_get(), start)),
hba->curr_dev_pwr_mode, hba->uic_link_state);
return ret;
}
EXPORT_SYMBOL(ufshcd_system_suspend);
/**
* ufshcd_system_resume - system resume callback
* @dev: Device associated with the UFS controller.
*
* Executed after waking the system up from a sleep state in which the contents
* of main memory were preserved.
*
* Return: 0 for success and non-zero for failure.
*/
int ufshcd_system_resume(struct device *dev)
{
struct ufs_hba *hba = dev_get_drvdata(dev);
ktime_t start = ktime_get();
int ret = 0;
if (pm_runtime_suspended(hba->dev))
goto out;
ret = ufshcd_resume(hba);
out:
trace_ufshcd_system_resume(dev_name(hba->dev), ret,
ktime_to_us(ktime_sub(ktime_get(), start)),
hba->curr_dev_pwr_mode, hba->uic_link_state);
return ret;
}
EXPORT_SYMBOL(ufshcd_system_resume);
#endif /* CONFIG_PM_SLEEP */
#ifdef CONFIG_PM
/**
* ufshcd_runtime_suspend - runtime suspend callback
* @dev: Device associated with the UFS controller.
*
* Check the description of ufshcd_suspend() function for more details.
*
* Return: 0 for success and non-zero for failure.
*/
int ufshcd_runtime_suspend(struct device *dev)
{
struct ufs_hba *hba = dev_get_drvdata(dev);
int ret;
ktime_t start = ktime_get();
ret = ufshcd_suspend(hba);
trace_ufshcd_runtime_suspend(dev_name(hba->dev), ret,
ktime_to_us(ktime_sub(ktime_get(), start)),
hba->curr_dev_pwr_mode, hba->uic_link_state);
return ret;
}
EXPORT_SYMBOL(ufshcd_runtime_suspend);
/**
* ufshcd_runtime_resume - runtime resume routine
* @dev: Device associated with the UFS controller.
*
* This function brings the controller to the active state. The following
* operations are done in this function:
*
* 1. Turn on all the controller related clocks
* 2. Turn ON VCC rail
*
* Return: 0 upon success; < 0 upon failure.
*/
int ufshcd_runtime_resume(struct device *dev)
{
struct ufs_hba *hba = dev_get_drvdata(dev);
int ret;
ktime_t start = ktime_get();
ret = ufshcd_resume(hba);
trace_ufshcd_runtime_resume(dev_name(hba->dev), ret,
ktime_to_us(ktime_sub(ktime_get(), start)),
hba->curr_dev_pwr_mode, hba->uic_link_state);
return ret;
}
EXPORT_SYMBOL(ufshcd_runtime_resume);
#endif /* CONFIG_PM */
static void ufshcd_wl_shutdown(struct device *dev)
{
struct scsi_device *sdev = to_scsi_device(dev);
struct ufs_hba *hba = shost_priv(sdev->host);
down(&hba->host_sem);
hba->shutting_down = true;
up(&hba->host_sem);
/* Turn on everything while shutting down */
ufshcd_rpm_get_sync(hba);
scsi_device_quiesce(sdev);
shost_for_each_device(sdev, hba->host) {
if (sdev == hba->ufs_device_wlun)
continue;
scsi_device_quiesce(sdev);
}
__ufshcd_wl_suspend(hba, UFS_SHUTDOWN_PM);
/*
* Next, turn off the UFS controller and the UFS regulators. Disable
* clocks.
*/
if (ufshcd_is_ufs_dev_poweroff(hba) && ufshcd_is_link_off(hba))
ufshcd_suspend(hba);
hba->is_powered = false;
}
/**
* ufshcd_remove - de-allocate the SCSI host and the host memory space
* data structures
* @hba: per adapter instance
*/
void ufshcd_remove(struct ufs_hba *hba)
{
if (hba->ufs_device_wlun)
ufshcd_rpm_get_sync(hba);
ufs_hwmon_remove(hba);
ufs_bsg_remove(hba);
ufs_sysfs_remove_nodes(hba->dev);
blk_mq_destroy_queue(hba->tmf_queue);
blk_put_queue(hba->tmf_queue);
blk_mq_free_tag_set(&hba->tmf_tag_set);
scsi_remove_host(hba->host);
/* disable interrupts */
ufshcd_disable_intr(hba, hba->intr_mask);
ufshcd_hba_stop(hba);
ufshcd_hba_exit(hba);
}
EXPORT_SYMBOL_GPL(ufshcd_remove);
#ifdef CONFIG_PM_SLEEP
int ufshcd_system_freeze(struct device *dev)
{
return ufshcd_system_suspend(dev);
}
EXPORT_SYMBOL_GPL(ufshcd_system_freeze);
int ufshcd_system_restore(struct device *dev)
{
struct ufs_hba *hba = dev_get_drvdata(dev);
int ret;
ret = ufshcd_system_resume(dev);
if (ret)
return ret;
/* Configure UTRL and UTMRL base address registers */
ufshcd_writel(hba, lower_32_bits(hba->utrdl_dma_addr),
REG_UTP_TRANSFER_REQ_LIST_BASE_L);
ufshcd_writel(hba, upper_32_bits(hba->utrdl_dma_addr),
REG_UTP_TRANSFER_REQ_LIST_BASE_H);
ufshcd_writel(hba, lower_32_bits(hba->utmrdl_dma_addr),
REG_UTP_TASK_REQ_LIST_BASE_L);
ufshcd_writel(hba, upper_32_bits(hba->utmrdl_dma_addr),
REG_UTP_TASK_REQ_LIST_BASE_H);
/*
* Make sure that UTRL and UTMRL base address registers
* are updated with the latest queue addresses. Only after
* updating these addresses, we can queue the new commands.
*/
mb();
/* Resuming from hibernate, assume that link was OFF */
ufshcd_set_link_off(hba);
return 0;
}
EXPORT_SYMBOL_GPL(ufshcd_system_restore);
int ufshcd_system_thaw(struct device *dev)
{
return ufshcd_system_resume(dev);
}
EXPORT_SYMBOL_GPL(ufshcd_system_thaw);
#endif /* CONFIG_PM_SLEEP */
/**
* ufshcd_dealloc_host - deallocate Host Bus Adapter (HBA)
* @hba: pointer to Host Bus Adapter (HBA)
*/
void ufshcd_dealloc_host(struct ufs_hba *hba)
{
scsi_host_put(hba->host);
}
EXPORT_SYMBOL_GPL(ufshcd_dealloc_host);
/**
* ufshcd_set_dma_mask - Set dma mask based on the controller
* addressing capability
* @hba: per adapter instance
*
* Return: 0 for success, non-zero for failure.
*/
static int ufshcd_set_dma_mask(struct ufs_hba *hba)
{
if (hba->capabilities & MASK_64_ADDRESSING_SUPPORT) {
if (!dma_set_mask_and_coherent(hba->dev, DMA_BIT_MASK(64)))
return 0;
}
return dma_set_mask_and_coherent(hba->dev, DMA_BIT_MASK(32));
}
/**
* ufshcd_alloc_host - allocate Host Bus Adapter (HBA)
* @dev: pointer to device handle
* @hba_handle: driver private handle
*
* Return: 0 on success, non-zero value on failure.
*/
int ufshcd_alloc_host(struct device *dev, struct ufs_hba **hba_handle)
{
struct Scsi_Host *host;
struct ufs_hba *hba;
int err = 0;
if (!dev) {
dev_err(dev,
"Invalid memory reference for dev is NULL\n");
err = -ENODEV;
goto out_error;
}
host = scsi_host_alloc(&ufshcd_driver_template,
sizeof(struct ufs_hba));
if (!host) {
dev_err(dev, "scsi_host_alloc failed\n");
err = -ENOMEM;
goto out_error;
}
host->nr_maps = HCTX_TYPE_POLL + 1;
hba = shost_priv(host);
hba->host = host;
hba->dev = dev;
hba->dev_ref_clk_freq = REF_CLK_FREQ_INVAL;
hba->nop_out_timeout = NOP_OUT_TIMEOUT;
ufshcd_set_sg_entry_size(hba, sizeof(struct ufshcd_sg_entry));
INIT_LIST_HEAD(&hba->clk_list_head);
spin_lock_init(&hba->outstanding_lock);
*hba_handle = hba;
out_error:
return err;
}
EXPORT_SYMBOL(ufshcd_alloc_host);
/* This function exists because blk_mq_alloc_tag_set() requires this. */
static blk_status_t ufshcd_queue_tmf(struct blk_mq_hw_ctx *hctx,
const struct blk_mq_queue_data *qd)
{
WARN_ON_ONCE(true);
return BLK_STS_NOTSUPP;
}
static const struct blk_mq_ops ufshcd_tmf_ops = {
.queue_rq = ufshcd_queue_tmf,
};
/**
* ufshcd_init - Driver initialization routine
* @hba: per-adapter instance
* @mmio_base: base register address
* @irq: Interrupt line of device
*
* Return: 0 on success, non-zero value on failure.
*/
int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
{
int err;
struct Scsi_Host *host = hba->host;
struct device *dev = hba->dev;
char eh_wq_name[sizeof("ufs_eh_wq_00")];
/*
* dev_set_drvdata() must be called before any callbacks are registered
* that use dev_get_drvdata() (frequency scaling, clock scaling, hwmon,
* sysfs).
*/
dev_set_drvdata(dev, hba);
if (!mmio_base) {
dev_err(hba->dev,
"Invalid memory reference for mmio_base is NULL\n");
err = -ENODEV;
goto out_error;
}
hba->mmio_base = mmio_base;
hba->irq = irq;
hba->vps = &ufs_hba_vps;
err = ufshcd_hba_init(hba);
if (err)
goto out_error;
/* Read capabilities registers */
err = ufshcd_hba_capabilities(hba);
if (err)
goto out_disable;
/* Get UFS version supported by the controller */
hba->ufs_version = ufshcd_get_ufs_version(hba);
/* Get Interrupt bit mask per version */
hba->intr_mask = ufshcd_get_intr_mask(hba);
err = ufshcd_set_dma_mask(hba);
if (err) {
dev_err(hba->dev, "set dma mask failed\n");
goto out_disable;
}
/* Allocate memory for host memory space */
err = ufshcd_memory_alloc(hba);
if (err) {
dev_err(hba->dev, "Memory allocation failed\n");
goto out_disable;
}
/* Configure LRB */
ufshcd_host_memory_configure(hba);
host->can_queue = hba->nutrs - UFSHCD_NUM_RESERVED;
host->cmd_per_lun = hba->nutrs - UFSHCD_NUM_RESERVED;
host->max_id = UFSHCD_MAX_ID;
host->max_lun = UFS_MAX_LUNS;
host->max_channel = UFSHCD_MAX_CHANNEL;
host->unique_id = host->host_no;
host->max_cmd_len = UFS_CDB_SIZE;
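/*
 * With clock gating enabled, queuecommand may have to sleep while the
 * clocks are being ungated, so tell the SCSI core about it.
 */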
host->queuecommand_may_block = !!(hba->caps & UFSHCD_CAP_CLK_GATING);
hba->max_pwr_info.is_valid = false;
/* Initialize work queues */
snprintf(eh_wq_name, sizeof(eh_wq_name), "ufs_eh_wq_%d",
hba->host->host_no);
hba->eh_wq = create_singlethread_workqueue(eh_wq_name);
if (!hba->eh_wq) {
dev_err(hba->dev, "%s: failed to create eh workqueue\n",
__func__);
err = -ENOMEM;
goto out_disable;
}
INIT_WORK(&hba->eh_work, ufshcd_err_handler);
INIT_WORK(&hba->eeh_work, ufshcd_exception_event_handler);
sema_init(&hba->host_sem, 1);
/* Initialize UIC command mutex */
mutex_init(&hba->uic_cmd_mutex);
/* Initialize mutex for device management commands */
mutex_init(&hba->dev_cmd.lock);
/* Initialize mutex for exception event control */
mutex_init(&hba->ee_ctrl_mutex);
mutex_init(&hba->wb_mutex);
init_rwsem(&hba->clk_scaling_lock);
ufshcd_init_clk_gating(hba);
ufshcd_init_clk_scaling(hba);
/*
* In order to avoid any spurious interrupt immediately after
* registering UFS controller interrupt handler, clear any pending UFS
* interrupt status and disable all the UFS interrupts.
*/
ufshcd_writel(hba, ufshcd_readl(hba, REG_INTERRUPT_STATUS),
REG_INTERRUPT_STATUS);
ufshcd_writel(hba, 0, REG_INTERRUPT_ENABLE);
/*
* Make sure that UFS interrupts are disabled and any pending interrupt
* status is cleared before registering UFS interrupt handler.
*/
mb();
/* IRQ registration */
err = devm_request_irq(dev, irq, ufshcd_intr, IRQF_SHARED, UFSHCD, hba);
if (err) {
dev_err(hba->dev, "request irq failed\n");
goto out_disable;
} else {
hba->is_irq_enabled = true;
}
if (!is_mcq_supported(hba)) {
err = scsi_add_host(host, hba->dev);
if (err) {
dev_err(hba->dev, "scsi_add_host failed\n");
goto out_disable;
}
}
hba->tmf_tag_set = (struct blk_mq_tag_set) {
.nr_hw_queues = 1,
.queue_depth = hba->nutmrs,
.ops = &ufshcd_tmf_ops,
.flags = BLK_MQ_F_NO_SCHED,
};
err = blk_mq_alloc_tag_set(&hba->tmf_tag_set);
if (err < 0)
goto out_remove_scsi_host;
hba->tmf_queue = blk_mq_init_queue(&hba->tmf_tag_set);
if (IS_ERR(hba->tmf_queue)) {
err = PTR_ERR(hba->tmf_queue);
goto free_tmf_tag_set;
}
hba->tmf_rqs = devm_kcalloc(hba->dev, hba->nutmrs,
sizeof(*hba->tmf_rqs), GFP_KERNEL);
if (!hba->tmf_rqs) {
err = -ENOMEM;
goto free_tmf_queue;
}
/* Reset the attached device */
ufshcd_device_reset(hba);
ufshcd_init_crypto(hba);
/* Host controller enable */
err = ufshcd_hba_enable(hba);
if (err) {
dev_err(hba->dev, "Host controller enable failed\n");
ufshcd_print_evt_hist(hba);
ufshcd_print_host_state(hba);
goto free_tmf_queue;
}
/*
* Set the default power management level for runtime and system PM.
* Default power saving mode is to keep UFS link in Hibern8 state
* and UFS device in sleep state.
*/
hba->rpm_lvl = ufs_get_desired_pm_lvl_for_dev_link_state(
UFS_SLEEP_PWR_MODE,
UIC_LINK_HIBERN8_STATE);
hba->spm_lvl = ufs_get_desired_pm_lvl_for_dev_link_state(
UFS_SLEEP_PWR_MODE,
UIC_LINK_HIBERN8_STATE);
INIT_DELAYED_WORK(&hba->rpm_dev_flush_recheck_work,
ufshcd_rpm_dev_flush_recheck_work);
/* Set the default auto-hibernate idle timer value to 150 ms */
if (ufshcd_is_auto_hibern8_supported(hba) && !hba->ahit) {
hba->ahit = FIELD_PREP(UFSHCI_AHIBERN8_TIMER_MASK, 150) |
FIELD_PREP(UFSHCI_AHIBERN8_SCALE_MASK, 3);
}
/* Hold auto suspend until async scan completes */
pm_runtime_get_sync(dev);
atomic_set(&hba->scsi_block_reqs_cnt, 0);
/*
* We are assuming that the device was not put in sleep/power-down
* state by the boot stage before the kernel took over.
* This assumption helps avoid doing link startup twice during
* ufshcd_probe_hba().
*/
ufshcd_set_ufs_dev_active(hba);
async_schedule(ufshcd_async_scan, hba);
ufs_sysfs_add_nodes(hba->dev);
device_enable_async_suspend(dev);
return 0;
free_tmf_queue:
blk_mq_destroy_queue(hba->tmf_queue);
blk_put_queue(hba->tmf_queue);
free_tmf_tag_set:
blk_mq_free_tag_set(&hba->tmf_tag_set);
out_remove_scsi_host:
scsi_remove_host(hba->host);
out_disable:
hba->is_irq_enabled = false;
ufshcd_hba_exit(hba);
out_error:
return err;
}
EXPORT_SYMBOL_GPL(ufshcd_init);
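/*
 * Rough usage sketch (illustrative only, not part of this file): a bus glue
 * driver is expected to allocate the HBA, map the controller registers and
 * then hand them to ufshcd_init(). The platform helpers and resource indices
 * below are assumptions made for the example.
 *
 *	struct ufs_hba *hba;
 *	void __iomem *mmio;
 *	int err;
 *
 *	err = ufshcd_alloc_host(&pdev->dev, &hba);
 *	if (err)
 *		return err;
 *	mmio = devm_platform_ioremap_resource(pdev, 0);
 *	if (IS_ERR(mmio))
 *		return PTR_ERR(mmio);
 *	err = ufshcd_init(hba, mmio, platform_get_irq(pdev, 0));
 *	if (err)
 *		ufshcd_dealloc_host(hba);
 *	return err;
 */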
void ufshcd_resume_complete(struct device *dev)
{
struct ufs_hba *hba = dev_get_drvdata(dev);
if (hba->complete_put) {
ufshcd_rpm_put(hba);
hba->complete_put = false;
}
}
EXPORT_SYMBOL_GPL(ufshcd_resume_complete);
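/*
 * Check whether the WLUN's current runtime PM state already matches the
 * state that system suspend would put it in, in which case system suspend
 * can leave the device runtime suspended instead of resuming it first.
 */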
static bool ufshcd_rpm_ok_for_spm(struct ufs_hba *hba)
{
struct device *dev = &hba->ufs_device_wlun->sdev_gendev;
enum ufs_dev_pwr_mode dev_pwr_mode;
enum uic_link_state link_state;
unsigned long flags;
bool res;
spin_lock_irqsave(&dev->power.lock, flags);
dev_pwr_mode = ufs_get_pm_lvl_to_dev_pwr_mode(hba->spm_lvl);
link_state = ufs_get_pm_lvl_to_link_pwr_state(hba->spm_lvl);
res = pm_runtime_suspended(dev) &&
hba->curr_dev_pwr_mode == dev_pwr_mode &&
hba->uic_link_state == link_state &&
!hba->dev_info.b_rpm_dev_flush_capable;
spin_unlock_irqrestore(&dev->power.lock, flags);
return res;
}
int __ufshcd_suspend_prepare(struct device *dev, bool rpm_ok_for_spm)
{
struct ufs_hba *hba = dev_get_drvdata(dev);
int ret;
/*
* The SCSI core assumes that runtime PM and system PM are the same for
* SCSI drivers, so it does not wake up a runtime-suspended device for
* system suspend. UFS does not follow that model; see
* ufshcd_resume_complete().
*/
if (hba->ufs_device_wlun) {
/* Prevent runtime suspend */
ufshcd_rpm_get_noresume(hba);
/*
* Check if already runtime suspended in same state as system
* suspend would be.
*/
if (!rpm_ok_for_spm || !ufshcd_rpm_ok_for_spm(hba)) {
/* RPM state is not ok for SPM, so runtime resume */
ret = ufshcd_rpm_resume(hba);
if (ret < 0 && ret != -EACCES) {
ufshcd_rpm_put(hba);
return ret;
}
}
hba->complete_put = true;
}
return 0;
}
EXPORT_SYMBOL_GPL(__ufshcd_suspend_prepare);
int ufshcd_suspend_prepare(struct device *dev)
{
return __ufshcd_suspend_prepare(dev, true);
}
EXPORT_SYMBOL_GPL(ufshcd_suspend_prepare);
#ifdef CONFIG_PM_SLEEP
static int ufshcd_wl_poweroff(struct device *dev)
{
struct scsi_device *sdev = to_scsi_device(dev);
struct ufs_hba *hba = shost_priv(sdev->host);
__ufshcd_wl_suspend(hba, UFS_SHUTDOWN_PM);
return 0;
}
#endif
static int ufshcd_wl_probe(struct device *dev)
{
struct scsi_device *sdev = to_scsi_device(dev);
if (!is_device_wlun(sdev))
return -ENODEV;
blk_pm_runtime_init(sdev->request_queue, dev);
pm_runtime_set_autosuspend_delay(dev, 0);
pm_runtime_allow(dev);
return 0;
}
static int ufshcd_wl_remove(struct device *dev)
{
pm_runtime_forbid(dev);
return 0;
}
static const struct dev_pm_ops ufshcd_wl_pm_ops = {
#ifdef CONFIG_PM_SLEEP
.suspend = ufshcd_wl_suspend,
.resume = ufshcd_wl_resume,
.freeze = ufshcd_wl_suspend,
.thaw = ufshcd_wl_resume,
.poweroff = ufshcd_wl_poweroff,
.restore = ufshcd_wl_resume,
#endif
SET_RUNTIME_PM_OPS(ufshcd_wl_runtime_suspend, ufshcd_wl_runtime_resume, NULL)
};
static void ufshcd_check_header_layout(void)
{
/*
* gcc compilers before version 10 cannot do constant-folding for
* sub-byte bitfields. Hence skip the layout checks for gcc 9 and
* before.
*/
if (IS_ENABLED(CONFIG_CC_IS_GCC) && CONFIG_GCC_VERSION < 100000)
return;
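/*
 * The checks below verify at compile time that each bitfield in
 * struct request_desc_header and struct utp_upiu_header lands at the
 * byte offset and bit position required by the UFSHCI and UFS
 * specifications.
 */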
BUILD_BUG_ON(((u8 *)&(struct request_desc_header){
.cci = 3})[0] != 3);
BUILD_BUG_ON(((u8 *)&(struct request_desc_header){
.ehs_length = 2})[1] != 2);
BUILD_BUG_ON(((u8 *)&(struct request_desc_header){
.enable_crypto = 1})[2]
!= 0x80);
BUILD_BUG_ON((((u8 *)&(struct request_desc_header){
.command_type = 5,
.data_direction = 3,
.interrupt = 1,
})[3]) != ((5 << 4) | (3 << 1) | 1));
BUILD_BUG_ON(((__le32 *)&(struct request_desc_header){
.dunl = cpu_to_le32(0xdeadbeef)})[1] !=
cpu_to_le32(0xdeadbeef));
BUILD_BUG_ON(((u8 *)&(struct request_desc_header){
.ocs = 4})[8] != 4);
BUILD_BUG_ON(((u8 *)&(struct request_desc_header){
.cds = 5})[9] != 5);
BUILD_BUG_ON(((__le32 *)&(struct request_desc_header){
.dunu = cpu_to_le32(0xbadcafe)})[3] !=
cpu_to_le32(0xbadcafe));
BUILD_BUG_ON(((u8 *)&(struct utp_upiu_header){
.iid = 0xf })[4] != 0xf0);
BUILD_BUG_ON(((u8 *)&(struct utp_upiu_header){
.command_set_type = 0xf })[4] != 0xf);
}
/*
* ufs_dev_wlun_template - describes ufs device wlun
* ufs-device wlun - used to send pm commands
* All luns are consumers of ufs-device wlun.
*
* Currently, no sd driver is present for wluns.
* Hence no specific PM operations are performed on them.
* Per the UFS design, SSU commands should be sent to the ufs-device wlun,
* hence a SCSI driver is registered for UFS wluns only.
*/
static struct scsi_driver ufs_dev_wlun_template = {
.gendrv = {
.name = "ufs_device_wlun",
.owner = THIS_MODULE,
.probe = ufshcd_wl_probe,
.remove = ufshcd_wl_remove,
.pm = &ufshcd_wl_pm_ops,
.shutdown = ufshcd_wl_shutdown,
},
};
static int __init ufshcd_core_init(void)
{
int ret;
ufshcd_check_header_layout();
ufs_debugfs_init();
ret = scsi_register_driver(&ufs_dev_wlun_template.gendrv);
if (ret)
ufs_debugfs_exit();
return ret;
}
static void __exit ufshcd_core_exit(void)
{
ufs_debugfs_exit();
scsi_unregister_driver(&ufs_dev_wlun_template.gendrv);
}
module_init(ufshcd_core_init);
module_exit(ufshcd_core_exit);
MODULE_AUTHOR("Santosh Yaragnavi <[email protected]>");
MODULE_AUTHOR("Vinayak Holikatti <[email protected]>");
MODULE_DESCRIPTION("Generic UFS host controller driver Core");
MODULE_SOFTDEP("pre: governor_simpleondemand");
MODULE_LICENSE("GPL");
| linux-master | drivers/ufs/core/ufshcd.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright 2019 Google LLC
*/
#include <ufs/ufshcd.h>
#include "ufshcd-crypto.h"
/* Blk-crypto modes supported by UFS crypto */
static const struct ufs_crypto_alg_entry {
enum ufs_crypto_alg ufs_alg;
enum ufs_crypto_key_size ufs_key_size;
} ufs_crypto_algs[BLK_ENCRYPTION_MODE_MAX] = {
[BLK_ENCRYPTION_MODE_AES_256_XTS] = {
.ufs_alg = UFS_CRYPTO_ALG_AES_XTS,
.ufs_key_size = UFS_CRYPTO_KEY_SIZE_256,
},
};
static int ufshcd_program_key(struct ufs_hba *hba,
const union ufs_crypto_cfg_entry *cfg, int slot)
{
int i;
u32 slot_offset = hba->crypto_cfg_register + slot * sizeof(*cfg);
int err = 0;
ufshcd_hold(hba);
if (hba->vops && hba->vops->program_key) {
err = hba->vops->program_key(hba, cfg, slot);
goto out;
}
/* Ensure that CFGE is cleared before programming the key */
ufshcd_writel(hba, 0, slot_offset + 16 * sizeof(cfg->reg_val[0]));
for (i = 0; i < 16; i++) {
ufshcd_writel(hba, le32_to_cpu(cfg->reg_val[i]),
slot_offset + i * sizeof(cfg->reg_val[0]));
}
/* Write dword 17 */
ufshcd_writel(hba, le32_to_cpu(cfg->reg_val[17]),
slot_offset + 17 * sizeof(cfg->reg_val[0]));
/* Dword 16 must be written last */
ufshcd_writel(hba, le32_to_cpu(cfg->reg_val[16]),
slot_offset + 16 * sizeof(cfg->reg_val[0]));
out:
ufshcd_release(hba);
return err;
}
static int ufshcd_crypto_keyslot_program(struct blk_crypto_profile *profile,
const struct blk_crypto_key *key,
unsigned int slot)
{
struct ufs_hba *hba =
container_of(profile, struct ufs_hba, crypto_profile);
const union ufs_crypto_cap_entry *ccap_array = hba->crypto_cap_array;
const struct ufs_crypto_alg_entry *alg =
&ufs_crypto_algs[key->crypto_cfg.crypto_mode];
u8 data_unit_mask = key->crypto_cfg.data_unit_size / 512;
int i;
int cap_idx = -1;
union ufs_crypto_cfg_entry cfg = {};
int err;
BUILD_BUG_ON(UFS_CRYPTO_KEY_SIZE_INVALID != 0);
for (i = 0; i < hba->crypto_capabilities.num_crypto_cap; i++) {
if (ccap_array[i].algorithm_id == alg->ufs_alg &&
ccap_array[i].key_size == alg->ufs_key_size &&
(ccap_array[i].sdus_mask & data_unit_mask)) {
cap_idx = i;
break;
}
}
if (WARN_ON(cap_idx < 0))
return -EOPNOTSUPP;
cfg.data_unit_size = data_unit_mask;
cfg.crypto_cap_idx = cap_idx;
cfg.config_enable = UFS_CRYPTO_CONFIGURATION_ENABLE;
if (ccap_array[cap_idx].algorithm_id == UFS_CRYPTO_ALG_AES_XTS) {
/* In XTS mode, the blk_crypto_key's size is already doubled */
memcpy(cfg.crypto_key, key->raw, key->size/2);
memcpy(cfg.crypto_key + UFS_CRYPTO_KEY_MAX_SIZE/2,
key->raw + key->size/2, key->size/2);
} else {
memcpy(cfg.crypto_key, key->raw, key->size);
}
err = ufshcd_program_key(hba, &cfg, slot);
memzero_explicit(&cfg, sizeof(cfg));
return err;
}
static int ufshcd_clear_keyslot(struct ufs_hba *hba, int slot)
{
/*
* Clear the crypto cfg on the device. Clearing CFGE
* might not be sufficient, so just clear the entire cfg.
*/
union ufs_crypto_cfg_entry cfg = {};
return ufshcd_program_key(hba, &cfg, slot);
}
static int ufshcd_crypto_keyslot_evict(struct blk_crypto_profile *profile,
const struct blk_crypto_key *key,
unsigned int slot)
{
struct ufs_hba *hba =
container_of(profile, struct ufs_hba, crypto_profile);
return ufshcd_clear_keyslot(hba, slot);
}
bool ufshcd_crypto_enable(struct ufs_hba *hba)
{
if (!(hba->caps & UFSHCD_CAP_CRYPTO))
return false;
/* Reset might clear all keys, so reprogram all the keys. */
blk_crypto_reprogram_all_keys(&hba->crypto_profile);
return true;
}
static const struct blk_crypto_ll_ops ufshcd_crypto_ops = {
.keyslot_program = ufshcd_crypto_keyslot_program,
.keyslot_evict = ufshcd_crypto_keyslot_evict,
};
static enum blk_crypto_mode_num
ufshcd_find_blk_crypto_mode(union ufs_crypto_cap_entry cap)
{
int i;
for (i = 0; i < ARRAY_SIZE(ufs_crypto_algs); i++) {
BUILD_BUG_ON(UFS_CRYPTO_KEY_SIZE_INVALID != 0);
if (ufs_crypto_algs[i].ufs_alg == cap.algorithm_id &&
ufs_crypto_algs[i].ufs_key_size == cap.key_size) {
return i;
}
}
return BLK_ENCRYPTION_MODE_INVALID;
}
/**
* ufshcd_hba_init_crypto_capabilities - Read crypto capabilities, init crypto
* fields in hba
* @hba: Per adapter instance
*
* Return: 0 if crypto was initialized or is not supported, else a -errno value.
*/
int ufshcd_hba_init_crypto_capabilities(struct ufs_hba *hba)
{
int cap_idx;
int err = 0;
enum blk_crypto_mode_num blk_mode_num;
/*
* Don't use crypto if either the hardware doesn't advertise the
* standard crypto capability bit *or* if the vendor specific driver
* hasn't advertised that crypto is supported.
*/
if (!(hba->capabilities & MASK_CRYPTO_SUPPORT) ||
!(hba->caps & UFSHCD_CAP_CRYPTO))
goto out;
hba->crypto_capabilities.reg_val =
cpu_to_le32(ufshcd_readl(hba, REG_UFS_CCAP));
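/* The config array pointer in CCAP is expressed in units of 0x100 bytes. */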
hba->crypto_cfg_register =
(u32)hba->crypto_capabilities.config_array_ptr * 0x100;
hba->crypto_cap_array =
devm_kcalloc(hba->dev, hba->crypto_capabilities.num_crypto_cap,
sizeof(hba->crypto_cap_array[0]), GFP_KERNEL);
if (!hba->crypto_cap_array) {
err = -ENOMEM;
goto out;
}
/* The actual number of configurations supported is (CFGC+1) */
err = devm_blk_crypto_profile_init(
hba->dev, &hba->crypto_profile,
hba->crypto_capabilities.config_count + 1);
if (err)
goto out;
hba->crypto_profile.ll_ops = ufshcd_crypto_ops;
/* UFS only supports 8 bytes for any DUN */
hba->crypto_profile.max_dun_bytes_supported = 8;
hba->crypto_profile.dev = hba->dev;
/*
* Cache all the UFS crypto capabilities and advertise the supported
* crypto modes and data unit sizes to the block layer.
*/
for (cap_idx = 0; cap_idx < hba->crypto_capabilities.num_crypto_cap;
cap_idx++) {
hba->crypto_cap_array[cap_idx].reg_val =
cpu_to_le32(ufshcd_readl(hba,
REG_UFS_CRYPTOCAP +
cap_idx * sizeof(__le32)));
blk_mode_num = ufshcd_find_blk_crypto_mode(
hba->crypto_cap_array[cap_idx]);
if (blk_mode_num != BLK_ENCRYPTION_MODE_INVALID)
hba->crypto_profile.modes_supported[blk_mode_num] |=
hba->crypto_cap_array[cap_idx].sdus_mask * 512;
}
return 0;
out:
/* Indicate that init failed by clearing UFSHCD_CAP_CRYPTO */
hba->caps &= ~UFSHCD_CAP_CRYPTO;
return err;
}
/**
* ufshcd_init_crypto - Initialize crypto hardware
* @hba: Per adapter instance
*/
void ufshcd_init_crypto(struct ufs_hba *hba)
{
int slot;
if (!(hba->caps & UFSHCD_CAP_CRYPTO))
return;
/* Clear all keyslots - the number of keyslots is (CFGC + 1) */
for (slot = 0; slot < hba->crypto_capabilities.config_count + 1; slot++)
ufshcd_clear_keyslot(hba, slot);
}
void ufshcd_crypto_register(struct ufs_hba *hba, struct request_queue *q)
{
if (hba->caps & UFSHCD_CAP_CRYPTO)
blk_crypto_register(&hba->crypto_profile, q);
}
| linux-master | drivers/ufs/core/ufshcd-crypto.c |
// SPDX-License-Identifier: GPL-2.0
/*
* bsg endpoint that supports UPIUs
*
* Copyright (C) 2018 Western Digital Corporation
*/
#include <linux/bsg-lib.h>
#include <linux/dma-mapping.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
#include "ufs_bsg.h"
#include <ufs/ufshcd.h>
#include "ufshcd-priv.h"
static int ufs_bsg_get_query_desc_size(struct ufs_hba *hba, int *desc_len,
struct utp_upiu_query *qr)
{
int desc_size = be16_to_cpu(qr->length);
if (desc_size <= 0)
return -EINVAL;
*desc_len = min_t(int, QUERY_DESC_MAX_SIZE, desc_size);
return 0;
}
static int ufs_bsg_alloc_desc_buffer(struct ufs_hba *hba, struct bsg_job *job,
uint8_t **desc_buff, int *desc_len,
enum query_opcode desc_op)
{
struct ufs_bsg_request *bsg_request = job->request;
struct utp_upiu_query *qr;
u8 *descp;
if (desc_op != UPIU_QUERY_OPCODE_WRITE_DESC &&
desc_op != UPIU_QUERY_OPCODE_READ_DESC)
goto out;
qr = &bsg_request->upiu_req.qr;
if (ufs_bsg_get_query_desc_size(hba, desc_len, qr)) {
dev_err(hba->dev, "Illegal desc size\n");
return -EINVAL;
}
if (*desc_len > job->request_payload.payload_len) {
dev_err(hba->dev, "Illegal desc size\n");
return -EINVAL;
}
descp = kzalloc(*desc_len, GFP_KERNEL);
if (!descp)
return -ENOMEM;
if (desc_op == UPIU_QUERY_OPCODE_WRITE_DESC)
sg_copy_to_buffer(job->request_payload.sg_list,
job->request_payload.sg_cnt, descp,
*desc_len);
*desc_buff = descp;
out:
return 0;
}
static int ufs_bsg_exec_advanced_rpmb_req(struct ufs_hba *hba, struct bsg_job *job)
{
struct ufs_rpmb_request *rpmb_request = job->request;
struct ufs_rpmb_reply *rpmb_reply = job->reply;
struct bsg_buffer *payload = NULL;
enum dma_data_direction dir;
struct scatterlist *sg_list = NULL;
int rpmb_req_type;
int sg_cnt = 0;
int ret;
int data_len;
if (hba->ufs_version < ufshci_version(4, 0) || !hba->dev_info.b_advanced_rpmb_en)
return -EINVAL;
if (rpmb_request->ehs_req.length != 2 || rpmb_request->ehs_req.ehs_type != 1)
return -EINVAL;
rpmb_req_type = be16_to_cpu(rpmb_request->ehs_req.meta.req_resp_type);
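/* The DMA data direction depends on the Advanced RPMB request type. */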
switch (rpmb_req_type) {
case UFS_RPMB_WRITE_KEY:
case UFS_RPMB_READ_CNT:
case UFS_RPMB_PURGE_ENABLE:
dir = DMA_NONE;
break;
case UFS_RPMB_WRITE:
case UFS_RPMB_SEC_CONF_WRITE:
dir = DMA_TO_DEVICE;
break;
case UFS_RPMB_READ:
case UFS_RPMB_SEC_CONF_READ:
case UFS_RPMB_PURGE_STATUS_READ:
dir = DMA_FROM_DEVICE;
break;
default:
return -EINVAL;
}
if (dir != DMA_NONE) {
payload = &job->request_payload;
if (!payload || !payload->payload_len || !payload->sg_cnt)
return -EINVAL;
sg_cnt = dma_map_sg(hba->host->dma_dev, payload->sg_list, payload->sg_cnt, dir);
if (unlikely(!sg_cnt))
return -ENOMEM;
sg_list = payload->sg_list;
data_len = payload->payload_len;
}
ret = ufshcd_advanced_rpmb_req_handler(hba, &rpmb_request->bsg_request.upiu_req,
&rpmb_reply->bsg_reply.upiu_rsp, &rpmb_request->ehs_req,
&rpmb_reply->ehs_rsp, sg_cnt, sg_list, dir);
if (dir != DMA_NONE) {
dma_unmap_sg(hba->host->dma_dev, payload->sg_list, payload->sg_cnt, dir);
if (!ret)
rpmb_reply->bsg_reply.reply_payload_rcv_len = data_len;
}
return ret;
}
static int ufs_bsg_request(struct bsg_job *job)
{
struct ufs_bsg_request *bsg_request = job->request;
struct ufs_bsg_reply *bsg_reply = job->reply;
struct ufs_hba *hba = shost_priv(dev_to_shost(job->dev->parent));
struct uic_command uc = {};
int msgcode;
uint8_t *buff = NULL;
int desc_len = 0;
enum query_opcode desc_op = UPIU_QUERY_OPCODE_NOP;
int ret;
bool rpmb = false;
bsg_reply->reply_payload_rcv_len = 0;
ufshcd_rpm_get_sync(hba);
msgcode = bsg_request->msgcode;
switch (msgcode) {
case UPIU_TRANSACTION_QUERY_REQ:
desc_op = bsg_request->upiu_req.qr.opcode;
ret = ufs_bsg_alloc_desc_buffer(hba, job, &buff, &desc_len, desc_op);
if (ret)
goto out;
fallthrough;
case UPIU_TRANSACTION_NOP_OUT:
case UPIU_TRANSACTION_TASK_REQ:
ret = ufshcd_exec_raw_upiu_cmd(hba, &bsg_request->upiu_req,
&bsg_reply->upiu_rsp, msgcode,
buff, &desc_len, desc_op);
if (ret)
dev_err(hba->dev, "exe raw upiu: error code %d\n", ret);
else if (desc_op == UPIU_QUERY_OPCODE_READ_DESC && desc_len) {
bsg_reply->reply_payload_rcv_len =
sg_copy_from_buffer(job->request_payload.sg_list,
job->request_payload.sg_cnt,
buff, desc_len);
}
break;
case UPIU_TRANSACTION_UIC_CMD:
memcpy(&uc, &bsg_request->upiu_req.uc, UIC_CMD_SIZE);
ret = ufshcd_send_uic_cmd(hba, &uc);
if (ret)
dev_err(hba->dev, "send uic cmd: error code %d\n", ret);
memcpy(&bsg_reply->upiu_rsp.uc, &uc, UIC_CMD_SIZE);
break;
case UPIU_TRANSACTION_ARPMB_CMD:
rpmb = true;
ret = ufs_bsg_exec_advanced_rpmb_req(hba, job);
if (ret)
dev_err(hba->dev, "ARPMB OP failed: error code %d\n", ret);
break;
default:
ret = -ENOTSUPP;
dev_err(hba->dev, "unsupported msgcode 0x%x\n", msgcode);
break;
}
out:
ufshcd_rpm_put_sync(hba);
kfree(buff);
bsg_reply->result = ret;
job->reply_len = !rpmb ? sizeof(struct ufs_bsg_reply) : sizeof(struct ufs_rpmb_reply);
/* complete the job here only if no error */
if (ret == 0)
bsg_job_done(job, ret, bsg_reply->reply_payload_rcv_len);
return ret;
}
/**
* ufs_bsg_remove - detach and remove the added ufs-bsg node
* @hba: per adapter object
*
* Should be called when unloading the driver.
*/
void ufs_bsg_remove(struct ufs_hba *hba)
{
struct device *bsg_dev = &hba->bsg_dev;
if (!hba->bsg_queue)
return;
bsg_remove_queue(hba->bsg_queue);
device_del(bsg_dev);
put_device(bsg_dev);
}
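/* Drop the reference on the SCSI host that was taken in ufs_bsg_probe(). */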
static inline void ufs_bsg_node_release(struct device *dev)
{
put_device(dev->parent);
}
/**
* ufs_bsg_probe - Add ufs bsg device node
* @hba: per adapter object
*
* Called during initial loading of the driver, and before scsi_scan_host.
*
* Returns: 0 (success).
*/
int ufs_bsg_probe(struct ufs_hba *hba)
{
struct device *bsg_dev = &hba->bsg_dev;
struct Scsi_Host *shost = hba->host;
struct device *parent = &shost->shost_gendev;
struct request_queue *q;
int ret;
device_initialize(bsg_dev);
bsg_dev->parent = get_device(parent);
bsg_dev->release = ufs_bsg_node_release;
dev_set_name(bsg_dev, "ufs-bsg%u", shost->host_no);
ret = device_add(bsg_dev);
if (ret)
goto out;
q = bsg_setup_queue(bsg_dev, dev_name(bsg_dev), ufs_bsg_request, NULL, 0);
if (IS_ERR(q)) {
ret = PTR_ERR(q);
goto out;
}
hba->bsg_queue = q;
return 0;
out:
dev_err(bsg_dev, "fail to initialize a bsg dev %d\n", shost->host_no);
put_device(bsg_dev);
return ret;
}
| linux-master | drivers/ufs/core/ufs_bsg.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2022 Qualcomm Innovation Center. All rights reserved.
*
* Authors:
* Asutosh Das <[email protected]>
* Can Guo <[email protected]>
*/
#include <asm/unaligned.h>
#include <linux/dma-mapping.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include "ufshcd-priv.h"
#include <linux/delay.h>
#include <scsi/scsi_cmnd.h>
#include <linux/bitfield.h>
#include <linux/iopoll.h>
#define MAX_QUEUE_SUP GENMASK(7, 0)
#define UFS_MCQ_MIN_RW_QUEUES 2
#define UFS_MCQ_MIN_READ_QUEUES 0
#define UFS_MCQ_MIN_POLL_QUEUES 0
#define QUEUE_EN_OFFSET 31
#define QUEUE_ID_OFFSET 16
#define MCQ_CFG_MAC_MASK GENMASK(16, 8)
#define MCQ_QCFG_SIZE 0x40
#define MCQ_ENTRY_SIZE_IN_DWORD 8
#define CQE_UCD_BA GENMASK_ULL(63, 7)
/* Max mcq register polling time in microseconds */
#define MCQ_POLL_US 500000
static int rw_queue_count_set(const char *val, const struct kernel_param *kp)
{
return param_set_uint_minmax(val, kp, UFS_MCQ_MIN_RW_QUEUES,
num_possible_cpus());
}
static const struct kernel_param_ops rw_queue_count_ops = {
.set = rw_queue_count_set,
.get = param_get_uint,
};
static unsigned int rw_queues;
module_param_cb(rw_queues, &rw_queue_count_ops, &rw_queues, 0644);
MODULE_PARM_DESC(rw_queues,
"Number of interrupt driven I/O queues used for rw. Default value is nr_cpus");
static int read_queue_count_set(const char *val, const struct kernel_param *kp)
{
return param_set_uint_minmax(val, kp, UFS_MCQ_MIN_READ_QUEUES,
num_possible_cpus());
}
static const struct kernel_param_ops read_queue_count_ops = {
.set = read_queue_count_set,
.get = param_get_uint,
};
static unsigned int read_queues;
module_param_cb(read_queues, &read_queue_count_ops, &read_queues, 0644);
MODULE_PARM_DESC(read_queues,
"Number of interrupt driven read queues used for read. Default value is 0");
static int poll_queue_count_set(const char *val, const struct kernel_param *kp)
{
return param_set_uint_minmax(val, kp, UFS_MCQ_MIN_POLL_QUEUES,
num_possible_cpus());
}
static const struct kernel_param_ops poll_queue_count_ops = {
.set = poll_queue_count_set,
.get = param_get_uint,
};
static unsigned int poll_queues = 1;
module_param_cb(poll_queues, &poll_queue_count_ops, &poll_queues, 0644);
MODULE_PARM_DESC(poll_queues,
"Number of poll queues used for r/w. Default value is 1");
/**
* ufshcd_mcq_config_mac - Set the Maximum Active Commands (MAC).
* @hba: per adapter instance
* @max_active_cmds: maximum # of active commands to the device at any time.
*
* The controller won't send more than max_active_cmds commands to the device
* at any time.
*/
void ufshcd_mcq_config_mac(struct ufs_hba *hba, u32 max_active_cmds)
{
u32 val;
val = ufshcd_readl(hba, REG_UFS_MCQ_CFG);
val &= ~MCQ_CFG_MAC_MASK;
val |= FIELD_PREP(MCQ_CFG_MAC_MASK, max_active_cmds);
ufshcd_writel(hba, val, REG_UFS_MCQ_CFG);
}
EXPORT_SYMBOL_GPL(ufshcd_mcq_config_mac);
/**
* ufshcd_mcq_req_to_hwq - find the hardware queue on which the
* request would be issued.
* @hba: per adapter instance
* @req: pointer to the request to be issued
*
* Return: the hardware queue instance on which the request would
* be queued.
*/
struct ufs_hw_queue *ufshcd_mcq_req_to_hwq(struct ufs_hba *hba,
struct request *req)
{
u32 utag = blk_mq_unique_tag(req);
u32 hwq = blk_mq_unique_tag_to_hwq(utag);
return &hba->uhq[hwq];
}
/**
* ufshcd_mcq_decide_queue_depth - decide the queue depth
* @hba: per adapter instance
*
* Return: queue depth on success; a negative error code on failure.
*
* MAC - Maximum Active Commands supported by the Host Controller (HC).
* The HC won't send more than this many commands to the device.
* It is mandatory to implement get_hba_mac() to enable MCQ mode.
* Calculates and adjusts the queue depth based on the depths supported by
* the HC and the UFS device.
*/
int ufshcd_mcq_decide_queue_depth(struct ufs_hba *hba)
{
int mac;
/* Mandatory to implement get_hba_mac() */
mac = ufshcd_mcq_vops_get_hba_mac(hba);
if (mac < 0) {
dev_err(hba->dev, "Failed to get mac, err=%d\n", mac);
return mac;
}
WARN_ON_ONCE(!hba->dev_info.bqueuedepth);
/*
* The max. value of bqueuedepth is 256; mac is host dependent.
* It is mandatory for the UFS device to define bQueueDepth if the
* shared queuing architecture is enabled.
*/
return min_t(int, mac, hba->dev_info.bqueuedepth);
}
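/*
* Distribute the controller's hardware queues among the default (rw), read
* and poll hctx types according to the module parameters, failing if the
* total exceeds the count advertised through MAX_QUEUE_SUP.
*/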
static int ufshcd_mcq_config_nr_queues(struct ufs_hba *hba)
{
int i;
u32 hba_maxq, rem, tot_queues;
struct Scsi_Host *host = hba->host;
/* maxq is a 0-based value */
hba_maxq = FIELD_GET(MAX_QUEUE_SUP, hba->mcq_capabilities) + 1;
tot_queues = read_queues + poll_queues + rw_queues;
if (hba_maxq < tot_queues) {
dev_err(hba->dev, "Total queues (%d) exceeds HC capacity (%d)\n",
tot_queues, hba_maxq);
return -EOPNOTSUPP;
}
rem = hba_maxq;
if (rw_queues) {
hba->nr_queues[HCTX_TYPE_DEFAULT] = rw_queues;
rem -= hba->nr_queues[HCTX_TYPE_DEFAULT];
} else {
rw_queues = num_possible_cpus();
}
if (poll_queues) {
hba->nr_queues[HCTX_TYPE_POLL] = poll_queues;
rem -= hba->nr_queues[HCTX_TYPE_POLL];
}
if (read_queues) {
hba->nr_queues[HCTX_TYPE_READ] = read_queues;
rem -= hba->nr_queues[HCTX_TYPE_READ];
}
if (!hba->nr_queues[HCTX_TYPE_DEFAULT])
hba->nr_queues[HCTX_TYPE_DEFAULT] = min3(rem, rw_queues,
num_possible_cpus());
for (i = 0; i < HCTX_MAX_TYPES; i++)
host->nr_hw_queues += hba->nr_queues[i];
hba->nr_hw_queues = host->nr_hw_queues;
return 0;
}
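/*
* Allocate DMA-coherent SQE and CQE rings for every hardware queue. The
* allocations are device-managed, so no explicit free path is needed.
*/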
int ufshcd_mcq_memory_alloc(struct ufs_hba *hba)
{
struct ufs_hw_queue *hwq;
size_t utrdl_size, cqe_size;
int i;
for (i = 0; i < hba->nr_hw_queues; i++) {
hwq = &hba->uhq[i];
utrdl_size = sizeof(struct utp_transfer_req_desc) *
hwq->max_entries;
hwq->sqe_base_addr = dmam_alloc_coherent(hba->dev, utrdl_size,
&hwq->sqe_dma_addr,
GFP_KERNEL);
if (!hwq->sqe_dma_addr) {
dev_err(hba->dev, "SQE allocation failed\n");
return -ENOMEM;
}
cqe_size = sizeof(struct cq_entry) * hwq->max_entries;
hwq->cqe_base_addr = dmam_alloc_coherent(hba->dev, cqe_size,
&hwq->cqe_dma_addr,
GFP_KERNEL);
if (!hwq->cqe_dma_addr) {
dev_err(hba->dev, "CQE allocation failed\n");
return -ENOMEM;
}
}
return 0;
}
/* Operation and runtime registers configuration */
#define MCQ_CFG_n(r, i) ((r) + MCQ_QCFG_SIZE * (i))
#define MCQ_OPR_OFFSET_n(p, i) \
(hba->mcq_opr[(p)].offset + hba->mcq_opr[(p)].stride * (i))
static void __iomem *mcq_opr_base(struct ufs_hba *hba,
enum ufshcd_mcq_opr n, int i)
{
struct ufshcd_mcq_opr_info_t *opr = &hba->mcq_opr[n];
return opr->base + opr->stride * i;
}
u32 ufshcd_mcq_read_cqis(struct ufs_hba *hba, int i)
{
return readl(mcq_opr_base(hba, OPR_CQIS, i) + REG_CQIS);
}
EXPORT_SYMBOL_GPL(ufshcd_mcq_read_cqis);
void ufshcd_mcq_write_cqis(struct ufs_hba *hba, u32 val, int i)
{
writel(val, mcq_opr_base(hba, OPR_CQIS, i) + REG_CQIS);
}
EXPORT_SYMBOL_GPL(ufshcd_mcq_write_cqis);
/*
* Current MCQ specification doesn't provide a Task Tag or its equivalent in
* the Completion Queue Entry. Find the Task Tag using an indirect method.
*/
static int ufshcd_mcq_get_tag(struct ufs_hba *hba,
struct ufs_hw_queue *hwq,
struct cq_entry *cqe)
{
u64 addr;
/* sizeof(struct utp_transfer_cmd_desc) must be a multiple of 128 */
BUILD_BUG_ON(sizeof(struct utp_transfer_cmd_desc) & GENMASK(6, 0));
/* Bits 63:7 UCD base address, 6:5 are reserved, 4:0 is SQ ID */
addr = (le64_to_cpu(cqe->command_desc_base_addr) & CQE_UCD_BA) -
hba->ucdl_dma_addr;
return div_u64(addr, ufshcd_get_ucd_size(hba));
}
static void ufshcd_mcq_process_cqe(struct ufs_hba *hba,
struct ufs_hw_queue *hwq)
{
struct cq_entry *cqe = ufshcd_mcq_cur_cqe(hwq);
int tag = ufshcd_mcq_get_tag(hba, hwq, cqe);
if (cqe->command_desc_base_addr) {
ufshcd_compl_one_cqe(hba, tag, cqe);
/* After processing the cqe, mark it as an empty (invalid) entry */
cqe->command_desc_base_addr = 0;
}
}
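/*
* Drain the completion queue: process every CQE slot under cq_lock and then
* resynchronize the cached CQ head with the tail.
*/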
void ufshcd_mcq_compl_all_cqes_lock(struct ufs_hba *hba,
struct ufs_hw_queue *hwq)
{
unsigned long flags;
u32 entries = hwq->max_entries;
spin_lock_irqsave(&hwq->cq_lock, flags);
while (entries > 0) {
ufshcd_mcq_process_cqe(hba, hwq);
ufshcd_mcq_inc_cq_head_slot(hwq);
entries--;
}
ufshcd_mcq_update_cq_tail_slot(hwq);
hwq->cq_head_slot = hwq->cq_tail_slot;
spin_unlock_irqrestore(&hwq->cq_lock, flags);
}
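/*
* Process all new CQEs between the cached head and the current tail under
* cq_lock and return the number of completed requests.
*/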
unsigned long ufshcd_mcq_poll_cqe_lock(struct ufs_hba *hba,
struct ufs_hw_queue *hwq)
{
unsigned long completed_reqs = 0;
unsigned long flags;
spin_lock_irqsave(&hwq->cq_lock, flags);
ufshcd_mcq_update_cq_tail_slot(hwq);
while (!ufshcd_mcq_is_cq_empty(hwq)) {
ufshcd_mcq_process_cqe(hba, hwq);
ufshcd_mcq_inc_cq_head_slot(hwq);
completed_reqs++;
}
if (completed_reqs)
ufshcd_mcq_update_cq_head(hwq);
spin_unlock_irqrestore(&hwq->cq_lock, flags);
return completed_reqs;
}
EXPORT_SYMBOL_GPL(ufshcd_mcq_poll_cqe_lock);
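/*
* Program each hardware queue's SQ/CQ base addresses, doorbell and interrupt
* status offsets and attribute registers, cache the doorbell register
* addresses, and enable the Tail Entry Push interrupt for non-poll queues.
*/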
void ufshcd_mcq_make_queues_operational(struct ufs_hba *hba)
{
struct ufs_hw_queue *hwq;
u16 qsize;
int i;
for (i = 0; i < hba->nr_hw_queues; i++) {
hwq = &hba->uhq[i];
hwq->id = i;
qsize = hwq->max_entries * MCQ_ENTRY_SIZE_IN_DWORD - 1;
/* Submission Queue Lower Base Address */
ufsmcq_writelx(hba, lower_32_bits(hwq->sqe_dma_addr),
MCQ_CFG_n(REG_SQLBA, i));
/* Submission Queue Upper Base Address */
ufsmcq_writelx(hba, upper_32_bits(hwq->sqe_dma_addr),
MCQ_CFG_n(REG_SQUBA, i));
/* Submission Queue Doorbell Address Offset */
ufsmcq_writelx(hba, MCQ_OPR_OFFSET_n(OPR_SQD, i),
MCQ_CFG_n(REG_SQDAO, i));
/* Submission Queue Interrupt Status Address Offset */
ufsmcq_writelx(hba, MCQ_OPR_OFFSET_n(OPR_SQIS, i),
MCQ_CFG_n(REG_SQISAO, i));
/* Completion Queue Lower Base Address */
ufsmcq_writelx(hba, lower_32_bits(hwq->cqe_dma_addr),
MCQ_CFG_n(REG_CQLBA, i));
/* Completion Queue Upper Base Address */
ufsmcq_writelx(hba, upper_32_bits(hwq->cqe_dma_addr),
MCQ_CFG_n(REG_CQUBA, i));
/* Completion Queue Doorbell Address Offset */
ufsmcq_writelx(hba, MCQ_OPR_OFFSET_n(OPR_CQD, i),
MCQ_CFG_n(REG_CQDAO, i));
/* Completion Queue Interrupt Status Address Offset */
ufsmcq_writelx(hba, MCQ_OPR_OFFSET_n(OPR_CQIS, i),
MCQ_CFG_n(REG_CQISAO, i));
/* Save the base addresses for quicker access */
hwq->mcq_sq_head = mcq_opr_base(hba, OPR_SQD, i) + REG_SQHP;
hwq->mcq_sq_tail = mcq_opr_base(hba, OPR_SQD, i) + REG_SQTP;
hwq->mcq_cq_head = mcq_opr_base(hba, OPR_CQD, i) + REG_CQHP;
hwq->mcq_cq_tail = mcq_opr_base(hba, OPR_CQD, i) + REG_CQTP;
/* Reinitializing is needed upon HC reset */
hwq->sq_tail_slot = hwq->cq_tail_slot = hwq->cq_head_slot = 0;
/* Enable Tail Entry Push Status interrupt only for non-poll queues */
if (i < hba->nr_hw_queues - hba->nr_queues[HCTX_TYPE_POLL])
writel(1, mcq_opr_base(hba, OPR_CQIS, i) + REG_CQIE);
/* Completion Queue Enable|Size to Completion Queue Attribute */
ufsmcq_writel(hba, (1 << QUEUE_EN_OFFSET) | qsize,
MCQ_CFG_n(REG_CQATTR, i));
/*
* Submission Queue Enable|Size|Completion Queue ID to
* Submission Queue Attribute
*/
ufsmcq_writel(hba, (1 << QUEUE_EN_OFFSET) | qsize |
(i << QUEUE_ID_OFFSET),
MCQ_CFG_n(REG_SQATTR, i));
}
}
EXPORT_SYMBOL_GPL(ufshcd_mcq_make_queues_operational);
void ufshcd_mcq_enable_esi(struct ufs_hba *hba)
{
ufshcd_writel(hba, ufshcd_readl(hba, REG_UFS_MEM_CFG) | 0x2,
REG_UFS_MEM_CFG);
}
EXPORT_SYMBOL_GPL(ufshcd_mcq_enable_esi);
void ufshcd_mcq_config_esi(struct ufs_hba *hba, struct msi_msg *msg)
{
ufshcd_writel(hba, msg->address_lo, REG_UFS_ESILBA);
ufshcd_writel(hba, msg->address_hi, REG_UFS_ESIUBA);
}
EXPORT_SYMBOL_GPL(ufshcd_mcq_config_esi);
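/*
* One-time MCQ setup: configure the queue counts, let the vendor driver map
* the MCQ resources and operation/runtime registers, allocate the per-queue
* state, and reserve the first hardware queue for device commands.
*/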
int ufshcd_mcq_init(struct ufs_hba *hba)
{
struct Scsi_Host *host = hba->host;
struct ufs_hw_queue *hwq;
int ret, i;
ret = ufshcd_mcq_config_nr_queues(hba);
if (ret)
return ret;
ret = ufshcd_vops_mcq_config_resource(hba);
if (ret)
return ret;
ret = ufshcd_mcq_vops_op_runtime_config(hba);
if (ret) {
dev_err(hba->dev, "Operation runtime config failed, ret=%d\n",
ret);
return ret;
}
hba->uhq = devm_kzalloc(hba->dev,
hba->nr_hw_queues * sizeof(struct ufs_hw_queue),
GFP_KERNEL);
if (!hba->uhq) {
dev_err(hba->dev, "ufs hw queue memory allocation failed\n");
return -ENOMEM;
}
for (i = 0; i < hba->nr_hw_queues; i++) {
hwq = &hba->uhq[i];
hwq->max_entries = hba->nutrs;
spin_lock_init(&hwq->sq_lock);
spin_lock_init(&hwq->cq_lock);
mutex_init(&hwq->sq_mutex);
}
/* The very first HW queue serves device commands */
hba->dev_cmd_queue = &hba->uhq[0];
host->host_tagset = 1;
return 0;
}
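/*
* ufshcd_mcq_sq_stop() and ufshcd_mcq_sq_start() stop/restart SQ fetching by
* writing the queue's run-time control register and polling its run-time
* status register for up to MCQ_POLL_US microseconds.
*/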
static int ufshcd_mcq_sq_stop(struct ufs_hba *hba, struct ufs_hw_queue *hwq)
{
void __iomem *reg;
u32 id = hwq->id, val;
int err;
if (hba->quirks & UFSHCD_QUIRK_MCQ_BROKEN_RTC)
return -ETIMEDOUT;
writel(SQ_STOP, mcq_opr_base(hba, OPR_SQD, id) + REG_SQRTC);
reg = mcq_opr_base(hba, OPR_SQD, id) + REG_SQRTS;
err = read_poll_timeout(readl, val, val & SQ_STS, 20,
MCQ_POLL_US, false, reg);
if (err)
dev_err(hba->dev, "%s: failed. hwq-id=%d, err=%d\n",
__func__, id, err);
return err;
}
static int ufshcd_mcq_sq_start(struct ufs_hba *hba, struct ufs_hw_queue *hwq)
{
void __iomem *reg;
u32 id = hwq->id, val;
int err;
if (hba->quirks & UFSHCD_QUIRK_MCQ_BROKEN_RTC)
return -ETIMEDOUT;
writel(SQ_START, mcq_opr_base(hba, OPR_SQD, id) + REG_SQRTC);
reg = mcq_opr_base(hba, OPR_SQD, id) + REG_SQRTS;
err = read_poll_timeout(readl, val, !(val & SQ_STS), 20,
MCQ_POLL_US, false, reg);
if (err)
dev_err(hba->dev, "%s: failed. hwq-id=%d, err=%d\n",
__func__, id, err);
return err;
}
/**
* ufshcd_mcq_sq_cleanup - Clean up submission queue resources
* associated with the pending command.
* @hba: per adapter instance.
* @task_tag: The command's task tag.
*
* Return: 0 for success; error code otherwise.
*/
int ufshcd_mcq_sq_cleanup(struct ufs_hba *hba, int task_tag)
{
struct ufshcd_lrb *lrbp = &hba->lrb[task_tag];
struct scsi_cmnd *cmd = lrbp->cmd;
struct ufs_hw_queue *hwq;
void __iomem *reg, *opr_sqd_base;
u32 nexus, id, val;
int err;
if (hba->quirks & UFSHCD_QUIRK_MCQ_BROKEN_RTC)
return -ETIMEDOUT;
if (task_tag != hba->nutrs - UFSHCD_NUM_RESERVED) {
if (!cmd)
return -EINVAL;
hwq = ufshcd_mcq_req_to_hwq(hba, scsi_cmd_to_rq(cmd));
} else {
hwq = hba->dev_cmd_queue;
}
id = hwq->id;
mutex_lock(&hwq->sq_mutex);
/* stop the SQ fetching before working on it */
err = ufshcd_mcq_sq_stop(hba, hwq);
if (err)
goto unlock;
/* SQCTI = EXT_IID, IID, LUN, Task Tag */
nexus = lrbp->lun << 8 | task_tag;
opr_sqd_base = mcq_opr_base(hba, OPR_SQD, id);
writel(nexus, opr_sqd_base + REG_SQCTI);
/* SQRTCy.ICU = 1 */
writel(SQ_ICU, opr_sqd_base + REG_SQRTC);
/* Poll SQRTSy.CUS = 1. Return result from SQRTSy.RTC */
reg = opr_sqd_base + REG_SQRTS;
err = read_poll_timeout(readl, val, val & SQ_CUS, 20,
MCQ_POLL_US, false, reg);
if (err)
dev_err(hba->dev, "%s: failed. hwq=%d, tag=%d err=%ld\n",
__func__, id, task_tag,
FIELD_GET(SQ_ICU_ERR_CODE_MASK, readl(reg)));
if (ufshcd_mcq_sq_start(hba, hwq))
err = -ETIMEDOUT;
unlock:
mutex_unlock(&hwq->sq_mutex);
return err;
}
/**
* ufshcd_mcq_nullify_sqe - Nullify the submission queue entry.
* Write the sqe's Command Type to 0xF. The host controller will not
* fetch any sqe with Command Type = 0xF.
*
* @utrd: UTP Transfer Request Descriptor to be nullified.
*/
static void ufshcd_mcq_nullify_sqe(struct utp_transfer_req_desc *utrd)
{
utrd->header.command_type = 0xf;
}
/**
* ufshcd_mcq_sqe_search - Search for the command in the submission queue
* If the command is in the submission queue and not issued to the device yet,
* nullify the sqe so the host controller will skip fetching the sqe.
*
* @hba: per adapter instance.
* @hwq: Hardware Queue to be searched.
* @task_tag: The command's task tag.
*
* Return: true if the SQE containing the command is present in the SQ
* (not fetched by the controller); returns false if the SQE is not in the SQ.
*/
static bool ufshcd_mcq_sqe_search(struct ufs_hba *hba,
struct ufs_hw_queue *hwq, int task_tag)
{
struct ufshcd_lrb *lrbp = &hba->lrb[task_tag];
struct utp_transfer_req_desc *utrd;
__le64 cmd_desc_base_addr;
bool ret = false;
u64 addr, match;
u32 sq_head_slot;
if (hba->quirks & UFSHCD_QUIRK_MCQ_BROKEN_RTC)
return true;
mutex_lock(&hwq->sq_mutex);
ufshcd_mcq_sq_stop(hba, hwq);
sq_head_slot = ufshcd_mcq_get_sq_head_slot(hwq);
if (sq_head_slot == hwq->sq_tail_slot)
goto out;
cmd_desc_base_addr = lrbp->utr_descriptor_ptr->command_desc_base_addr;
addr = le64_to_cpu(cmd_desc_base_addr) & CQE_UCD_BA;
while (sq_head_slot != hwq->sq_tail_slot) {
utrd = hwq->sqe_base_addr +
sq_head_slot * sizeof(struct utp_transfer_req_desc);
match = le64_to_cpu(utrd->command_desc_base_addr) & CQE_UCD_BA;
if (addr == match) {
ufshcd_mcq_nullify_sqe(utrd);
ret = true;
goto out;
}
sq_head_slot++;
if (sq_head_slot == hwq->max_entries)
sq_head_slot = 0;
}
out:
ufshcd_mcq_sq_start(hba, hwq);
mutex_unlock(&hwq->sq_mutex);
return ret;
}
/**
* ufshcd_mcq_abort - Abort the command in MCQ.
* @cmd: The command to be aborted.
*
* Return: SUCCESS or FAILED.
*/
int ufshcd_mcq_abort(struct scsi_cmnd *cmd)
{
struct Scsi_Host *host = cmd->device->host;
struct ufs_hba *hba = shost_priv(host);
int tag = scsi_cmd_to_rq(cmd)->tag;
struct ufshcd_lrb *lrbp = &hba->lrb[tag];
struct ufs_hw_queue *hwq;
int err = FAILED;
if (!ufshcd_cmd_inflight(lrbp->cmd)) {
dev_err(hba->dev,
"%s: skip abort. cmd at tag %d already completed.\n",
__func__, tag);
goto out;
}
/* Skip task abort in case previous aborts failed and report failure */
if (lrbp->req_abort_skip) {
dev_err(hba->dev, "%s: skip abort. tag %d failed earlier\n",
__func__, tag);
goto out;
}
hwq = ufshcd_mcq_req_to_hwq(hba, scsi_cmd_to_rq(cmd));
if (ufshcd_mcq_sqe_search(hba, hwq, tag)) {
/*
* Failure: the command should never remain "stuck" in the SQ
* long enough to trigger an abort in the first place.
*/
dev_err(hba->dev, "%s: cmd found in sq. hwq=%d, tag=%d\n",
__func__, hwq->id, tag);
goto out;
}
/*
* The command is not in the submission queue, and it is not
* in the completion queue either. Query the device to see if
* the command is being processed in the device.
*/
if (ufshcd_try_to_abort_task(hba, tag)) {
dev_err(hba->dev, "%s: device abort failed %d\n", __func__, err);
lrbp->req_abort_skip = true;
goto out;
}
err = SUCCESS;
if (ufshcd_cmd_inflight(lrbp->cmd))
ufshcd_release_scsi_cmd(hba, lrbp);
out:
return err;
}
| linux-master | drivers/ufs/core/ufs-mcq.c |
// SPDX-License-Identifier: GPL-2.0
// Copyright (C) 2018 Western Digital Corporation
#include <linux/err.h>
#include <linux/string.h>
#include <linux/bitfield.h>
#include <asm/unaligned.h>
#include <ufs/ufs.h>
#include "ufs-sysfs.h"
#include "ufshcd-priv.h"
static const char *ufshcd_uic_link_state_to_string(
enum uic_link_state state)
{
switch (state) {
case UIC_LINK_OFF_STATE: return "OFF";
case UIC_LINK_ACTIVE_STATE: return "ACTIVE";
case UIC_LINK_HIBERN8_STATE: return "HIBERN8";
case UIC_LINK_BROKEN_STATE: return "BROKEN";
default: return "UNKNOWN";
}
}
static const char *ufshcd_ufs_dev_pwr_mode_to_string(
enum ufs_dev_pwr_mode state)
{
switch (state) {
case UFS_ACTIVE_PWR_MODE: return "ACTIVE";
case UFS_SLEEP_PWR_MODE: return "SLEEP";
case UFS_POWERDOWN_PWR_MODE: return "POWERDOWN";
case UFS_DEEPSLEEP_PWR_MODE: return "DEEPSLEEP";
default: return "UNKNOWN";
}
}
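/*
* Common store handler for rpm_lvl and spm_lvl: validate the requested power
* management level (DeepSleep additionally requires UFSHCD_CAP_DEEPSLEEP and
* a UFS 3.1+ device) and update the selected level under the host lock.
*/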
static inline ssize_t ufs_sysfs_pm_lvl_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count,
bool rpm)
{
struct ufs_hba *hba = dev_get_drvdata(dev);
struct ufs_dev_info *dev_info = &hba->dev_info;
unsigned long flags, value;
if (kstrtoul(buf, 0, &value))
return -EINVAL;
if (value >= UFS_PM_LVL_MAX)
return -EINVAL;
if (ufs_pm_lvl_states[value].dev_state == UFS_DEEPSLEEP_PWR_MODE &&
(!(hba->caps & UFSHCD_CAP_DEEPSLEEP) ||
!(dev_info->wspecversion >= 0x310)))
return -EINVAL;
spin_lock_irqsave(hba->host->host_lock, flags);
if (rpm)
hba->rpm_lvl = value;
else
hba->spm_lvl = value;
spin_unlock_irqrestore(hba->host->host_lock, flags);
return count;
}
static ssize_t rpm_lvl_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct ufs_hba *hba = dev_get_drvdata(dev);
return sysfs_emit(buf, "%d\n", hba->rpm_lvl);
}
static ssize_t rpm_lvl_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t count)
{
return ufs_sysfs_pm_lvl_store(dev, attr, buf, count, true);
}
static ssize_t rpm_target_dev_state_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct ufs_hba *hba = dev_get_drvdata(dev);
return sysfs_emit(buf, "%s\n", ufshcd_ufs_dev_pwr_mode_to_string(
ufs_pm_lvl_states[hba->rpm_lvl].dev_state));
}
static ssize_t rpm_target_link_state_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct ufs_hba *hba = dev_get_drvdata(dev);
return sysfs_emit(buf, "%s\n", ufshcd_uic_link_state_to_string(
ufs_pm_lvl_states[hba->rpm_lvl].link_state));
}
static ssize_t spm_lvl_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct ufs_hba *hba = dev_get_drvdata(dev);
return sysfs_emit(buf, "%d\n", hba->spm_lvl);
}
static ssize_t spm_lvl_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t count)
{
return ufs_sysfs_pm_lvl_store(dev, attr, buf, count, false);
}
static ssize_t spm_target_dev_state_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct ufs_hba *hba = dev_get_drvdata(dev);
return sysfs_emit(buf, "%s\n", ufshcd_ufs_dev_pwr_mode_to_string(
ufs_pm_lvl_states[hba->spm_lvl].dev_state));
}
static ssize_t spm_target_link_state_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct ufs_hba *hba = dev_get_drvdata(dev);
return sysfs_emit(buf, "%s\n", ufshcd_uic_link_state_to_string(
ufs_pm_lvl_states[hba->spm_lvl].link_state));
}
/* Convert Auto-Hibernate Idle Timer register value to microseconds */
static int ufshcd_ahit_to_us(u32 ahit)
{
int timer = FIELD_GET(UFSHCI_AHIBERN8_TIMER_MASK, ahit);
int scale = FIELD_GET(UFSHCI_AHIBERN8_SCALE_MASK, ahit);
for (; scale > 0; --scale)
timer *= UFSHCI_AHIBERN8_SCALE_FACTOR;
return timer;
}
/* Convert microseconds to Auto-Hibernate Idle Timer register value */
static u32 ufshcd_us_to_ahit(unsigned int timer)
{
unsigned int scale;
for (scale = 0; timer > UFSHCI_AHIBERN8_TIMER_MASK; ++scale)
timer /= UFSHCI_AHIBERN8_SCALE_FACTOR;
return FIELD_PREP(UFSHCI_AHIBERN8_TIMER_MASK, timer) |
FIELD_PREP(UFSHCI_AHIBERN8_SCALE_MASK, scale);
}
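/*
* Illustrative example of the encoding above (assuming a 10-bit timer field
* and a scale factor of 10): a request of 150000 us does not fit in the
* timer field, so it is stored as timer = 150 with scale = 3, i.e.
* 150 * 10^3 us; ufshcd_ahit_to_us() decodes this back to 150000 us.
*/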
static ssize_t auto_hibern8_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
u32 ahit;
int ret;
struct ufs_hba *hba = dev_get_drvdata(dev);
if (!ufshcd_is_auto_hibern8_supported(hba))
return -EOPNOTSUPP;
down(&hba->host_sem);
if (!ufshcd_is_user_access_allowed(hba)) {
ret = -EBUSY;
goto out;
}
pm_runtime_get_sync(hba->dev);
ufshcd_hold(hba);
ahit = ufshcd_readl(hba, REG_AUTO_HIBERNATE_IDLE_TIMER);
ufshcd_release(hba);
pm_runtime_put_sync(hba->dev);
ret = sysfs_emit(buf, "%d\n", ufshcd_ahit_to_us(ahit));
out:
up(&hba->host_sem);
return ret;
}
static ssize_t auto_hibern8_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
struct ufs_hba *hba = dev_get_drvdata(dev);
unsigned int timer;
int ret = 0;
if (!ufshcd_is_auto_hibern8_supported(hba))
return -EOPNOTSUPP;
if (kstrtouint(buf, 0, &timer))
return -EINVAL;
if (timer > UFSHCI_AHIBERN8_MAX)
return -EINVAL;
down(&hba->host_sem);
if (!ufshcd_is_user_access_allowed(hba)) {
ret = -EBUSY;
goto out;
}
ufshcd_auto_hibern8_update(hba, ufshcd_us_to_ahit(timer));
out:
up(&hba->host_sem);
return ret ? ret : count;
}
static ssize_t wb_on_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct ufs_hba *hba = dev_get_drvdata(dev);
return sysfs_emit(buf, "%d\n", hba->dev_info.wb_enabled);
}
static ssize_t wb_on_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
struct ufs_hba *hba = dev_get_drvdata(dev);
unsigned int wb_enable;
ssize_t res;
if (!ufshcd_is_wb_allowed(hba) || (ufshcd_is_clkscaling_supported(hba)
&& ufshcd_enable_wb_if_scaling_up(hba))) {
/*
* If the platform supports UFSHCD_CAP_CLK_SCALING, WB is
* turned on/off automatically while scaling clocks up/down.
*/
dev_warn(dev, "It is not allowed to configure WB!\n");
return -EOPNOTSUPP;
}
if (kstrtouint(buf, 0, &wb_enable))
return -EINVAL;
if (wb_enable != 0 && wb_enable != 1)
return -EINVAL;
down(&hba->host_sem);
if (!ufshcd_is_user_access_allowed(hba)) {
res = -EBUSY;
goto out;
}
ufshcd_rpm_get_sync(hba);
res = ufshcd_wb_toggle(hba, wb_enable);
ufshcd_rpm_put_sync(hba);
out:
up(&hba->host_sem);
return res < 0 ? res : count;
}
static ssize_t enable_wb_buf_flush_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct ufs_hba *hba = dev_get_drvdata(dev);
return sysfs_emit(buf, "%d\n", hba->dev_info.wb_buf_flush_enabled);
}
static ssize_t enable_wb_buf_flush_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
struct ufs_hba *hba = dev_get_drvdata(dev);
unsigned int enable_wb_buf_flush;
ssize_t res;
if (!ufshcd_is_wb_buf_flush_allowed(hba)) {
dev_warn(dev, "It is not allowed to configure WB buf flushing!\n");
return -EOPNOTSUPP;
}
if (kstrtouint(buf, 0, &enable_wb_buf_flush))
return -EINVAL;
if (enable_wb_buf_flush != 0 && enable_wb_buf_flush != 1)
return -EINVAL;
down(&hba->host_sem);
if (!ufshcd_is_user_access_allowed(hba)) {
res = -EBUSY;
goto out;
}
ufshcd_rpm_get_sync(hba);
res = ufshcd_wb_toggle_buf_flush(hba, enable_wb_buf_flush);
ufshcd_rpm_put_sync(hba);
out:
up(&hba->host_sem);
return res < 0 ? res : count;
}
static ssize_t wb_flush_threshold_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct ufs_hba *hba = dev_get_drvdata(dev);
return sysfs_emit(buf, "%u\n", hba->vps->wb_flush_threshold);
}
static ssize_t wb_flush_threshold_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
struct ufs_hba *hba = dev_get_drvdata(dev);
unsigned int wb_flush_threshold;
if (kstrtouint(buf, 0, &wb_flush_threshold))
return -EINVAL;
/* The range of values for wb_flush_threshold is (0,10] */
if (wb_flush_threshold > UFS_WB_BUF_REMAIN_PERCENT(100) ||
wb_flush_threshold == 0) {
dev_err(dev, "The value of wb_flush_threshold is invalid!\n");
return -EINVAL;
}
hba->vps->wb_flush_threshold = wb_flush_threshold;
return count;
}
static DEVICE_ATTR_RW(rpm_lvl);
static DEVICE_ATTR_RO(rpm_target_dev_state);
static DEVICE_ATTR_RO(rpm_target_link_state);
static DEVICE_ATTR_RW(spm_lvl);
static DEVICE_ATTR_RO(spm_target_dev_state);
static DEVICE_ATTR_RO(spm_target_link_state);
static DEVICE_ATTR_RW(auto_hibern8);
static DEVICE_ATTR_RW(wb_on);
static DEVICE_ATTR_RW(enable_wb_buf_flush);
static DEVICE_ATTR_RW(wb_flush_threshold);
static struct attribute *ufs_sysfs_ufshcd_attrs[] = {
&dev_attr_rpm_lvl.attr,
&dev_attr_rpm_target_dev_state.attr,
&dev_attr_rpm_target_link_state.attr,
&dev_attr_spm_lvl.attr,
&dev_attr_spm_target_dev_state.attr,
&dev_attr_spm_target_link_state.attr,
&dev_attr_auto_hibern8.attr,
&dev_attr_wb_on.attr,
&dev_attr_enable_wb_buf_flush.attr,
&dev_attr_wb_flush_threshold.attr,
NULL
};
static const struct attribute_group ufs_sysfs_default_group = {
.attrs = ufs_sysfs_ufshcd_attrs,
};
static ssize_t clock_scaling_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct ufs_hba *hba = dev_get_drvdata(dev);
return sysfs_emit(buf, "%d\n", ufshcd_is_clkscaling_supported(hba));
}
static ssize_t write_booster_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct ufs_hba *hba = dev_get_drvdata(dev);
return sysfs_emit(buf, "%d\n", ufshcd_is_wb_allowed(hba));
}
static DEVICE_ATTR_RO(clock_scaling);
static DEVICE_ATTR_RO(write_booster);
/*
* See Documentation/ABI/testing/sysfs-driver-ufs for the semantics of this
* group.
*/
static struct attribute *ufs_sysfs_capabilities_attrs[] = {
&dev_attr_clock_scaling.attr,
&dev_attr_write_booster.attr,
NULL
};
static const struct attribute_group ufs_sysfs_capabilities_group = {
.name = "capabilities",
.attrs = ufs_sysfs_capabilities_attrs,
};
static ssize_t monitor_enable_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct ufs_hba *hba = dev_get_drvdata(dev);
return sysfs_emit(buf, "%d\n", hba->monitor.enabled);
}
static ssize_t monitor_enable_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
struct ufs_hba *hba = dev_get_drvdata(dev);
unsigned long value, flags;
if (kstrtoul(buf, 0, &value))
return -EINVAL;
value = !!value;
spin_lock_irqsave(hba->host->host_lock, flags);
if (value == hba->monitor.enabled)
goto out_unlock;
if (!value) {
memset(&hba->monitor, 0, sizeof(hba->monitor));
} else {
hba->monitor.enabled = true;
hba->monitor.enabled_ts = ktime_get();
}
out_unlock:
spin_unlock_irqrestore(hba->host->host_lock, flags);
return count;
}
static ssize_t monitor_chunk_size_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct ufs_hba *hba = dev_get_drvdata(dev);
return sysfs_emit(buf, "%lu\n", hba->monitor.chunk_size);
}
static ssize_t monitor_chunk_size_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
struct ufs_hba *hba = dev_get_drvdata(dev);
unsigned long value, flags;
if (kstrtoul(buf, 0, &value))
return -EINVAL;
spin_lock_irqsave(hba->host->host_lock, flags);
/* Only allow chunk size change when monitor is disabled */
if (!hba->monitor.enabled)
hba->monitor.chunk_size = value;
spin_unlock_irqrestore(hba->host->host_lock, flags);
return count;
}
static ssize_t read_total_sectors_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct ufs_hba *hba = dev_get_drvdata(dev);
return sysfs_emit(buf, "%lu\n", hba->monitor.nr_sec_rw[READ]);
}
static ssize_t read_total_busy_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct ufs_hba *hba = dev_get_drvdata(dev);
return sysfs_emit(buf, "%llu\n",
ktime_to_us(hba->monitor.total_busy[READ]));
}
static ssize_t read_nr_requests_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct ufs_hba *hba = dev_get_drvdata(dev);
return sysfs_emit(buf, "%lu\n", hba->monitor.nr_req[READ]);
}
static ssize_t read_req_latency_avg_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct ufs_hba *hba = dev_get_drvdata(dev);
struct ufs_hba_monitor *m = &hba->monitor;
return sysfs_emit(buf, "%llu\n", div_u64(ktime_to_us(m->lat_sum[READ]),
m->nr_req[READ]));
}
static ssize_t read_req_latency_max_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct ufs_hba *hba = dev_get_drvdata(dev);
return sysfs_emit(buf, "%llu\n",
ktime_to_us(hba->monitor.lat_max[READ]));
}
static ssize_t read_req_latency_min_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct ufs_hba *hba = dev_get_drvdata(dev);
return sysfs_emit(buf, "%llu\n",
ktime_to_us(hba->monitor.lat_min[READ]));
}
static ssize_t read_req_latency_sum_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct ufs_hba *hba = dev_get_drvdata(dev);
return sysfs_emit(buf, "%llu\n",
ktime_to_us(hba->monitor.lat_sum[READ]));
}
static ssize_t write_total_sectors_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct ufs_hba *hba = dev_get_drvdata(dev);
return sysfs_emit(buf, "%lu\n", hba->monitor.nr_sec_rw[WRITE]);
}
static ssize_t write_total_busy_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct ufs_hba *hba = dev_get_drvdata(dev);
return sysfs_emit(buf, "%llu\n",
ktime_to_us(hba->monitor.total_busy[WRITE]));
}
static ssize_t write_nr_requests_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct ufs_hba *hba = dev_get_drvdata(dev);
return sysfs_emit(buf, "%lu\n", hba->monitor.nr_req[WRITE]);
}
static ssize_t write_req_latency_avg_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct ufs_hba *hba = dev_get_drvdata(dev);
struct ufs_hba_monitor *m = &hba->monitor;
return sysfs_emit(buf, "%llu\n", div_u64(ktime_to_us(m->lat_sum[WRITE]),
m->nr_req[WRITE]));
}
static ssize_t write_req_latency_max_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct ufs_hba *hba = dev_get_drvdata(dev);
return sysfs_emit(buf, "%llu\n",
ktime_to_us(hba->monitor.lat_max[WRITE]));
}
static ssize_t write_req_latency_min_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct ufs_hba *hba = dev_get_drvdata(dev);
return sysfs_emit(buf, "%llu\n",
ktime_to_us(hba->monitor.lat_min[WRITE]));
}
static ssize_t write_req_latency_sum_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct ufs_hba *hba = dev_get_drvdata(dev);
return sysfs_emit(buf, "%llu\n",
ktime_to_us(hba->monitor.lat_sum[WRITE]));
}
static DEVICE_ATTR_RW(monitor_enable);
static DEVICE_ATTR_RW(monitor_chunk_size);
static DEVICE_ATTR_RO(read_total_sectors);
static DEVICE_ATTR_RO(read_total_busy);
static DEVICE_ATTR_RO(read_nr_requests);
static DEVICE_ATTR_RO(read_req_latency_avg);
static DEVICE_ATTR_RO(read_req_latency_max);
static DEVICE_ATTR_RO(read_req_latency_min);
static DEVICE_ATTR_RO(read_req_latency_sum);
static DEVICE_ATTR_RO(write_total_sectors);
static DEVICE_ATTR_RO(write_total_busy);
static DEVICE_ATTR_RO(write_nr_requests);
static DEVICE_ATTR_RO(write_req_latency_avg);
static DEVICE_ATTR_RO(write_req_latency_max);
static DEVICE_ATTR_RO(write_req_latency_min);
static DEVICE_ATTR_RO(write_req_latency_sum);
static struct attribute *ufs_sysfs_monitor_attrs[] = {
&dev_attr_monitor_enable.attr,
&dev_attr_monitor_chunk_size.attr,
&dev_attr_read_total_sectors.attr,
&dev_attr_read_total_busy.attr,
&dev_attr_read_nr_requests.attr,
&dev_attr_read_req_latency_avg.attr,
&dev_attr_read_req_latency_max.attr,
&dev_attr_read_req_latency_min.attr,
&dev_attr_read_req_latency_sum.attr,
&dev_attr_write_total_sectors.attr,
&dev_attr_write_total_busy.attr,
&dev_attr_write_nr_requests.attr,
&dev_attr_write_req_latency_avg.attr,
&dev_attr_write_req_latency_max.attr,
&dev_attr_write_req_latency_min.attr,
&dev_attr_write_req_latency_sum.attr,
NULL
};
static const struct attribute_group ufs_sysfs_monitor_group = {
.name = "monitor",
.attrs = ufs_sysfs_monitor_attrs,
};
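/*
* Read up to 8 bytes of a descriptor parameter and emit it as a fixed-width
* hexadecimal value. Access is serialized by host_sem and rejected while
* user access to the device is not allowed.
*/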
static ssize_t ufs_sysfs_read_desc_param(struct ufs_hba *hba,
enum desc_idn desc_id,
u8 desc_index,
u8 param_offset,
u8 *sysfs_buf,
u8 param_size)
{
u8 desc_buf[8] = {0};
int ret;
if (param_size > 8)
return -EINVAL;
down(&hba->host_sem);
if (!ufshcd_is_user_access_allowed(hba)) {
ret = -EBUSY;
goto out;
}
ufshcd_rpm_get_sync(hba);
ret = ufshcd_read_desc_param(hba, desc_id, desc_index,
param_offset, desc_buf, param_size);
ufshcd_rpm_put_sync(hba);
if (ret) {
ret = -EINVAL;
goto out;
}
switch (param_size) {
case 1:
ret = sysfs_emit(sysfs_buf, "0x%02X\n", *desc_buf);
break;
case 2:
ret = sysfs_emit(sysfs_buf, "0x%04X\n",
get_unaligned_be16(desc_buf));
break;
case 4:
ret = sysfs_emit(sysfs_buf, "0x%08X\n",
get_unaligned_be32(desc_buf));
break;
case 8:
ret = sysfs_emit(sysfs_buf, "0x%016llX\n",
get_unaligned_be64(desc_buf));
break;
}
out:
up(&hba->host_sem);
return ret;
}
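/*
* Generate a read-only sysfs attribute that reports one parameter of the
* given descriptor type through ufs_sysfs_read_desc_param().
*/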
#define UFS_DESC_PARAM(_name, _puname, _duname, _size) \
static ssize_t _name##_show(struct device *dev, \
struct device_attribute *attr, char *buf) \
{ \
struct ufs_hba *hba = dev_get_drvdata(dev); \
return ufs_sysfs_read_desc_param(hba, QUERY_DESC_IDN_##_duname, \
0, _duname##_DESC_PARAM##_puname, buf, _size); \
} \
static DEVICE_ATTR_RO(_name)
#define UFS_DEVICE_DESC_PARAM(_name, _uname, _size) \
UFS_DESC_PARAM(_name, _uname, DEVICE, _size)
UFS_DEVICE_DESC_PARAM(device_type, _DEVICE_TYPE, 1);
UFS_DEVICE_DESC_PARAM(device_class, _DEVICE_CLASS, 1);
UFS_DEVICE_DESC_PARAM(device_sub_class, _DEVICE_SUB_CLASS, 1);
UFS_DEVICE_DESC_PARAM(protocol, _PRTCL, 1);
UFS_DEVICE_DESC_PARAM(number_of_luns, _NUM_LU, 1);
UFS_DEVICE_DESC_PARAM(number_of_wluns, _NUM_WLU, 1);
UFS_DEVICE_DESC_PARAM(boot_enable, _BOOT_ENBL, 1);
UFS_DEVICE_DESC_PARAM(descriptor_access_enable, _DESC_ACCSS_ENBL, 1);
UFS_DEVICE_DESC_PARAM(initial_power_mode, _INIT_PWR_MODE, 1);
UFS_DEVICE_DESC_PARAM(high_priority_lun, _HIGH_PR_LUN, 1);
UFS_DEVICE_DESC_PARAM(secure_removal_type, _SEC_RMV_TYPE, 1);
UFS_DEVICE_DESC_PARAM(support_security_lun, _SEC_LU, 1);
UFS_DEVICE_DESC_PARAM(bkops_termination_latency, _BKOP_TERM_LT, 1);
UFS_DEVICE_DESC_PARAM(initial_active_icc_level, _ACTVE_ICC_LVL, 1);
UFS_DEVICE_DESC_PARAM(specification_version, _SPEC_VER, 2);
UFS_DEVICE_DESC_PARAM(manufacturing_date, _MANF_DATE, 2);
UFS_DEVICE_DESC_PARAM(manufacturer_id, _MANF_ID, 2);
UFS_DEVICE_DESC_PARAM(rtt_capability, _RTT_CAP, 1);
UFS_DEVICE_DESC_PARAM(rtc_update, _FRQ_RTC, 2);
UFS_DEVICE_DESC_PARAM(ufs_features, _UFS_FEAT, 1);
UFS_DEVICE_DESC_PARAM(ffu_timeout, _FFU_TMT, 1);
UFS_DEVICE_DESC_PARAM(queue_depth, _Q_DPTH, 1);
UFS_DEVICE_DESC_PARAM(device_version, _DEV_VER, 2);
UFS_DEVICE_DESC_PARAM(number_of_secure_wpa, _NUM_SEC_WPA, 1);
UFS_DEVICE_DESC_PARAM(psa_max_data_size, _PSA_MAX_DATA, 4);
UFS_DEVICE_DESC_PARAM(psa_state_timeout, _PSA_TMT, 1);
UFS_DEVICE_DESC_PARAM(ext_feature_sup, _EXT_UFS_FEATURE_SUP, 4);
UFS_DEVICE_DESC_PARAM(wb_presv_us_en, _WB_PRESRV_USRSPC_EN, 1);
UFS_DEVICE_DESC_PARAM(wb_type, _WB_TYPE, 1);
UFS_DEVICE_DESC_PARAM(wb_shared_alloc_units, _WB_SHARED_ALLOC_UNITS, 4);
static struct attribute *ufs_sysfs_device_descriptor[] = {
&dev_attr_device_type.attr,
&dev_attr_device_class.attr,
&dev_attr_device_sub_class.attr,
&dev_attr_protocol.attr,
&dev_attr_number_of_luns.attr,
&dev_attr_number_of_wluns.attr,
&dev_attr_boot_enable.attr,
&dev_attr_descriptor_access_enable.attr,
&dev_attr_initial_power_mode.attr,
&dev_attr_high_priority_lun.attr,
&dev_attr_secure_removal_type.attr,
&dev_attr_support_security_lun.attr,
&dev_attr_bkops_termination_latency.attr,
&dev_attr_initial_active_icc_level.attr,
&dev_attr_specification_version.attr,
&dev_attr_manufacturing_date.attr,
&dev_attr_manufacturer_id.attr,
&dev_attr_rtt_capability.attr,
&dev_attr_rtc_update.attr,
&dev_attr_ufs_features.attr,
&dev_attr_ffu_timeout.attr,
&dev_attr_queue_depth.attr,
&dev_attr_device_version.attr,
&dev_attr_number_of_secure_wpa.attr,
&dev_attr_psa_max_data_size.attr,
&dev_attr_psa_state_timeout.attr,
&dev_attr_ext_feature_sup.attr,
&dev_attr_wb_presv_us_en.attr,
&dev_attr_wb_type.attr,
&dev_attr_wb_shared_alloc_units.attr,
NULL,
};
static const struct attribute_group ufs_sysfs_device_descriptor_group = {
.name = "device_descriptor",
.attrs = ufs_sysfs_device_descriptor,
};
#define UFS_INTERCONNECT_DESC_PARAM(_name, _uname, _size) \
UFS_DESC_PARAM(_name, _uname, INTERCONNECT, _size)
UFS_INTERCONNECT_DESC_PARAM(unipro_version, _UNIPRO_VER, 2);
UFS_INTERCONNECT_DESC_PARAM(mphy_version, _MPHY_VER, 2);
static struct attribute *ufs_sysfs_interconnect_descriptor[] = {
&dev_attr_unipro_version.attr,
&dev_attr_mphy_version.attr,
NULL,
};
static const struct attribute_group ufs_sysfs_interconnect_descriptor_group = {
.name = "interconnect_descriptor",
.attrs = ufs_sysfs_interconnect_descriptor,
};
#define UFS_GEOMETRY_DESC_PARAM(_name, _uname, _size) \
UFS_DESC_PARAM(_name, _uname, GEOMETRY, _size)
UFS_GEOMETRY_DESC_PARAM(raw_device_capacity, _DEV_CAP, 8);
UFS_GEOMETRY_DESC_PARAM(max_number_of_luns, _MAX_NUM_LUN, 1);
UFS_GEOMETRY_DESC_PARAM(segment_size, _SEG_SIZE, 4);
UFS_GEOMETRY_DESC_PARAM(allocation_unit_size, _ALLOC_UNIT_SIZE, 1);
UFS_GEOMETRY_DESC_PARAM(min_addressable_block_size, _MIN_BLK_SIZE, 1);
UFS_GEOMETRY_DESC_PARAM(optimal_read_block_size, _OPT_RD_BLK_SIZE, 1);
UFS_GEOMETRY_DESC_PARAM(optimal_write_block_size, _OPT_WR_BLK_SIZE, 1);
UFS_GEOMETRY_DESC_PARAM(max_in_buffer_size, _MAX_IN_BUF_SIZE, 1);
UFS_GEOMETRY_DESC_PARAM(max_out_buffer_size, _MAX_OUT_BUF_SIZE, 1);
UFS_GEOMETRY_DESC_PARAM(rpmb_rw_size, _RPMB_RW_SIZE, 1);
UFS_GEOMETRY_DESC_PARAM(dyn_capacity_resource_policy, _DYN_CAP_RSRC_PLC, 1);
UFS_GEOMETRY_DESC_PARAM(data_ordering, _DATA_ORDER, 1);
UFS_GEOMETRY_DESC_PARAM(max_number_of_contexts, _MAX_NUM_CTX, 1);
UFS_GEOMETRY_DESC_PARAM(sys_data_tag_unit_size, _TAG_UNIT_SIZE, 1);
UFS_GEOMETRY_DESC_PARAM(sys_data_tag_resource_size, _TAG_RSRC_SIZE, 1);
UFS_GEOMETRY_DESC_PARAM(secure_removal_types, _SEC_RM_TYPES, 1);
UFS_GEOMETRY_DESC_PARAM(memory_types, _MEM_TYPES, 2);
UFS_GEOMETRY_DESC_PARAM(sys_code_memory_max_alloc_units,
_SCM_MAX_NUM_UNITS, 4);
UFS_GEOMETRY_DESC_PARAM(sys_code_memory_capacity_adjustment_factor,
_SCM_CAP_ADJ_FCTR, 2);
UFS_GEOMETRY_DESC_PARAM(non_persist_memory_max_alloc_units,
_NPM_MAX_NUM_UNITS, 4);
UFS_GEOMETRY_DESC_PARAM(non_persist_memory_capacity_adjustment_factor,
_NPM_CAP_ADJ_FCTR, 2);
UFS_GEOMETRY_DESC_PARAM(enh1_memory_max_alloc_units,
_ENM1_MAX_NUM_UNITS, 4);
UFS_GEOMETRY_DESC_PARAM(enh1_memory_capacity_adjustment_factor,
_ENM1_CAP_ADJ_FCTR, 2);
UFS_GEOMETRY_DESC_PARAM(enh2_memory_max_alloc_units,
_ENM2_MAX_NUM_UNITS, 4);
UFS_GEOMETRY_DESC_PARAM(enh2_memory_capacity_adjustment_factor,
_ENM2_CAP_ADJ_FCTR, 2);
UFS_GEOMETRY_DESC_PARAM(enh3_memory_max_alloc_units,
_ENM3_MAX_NUM_UNITS, 4);
UFS_GEOMETRY_DESC_PARAM(enh3_memory_capacity_adjustment_factor,
_ENM3_CAP_ADJ_FCTR, 2);
UFS_GEOMETRY_DESC_PARAM(enh4_memory_max_alloc_units,
_ENM4_MAX_NUM_UNITS, 4);
UFS_GEOMETRY_DESC_PARAM(enh4_memory_capacity_adjustment_factor,
_ENM4_CAP_ADJ_FCTR, 2);
UFS_GEOMETRY_DESC_PARAM(wb_max_alloc_units, _WB_MAX_ALLOC_UNITS, 4);
UFS_GEOMETRY_DESC_PARAM(wb_max_wb_luns, _WB_MAX_WB_LUNS, 1);
UFS_GEOMETRY_DESC_PARAM(wb_buff_cap_adj, _WB_BUFF_CAP_ADJ, 1);
UFS_GEOMETRY_DESC_PARAM(wb_sup_red_type, _WB_SUP_RED_TYPE, 1);
UFS_GEOMETRY_DESC_PARAM(wb_sup_wb_type, _WB_SUP_WB_TYPE, 1);
static struct attribute *ufs_sysfs_geometry_descriptor[] = {
&dev_attr_raw_device_capacity.attr,
&dev_attr_max_number_of_luns.attr,
&dev_attr_segment_size.attr,
&dev_attr_allocation_unit_size.attr,
&dev_attr_min_addressable_block_size.attr,
&dev_attr_optimal_read_block_size.attr,
&dev_attr_optimal_write_block_size.attr,
&dev_attr_max_in_buffer_size.attr,
&dev_attr_max_out_buffer_size.attr,
&dev_attr_rpmb_rw_size.attr,
&dev_attr_dyn_capacity_resource_policy.attr,
&dev_attr_data_ordering.attr,
&dev_attr_max_number_of_contexts.attr,
&dev_attr_sys_data_tag_unit_size.attr,
&dev_attr_sys_data_tag_resource_size.attr,
&dev_attr_secure_removal_types.attr,
&dev_attr_memory_types.attr,
&dev_attr_sys_code_memory_max_alloc_units.attr,
&dev_attr_sys_code_memory_capacity_adjustment_factor.attr,
&dev_attr_non_persist_memory_max_alloc_units.attr,
&dev_attr_non_persist_memory_capacity_adjustment_factor.attr,
&dev_attr_enh1_memory_max_alloc_units.attr,
&dev_attr_enh1_memory_capacity_adjustment_factor.attr,
&dev_attr_enh2_memory_max_alloc_units.attr,
&dev_attr_enh2_memory_capacity_adjustment_factor.attr,
&dev_attr_enh3_memory_max_alloc_units.attr,
&dev_attr_enh3_memory_capacity_adjustment_factor.attr,
&dev_attr_enh4_memory_max_alloc_units.attr,
&dev_attr_enh4_memory_capacity_adjustment_factor.attr,
&dev_attr_wb_max_alloc_units.attr,
&dev_attr_wb_max_wb_luns.attr,
&dev_attr_wb_buff_cap_adj.attr,
&dev_attr_wb_sup_red_type.attr,
&dev_attr_wb_sup_wb_type.attr,
NULL,
};
static const struct attribute_group ufs_sysfs_geometry_descriptor_group = {
.name = "geometry_descriptor",
.attrs = ufs_sysfs_geometry_descriptor,
};
#define UFS_HEALTH_DESC_PARAM(_name, _uname, _size) \
UFS_DESC_PARAM(_name, _uname, HEALTH, _size)
UFS_HEALTH_DESC_PARAM(eol_info, _EOL_INFO, 1);
UFS_HEALTH_DESC_PARAM(life_time_estimation_a, _LIFE_TIME_EST_A, 1);
UFS_HEALTH_DESC_PARAM(life_time_estimation_b, _LIFE_TIME_EST_B, 1);
static struct attribute *ufs_sysfs_health_descriptor[] = {
&dev_attr_eol_info.attr,
&dev_attr_life_time_estimation_a.attr,
&dev_attr_life_time_estimation_b.attr,
NULL,
};
static const struct attribute_group ufs_sysfs_health_descriptor_group = {
.name = "health_descriptor",
.attrs = ufs_sysfs_health_descriptor,
};
#define UFS_POWER_DESC_PARAM(_name, _uname, _index) \
static ssize_t _name##_index##_show(struct device *dev, \
struct device_attribute *attr, char *buf) \
{ \
struct ufs_hba *hba = dev_get_drvdata(dev); \
return ufs_sysfs_read_desc_param(hba, QUERY_DESC_IDN_POWER, 0, \
PWR_DESC##_uname##_0 + _index * 2, buf, 2); \
} \
static DEVICE_ATTR_RO(_name##_index)
UFS_POWER_DESC_PARAM(active_icc_levels_vcc, _ACTIVE_LVLS_VCC, 0);
UFS_POWER_DESC_PARAM(active_icc_levels_vcc, _ACTIVE_LVLS_VCC, 1);
UFS_POWER_DESC_PARAM(active_icc_levels_vcc, _ACTIVE_LVLS_VCC, 2);
UFS_POWER_DESC_PARAM(active_icc_levels_vcc, _ACTIVE_LVLS_VCC, 3);
UFS_POWER_DESC_PARAM(active_icc_levels_vcc, _ACTIVE_LVLS_VCC, 4);
UFS_POWER_DESC_PARAM(active_icc_levels_vcc, _ACTIVE_LVLS_VCC, 5);
UFS_POWER_DESC_PARAM(active_icc_levels_vcc, _ACTIVE_LVLS_VCC, 6);
UFS_POWER_DESC_PARAM(active_icc_levels_vcc, _ACTIVE_LVLS_VCC, 7);
UFS_POWER_DESC_PARAM(active_icc_levels_vcc, _ACTIVE_LVLS_VCC, 8);
UFS_POWER_DESC_PARAM(active_icc_levels_vcc, _ACTIVE_LVLS_VCC, 9);
UFS_POWER_DESC_PARAM(active_icc_levels_vcc, _ACTIVE_LVLS_VCC, 10);
UFS_POWER_DESC_PARAM(active_icc_levels_vcc, _ACTIVE_LVLS_VCC, 11);
UFS_POWER_DESC_PARAM(active_icc_levels_vcc, _ACTIVE_LVLS_VCC, 12);
UFS_POWER_DESC_PARAM(active_icc_levels_vcc, _ACTIVE_LVLS_VCC, 13);
UFS_POWER_DESC_PARAM(active_icc_levels_vcc, _ACTIVE_LVLS_VCC, 14);
UFS_POWER_DESC_PARAM(active_icc_levels_vcc, _ACTIVE_LVLS_VCC, 15);
UFS_POWER_DESC_PARAM(active_icc_levels_vccq, _ACTIVE_LVLS_VCCQ, 0);
UFS_POWER_DESC_PARAM(active_icc_levels_vccq, _ACTIVE_LVLS_VCCQ, 1);
UFS_POWER_DESC_PARAM(active_icc_levels_vccq, _ACTIVE_LVLS_VCCQ, 2);
UFS_POWER_DESC_PARAM(active_icc_levels_vccq, _ACTIVE_LVLS_VCCQ, 3);
UFS_POWER_DESC_PARAM(active_icc_levels_vccq, _ACTIVE_LVLS_VCCQ, 4);
UFS_POWER_DESC_PARAM(active_icc_levels_vccq, _ACTIVE_LVLS_VCCQ, 5);
UFS_POWER_DESC_PARAM(active_icc_levels_vccq, _ACTIVE_LVLS_VCCQ, 6);
UFS_POWER_DESC_PARAM(active_icc_levels_vccq, _ACTIVE_LVLS_VCCQ, 7);
UFS_POWER_DESC_PARAM(active_icc_levels_vccq, _ACTIVE_LVLS_VCCQ, 8);
UFS_POWER_DESC_PARAM(active_icc_levels_vccq, _ACTIVE_LVLS_VCCQ, 9);
UFS_POWER_DESC_PARAM(active_icc_levels_vccq, _ACTIVE_LVLS_VCCQ, 10);
UFS_POWER_DESC_PARAM(active_icc_levels_vccq, _ACTIVE_LVLS_VCCQ, 11);
UFS_POWER_DESC_PARAM(active_icc_levels_vccq, _ACTIVE_LVLS_VCCQ, 12);
UFS_POWER_DESC_PARAM(active_icc_levels_vccq, _ACTIVE_LVLS_VCCQ, 13);
UFS_POWER_DESC_PARAM(active_icc_levels_vccq, _ACTIVE_LVLS_VCCQ, 14);
UFS_POWER_DESC_PARAM(active_icc_levels_vccq, _ACTIVE_LVLS_VCCQ, 15);
UFS_POWER_DESC_PARAM(active_icc_levels_vccq2, _ACTIVE_LVLS_VCCQ2, 0);
UFS_POWER_DESC_PARAM(active_icc_levels_vccq2, _ACTIVE_LVLS_VCCQ2, 1);
UFS_POWER_DESC_PARAM(active_icc_levels_vccq2, _ACTIVE_LVLS_VCCQ2, 2);
UFS_POWER_DESC_PARAM(active_icc_levels_vccq2, _ACTIVE_LVLS_VCCQ2, 3);
UFS_POWER_DESC_PARAM(active_icc_levels_vccq2, _ACTIVE_LVLS_VCCQ2, 4);
UFS_POWER_DESC_PARAM(active_icc_levels_vccq2, _ACTIVE_LVLS_VCCQ2, 5);
UFS_POWER_DESC_PARAM(active_icc_levels_vccq2, _ACTIVE_LVLS_VCCQ2, 6);
UFS_POWER_DESC_PARAM(active_icc_levels_vccq2, _ACTIVE_LVLS_VCCQ2, 7);
UFS_POWER_DESC_PARAM(active_icc_levels_vccq2, _ACTIVE_LVLS_VCCQ2, 8);
UFS_POWER_DESC_PARAM(active_icc_levels_vccq2, _ACTIVE_LVLS_VCCQ2, 9);
UFS_POWER_DESC_PARAM(active_icc_levels_vccq2, _ACTIVE_LVLS_VCCQ2, 10);
UFS_POWER_DESC_PARAM(active_icc_levels_vccq2, _ACTIVE_LVLS_VCCQ2, 11);
UFS_POWER_DESC_PARAM(active_icc_levels_vccq2, _ACTIVE_LVLS_VCCQ2, 12);
UFS_POWER_DESC_PARAM(active_icc_levels_vccq2, _ACTIVE_LVLS_VCCQ2, 13);
UFS_POWER_DESC_PARAM(active_icc_levels_vccq2, _ACTIVE_LVLS_VCCQ2, 14);
UFS_POWER_DESC_PARAM(active_icc_levels_vccq2, _ACTIVE_LVLS_VCCQ2, 15);
static struct attribute *ufs_sysfs_power_descriptor[] = {
&dev_attr_active_icc_levels_vcc0.attr,
&dev_attr_active_icc_levels_vcc1.attr,
&dev_attr_active_icc_levels_vcc2.attr,
&dev_attr_active_icc_levels_vcc3.attr,
&dev_attr_active_icc_levels_vcc4.attr,
&dev_attr_active_icc_levels_vcc5.attr,
&dev_attr_active_icc_levels_vcc6.attr,
&dev_attr_active_icc_levels_vcc7.attr,
&dev_attr_active_icc_levels_vcc8.attr,
&dev_attr_active_icc_levels_vcc9.attr,
&dev_attr_active_icc_levels_vcc10.attr,
&dev_attr_active_icc_levels_vcc11.attr,
&dev_attr_active_icc_levels_vcc12.attr,
&dev_attr_active_icc_levels_vcc13.attr,
&dev_attr_active_icc_levels_vcc14.attr,
&dev_attr_active_icc_levels_vcc15.attr,
&dev_attr_active_icc_levels_vccq0.attr,
&dev_attr_active_icc_levels_vccq1.attr,
&dev_attr_active_icc_levels_vccq2.attr,
&dev_attr_active_icc_levels_vccq3.attr,
&dev_attr_active_icc_levels_vccq4.attr,
&dev_attr_active_icc_levels_vccq5.attr,
&dev_attr_active_icc_levels_vccq6.attr,
&dev_attr_active_icc_levels_vccq7.attr,
&dev_attr_active_icc_levels_vccq8.attr,
&dev_attr_active_icc_levels_vccq9.attr,
&dev_attr_active_icc_levels_vccq10.attr,
&dev_attr_active_icc_levels_vccq11.attr,
&dev_attr_active_icc_levels_vccq12.attr,
&dev_attr_active_icc_levels_vccq13.attr,
&dev_attr_active_icc_levels_vccq14.attr,
&dev_attr_active_icc_levels_vccq15.attr,
&dev_attr_active_icc_levels_vccq20.attr,
&dev_attr_active_icc_levels_vccq21.attr,
&dev_attr_active_icc_levels_vccq22.attr,
&dev_attr_active_icc_levels_vccq23.attr,
&dev_attr_active_icc_levels_vccq24.attr,
&dev_attr_active_icc_levels_vccq25.attr,
&dev_attr_active_icc_levels_vccq26.attr,
&dev_attr_active_icc_levels_vccq27.attr,
&dev_attr_active_icc_levels_vccq28.attr,
&dev_attr_active_icc_levels_vccq29.attr,
&dev_attr_active_icc_levels_vccq210.attr,
&dev_attr_active_icc_levels_vccq211.attr,
&dev_attr_active_icc_levels_vccq212.attr,
&dev_attr_active_icc_levels_vccq213.attr,
&dev_attr_active_icc_levels_vccq214.attr,
&dev_attr_active_icc_levels_vccq215.attr,
NULL,
};
static const struct attribute_group ufs_sysfs_power_descriptor_group = {
.name = "power_descriptor",
.attrs = ufs_sysfs_power_descriptor,
};
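/*
* Generate a read-only sysfs attribute that first reads the device descriptor
* to obtain the string descriptor index and then reads that string descriptor
* in ASCII form.
*/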
#define UFS_STRING_DESCRIPTOR(_name, _pname) \
static ssize_t _name##_show(struct device *dev, \
struct device_attribute *attr, char *buf) \
{ \
u8 index; \
struct ufs_hba *hba = dev_get_drvdata(dev); \
int ret; \
int desc_len = QUERY_DESC_MAX_SIZE; \
u8 *desc_buf; \
\
down(&hba->host_sem); \
if (!ufshcd_is_user_access_allowed(hba)) { \
up(&hba->host_sem); \
return -EBUSY; \
} \
desc_buf = kzalloc(QUERY_DESC_MAX_SIZE, GFP_ATOMIC); \
if (!desc_buf) { \
up(&hba->host_sem); \
return -ENOMEM; \
} \
ufshcd_rpm_get_sync(hba); \
ret = ufshcd_query_descriptor_retry(hba, \
UPIU_QUERY_OPCODE_READ_DESC, QUERY_DESC_IDN_DEVICE, \
0, 0, desc_buf, &desc_len); \
if (ret) { \
ret = -EINVAL; \
goto out; \
} \
index = desc_buf[DEVICE_DESC_PARAM##_pname]; \
kfree(desc_buf); \
desc_buf = NULL; \
ret = ufshcd_read_string_desc(hba, index, &desc_buf, \
SD_ASCII_STD); \
if (ret < 0) \
goto out; \
ret = sysfs_emit(buf, "%s\n", desc_buf); \
out: \
ufshcd_rpm_put_sync(hba); \
kfree(desc_buf); \
up(&hba->host_sem); \
return ret; \
} \
static DEVICE_ATTR_RO(_name)
UFS_STRING_DESCRIPTOR(manufacturer_name, _MANF_NAME);
UFS_STRING_DESCRIPTOR(product_name, _PRDCT_NAME);
UFS_STRING_DESCRIPTOR(oem_id, _OEM_ID);
UFS_STRING_DESCRIPTOR(serial_number, _SN);
UFS_STRING_DESCRIPTOR(product_revision, _PRDCT_REV);
static struct attribute *ufs_sysfs_string_descriptors[] = {
&dev_attr_manufacturer_name.attr,
&dev_attr_product_name.attr,
&dev_attr_oem_id.attr,
&dev_attr_serial_number.attr,
&dev_attr_product_revision.attr,
NULL,
};
static const struct attribute_group ufs_sysfs_string_descriptors_group = {
.name = "string_descriptors",
.attrs = ufs_sysfs_string_descriptors,
};
static inline bool ufshcd_is_wb_flags(enum flag_idn idn)
{
return idn >= QUERY_FLAG_IDN_WB_EN &&
idn <= QUERY_FLAG_IDN_WB_BUFF_FLUSH_DURING_HIBERN8;
}
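/*
* Generate a read-only sysfs attribute that reads a device flag. WriteBooster
* related flags are addressed with the index returned by
* ufshcd_wb_get_query_index().
*/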
#define UFS_FLAG(_name, _uname) \
static ssize_t _name##_show(struct device *dev, \
struct device_attribute *attr, char *buf) \
{ \
bool flag; \
u8 index = 0; \
int ret; \
struct ufs_hba *hba = dev_get_drvdata(dev); \
\
down(&hba->host_sem); \
if (!ufshcd_is_user_access_allowed(hba)) { \
up(&hba->host_sem); \
return -EBUSY; \
} \
if (ufshcd_is_wb_flags(QUERY_FLAG_IDN##_uname)) \
index = ufshcd_wb_get_query_index(hba); \
ufshcd_rpm_get_sync(hba); \
ret = ufshcd_query_flag(hba, UPIU_QUERY_OPCODE_READ_FLAG, \
QUERY_FLAG_IDN##_uname, index, &flag); \
ufshcd_rpm_put_sync(hba); \
if (ret) { \
ret = -EINVAL; \
goto out; \
} \
ret = sysfs_emit(buf, "%s\n", flag ? "true" : "false"); \
out: \
up(&hba->host_sem); \
return ret; \
} \
static DEVICE_ATTR_RO(_name)
UFS_FLAG(device_init, _FDEVICEINIT);
UFS_FLAG(permanent_wpe, _PERMANENT_WPE);
UFS_FLAG(power_on_wpe, _PWR_ON_WPE);
UFS_FLAG(bkops_enable, _BKOPS_EN);
UFS_FLAG(life_span_mode_enable, _LIFE_SPAN_MODE_ENABLE);
UFS_FLAG(phy_resource_removal, _FPHYRESOURCEREMOVAL);
UFS_FLAG(busy_rtc, _BUSY_RTC);
UFS_FLAG(disable_fw_update, _PERMANENTLY_DISABLE_FW_UPDATE);
UFS_FLAG(wb_enable, _WB_EN);
UFS_FLAG(wb_flush_en, _WB_BUFF_FLUSH_EN);
UFS_FLAG(wb_flush_during_h8, _WB_BUFF_FLUSH_DURING_HIBERN8);
static struct attribute *ufs_sysfs_device_flags[] = {
&dev_attr_device_init.attr,
&dev_attr_permanent_wpe.attr,
&dev_attr_power_on_wpe.attr,
&dev_attr_bkops_enable.attr,
&dev_attr_life_span_mode_enable.attr,
&dev_attr_phy_resource_removal.attr,
&dev_attr_busy_rtc.attr,
&dev_attr_disable_fw_update.attr,
&dev_attr_wb_enable.attr,
&dev_attr_wb_flush_en.attr,
&dev_attr_wb_flush_during_h8.attr,
NULL,
};
static const struct attribute_group ufs_sysfs_flags_group = {
.name = "flags",
.attrs = ufs_sysfs_device_flags,
};
static inline bool ufshcd_is_wb_attrs(enum attr_idn idn)
{
return idn >= QUERY_ATTR_IDN_WB_FLUSH_STATUS &&
idn <= QUERY_ATTR_IDN_CURR_WB_BUFF_SIZE;
}
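/*
* Generate a read-only sysfs attribute that reads a UFS attribute.
* WriteBooster related attributes use the index returned by
* ufshcd_wb_get_query_index().
*/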
#define UFS_ATTRIBUTE(_name, _uname) \
static ssize_t _name##_show(struct device *dev, \
struct device_attribute *attr, char *buf) \
{ \
struct ufs_hba *hba = dev_get_drvdata(dev); \
u32 value; \
int ret; \
u8 index = 0; \
\
down(&hba->host_sem); \
if (!ufshcd_is_user_access_allowed(hba)) { \
up(&hba->host_sem); \
return -EBUSY; \
} \
if (ufshcd_is_wb_attrs(QUERY_ATTR_IDN##_uname)) \
index = ufshcd_wb_get_query_index(hba); \
ufshcd_rpm_get_sync(hba); \
ret = ufshcd_query_attr(hba, UPIU_QUERY_OPCODE_READ_ATTR, \
QUERY_ATTR_IDN##_uname, index, 0, &value); \
ufshcd_rpm_put_sync(hba); \
if (ret) { \
ret = -EINVAL; \
goto out; \
} \
ret = sysfs_emit(buf, "0x%08X\n", value); \
out: \
up(&hba->host_sem); \
return ret; \
} \
static DEVICE_ATTR_RO(_name)
UFS_ATTRIBUTE(boot_lun_enabled, _BOOT_LU_EN);
UFS_ATTRIBUTE(current_power_mode, _POWER_MODE);
UFS_ATTRIBUTE(active_icc_level, _ACTIVE_ICC_LVL);
UFS_ATTRIBUTE(ooo_data_enabled, _OOO_DATA_EN);
UFS_ATTRIBUTE(bkops_status, _BKOPS_STATUS);
UFS_ATTRIBUTE(purge_status, _PURGE_STATUS);
UFS_ATTRIBUTE(max_data_in_size, _MAX_DATA_IN);
UFS_ATTRIBUTE(max_data_out_size, _MAX_DATA_OUT);
UFS_ATTRIBUTE(reference_clock_frequency, _REF_CLK_FREQ);
UFS_ATTRIBUTE(configuration_descriptor_lock, _CONF_DESC_LOCK);
UFS_ATTRIBUTE(max_number_of_rtt, _MAX_NUM_OF_RTT);
UFS_ATTRIBUTE(exception_event_control, _EE_CONTROL);
UFS_ATTRIBUTE(exception_event_status, _EE_STATUS);
UFS_ATTRIBUTE(ffu_status, _FFU_STATUS);
UFS_ATTRIBUTE(psa_state, _PSA_STATE);
UFS_ATTRIBUTE(psa_data_size, _PSA_DATA_SIZE);
UFS_ATTRIBUTE(wb_flush_status, _WB_FLUSH_STATUS);
UFS_ATTRIBUTE(wb_avail_buf, _AVAIL_WB_BUFF_SIZE);
UFS_ATTRIBUTE(wb_life_time_est, _WB_BUFF_LIFE_TIME_EST);
UFS_ATTRIBUTE(wb_cur_buf, _CURR_WB_BUFF_SIZE);
static struct attribute *ufs_sysfs_attributes[] = {
&dev_attr_boot_lun_enabled.attr,
&dev_attr_current_power_mode.attr,
&dev_attr_active_icc_level.attr,
&dev_attr_ooo_data_enabled.attr,
&dev_attr_bkops_status.attr,
&dev_attr_purge_status.attr,
&dev_attr_max_data_in_size.attr,
&dev_attr_max_data_out_size.attr,
&dev_attr_reference_clock_frequency.attr,
&dev_attr_configuration_descriptor_lock.attr,
&dev_attr_max_number_of_rtt.attr,
&dev_attr_exception_event_control.attr,
&dev_attr_exception_event_status.attr,
&dev_attr_ffu_status.attr,
&dev_attr_psa_state.attr,
&dev_attr_psa_data_size.attr,
&dev_attr_wb_flush_status.attr,
&dev_attr_wb_avail_buf.attr,
&dev_attr_wb_life_time_est.attr,
&dev_attr_wb_cur_buf.attr,
NULL,
};
static const struct attribute_group ufs_sysfs_attributes_group = {
.name = "attributes",
.attrs = ufs_sysfs_attributes,
};
static const struct attribute_group *ufs_sysfs_groups[] = {
&ufs_sysfs_default_group,
&ufs_sysfs_capabilities_group,
&ufs_sysfs_monitor_group,
&ufs_sysfs_device_descriptor_group,
&ufs_sysfs_interconnect_descriptor_group,
&ufs_sysfs_geometry_descriptor_group,
&ufs_sysfs_health_descriptor_group,
&ufs_sysfs_power_descriptor_group,
&ufs_sysfs_string_descriptors_group,
&ufs_sysfs_flags_group,
&ufs_sysfs_attributes_group,
NULL,
};
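/*
 * UFS_LUN_DESC_PARAM() generates a per-LUN read-only attribute that reads one
 * field of the unit descriptor belonging to the SCSI device the attribute
 * hangs off.
 */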
#define UFS_LUN_DESC_PARAM(_pname, _puname, _duname, _size) \
static ssize_t _pname##_show(struct device *dev, \
struct device_attribute *attr, char *buf) \
{ \
struct scsi_device *sdev = to_scsi_device(dev); \
struct ufs_hba *hba = shost_priv(sdev->host); \
u8 lun = ufshcd_scsi_to_upiu_lun(sdev->lun); \
if (!ufs_is_valid_unit_desc_lun(&hba->dev_info, lun)) \
return -EINVAL; \
return ufs_sysfs_read_desc_param(hba, QUERY_DESC_IDN_##_duname, \
lun, _duname##_DESC_PARAM##_puname, buf, _size); \
} \
static DEVICE_ATTR_RO(_pname)
#define UFS_UNIT_DESC_PARAM(_name, _uname, _size) \
UFS_LUN_DESC_PARAM(_name, _uname, UNIT, _size)
UFS_UNIT_DESC_PARAM(lu_enable, _LU_ENABLE, 1);
UFS_UNIT_DESC_PARAM(boot_lun_id, _BOOT_LUN_ID, 1);
UFS_UNIT_DESC_PARAM(lun_write_protect, _LU_WR_PROTECT, 1);
UFS_UNIT_DESC_PARAM(lun_queue_depth, _LU_Q_DEPTH, 1);
UFS_UNIT_DESC_PARAM(psa_sensitive, _PSA_SENSITIVE, 1);
UFS_UNIT_DESC_PARAM(lun_memory_type, _MEM_TYPE, 1);
UFS_UNIT_DESC_PARAM(data_reliability, _DATA_RELIABILITY, 1);
UFS_UNIT_DESC_PARAM(logical_block_size, _LOGICAL_BLK_SIZE, 1);
UFS_UNIT_DESC_PARAM(logical_block_count, _LOGICAL_BLK_COUNT, 8);
UFS_UNIT_DESC_PARAM(erase_block_size, _ERASE_BLK_SIZE, 4);
UFS_UNIT_DESC_PARAM(provisioning_type, _PROVISIONING_TYPE, 1);
UFS_UNIT_DESC_PARAM(physical_memory_resourse_count, _PHY_MEM_RSRC_CNT, 8);
UFS_UNIT_DESC_PARAM(context_capabilities, _CTX_CAPABILITIES, 2);
UFS_UNIT_DESC_PARAM(large_unit_granularity, _LARGE_UNIT_SIZE_M1, 1);
UFS_UNIT_DESC_PARAM(wb_buf_alloc_units, _WB_BUF_ALLOC_UNITS, 4);
static struct attribute *ufs_sysfs_unit_descriptor[] = {
&dev_attr_lu_enable.attr,
&dev_attr_boot_lun_id.attr,
&dev_attr_lun_write_protect.attr,
&dev_attr_lun_queue_depth.attr,
&dev_attr_psa_sensitive.attr,
&dev_attr_lun_memory_type.attr,
&dev_attr_data_reliability.attr,
&dev_attr_logical_block_size.attr,
&dev_attr_logical_block_count.attr,
&dev_attr_erase_block_size.attr,
&dev_attr_provisioning_type.attr,
&dev_attr_physical_memory_resourse_count.attr,
&dev_attr_context_capabilities.attr,
&dev_attr_large_unit_granularity.attr,
&dev_attr_wb_buf_alloc_units.attr,
NULL,
};
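/*
 * Hide the whole unit descriptor group for the boot and device well-known
 * LUNs, and hide the WriteBooster allocation units attribute for the RPMB
 * well-known LUN, where it does not apply.
 */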
static umode_t ufs_unit_descriptor_is_visible(struct kobject *kobj, struct attribute *attr, int n)
{
struct device *dev = container_of(kobj, struct device, kobj);
struct scsi_device *sdev = to_scsi_device(dev);
u8 lun = ufshcd_scsi_to_upiu_lun(sdev->lun);
umode_t mode = attr->mode;
if (lun == UFS_UPIU_BOOT_WLUN || lun == UFS_UPIU_UFS_DEVICE_WLUN)
/* Boot and device WLUN have no unit descriptors */
mode = 0;
if (lun == UFS_UPIU_RPMB_WLUN && attr == &dev_attr_wb_buf_alloc_units.attr)
mode = 0;
return mode;
}
const struct attribute_group ufs_sysfs_unit_descriptor_group = {
.name = "unit_descriptor",
.attrs = ufs_sysfs_unit_descriptor,
.is_visible = ufs_unit_descriptor_is_visible,
};
static ssize_t dyn_cap_needed_attribute_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
u32 value;
struct scsi_device *sdev = to_scsi_device(dev);
struct ufs_hba *hba = shost_priv(sdev->host);
u8 lun = ufshcd_scsi_to_upiu_lun(sdev->lun);
int ret;
down(&hba->host_sem);
if (!ufshcd_is_user_access_allowed(hba)) {
ret = -EBUSY;
goto out;
}
ufshcd_rpm_get_sync(hba);
ret = ufshcd_query_attr(hba, UPIU_QUERY_OPCODE_READ_ATTR,
QUERY_ATTR_IDN_DYN_CAP_NEEDED, lun, 0, &value);
ufshcd_rpm_put_sync(hba);
if (ret) {
ret = -EINVAL;
goto out;
}
ret = sysfs_emit(buf, "0x%08X\n", value);
out:
up(&hba->host_sem);
return ret;
}
static DEVICE_ATTR_RO(dyn_cap_needed_attribute);
static struct attribute *ufs_sysfs_lun_attributes[] = {
&dev_attr_dyn_cap_needed_attribute.attr,
NULL,
};
const struct attribute_group ufs_sysfs_lun_attributes_group = {
.attrs = ufs_sysfs_lun_attributes,
};
void ufs_sysfs_add_nodes(struct device *dev)
{
int ret;
ret = sysfs_create_groups(&dev->kobj, ufs_sysfs_groups);
if (ret)
dev_err(dev,
"%s: sysfs groups creation failed (err = %d)\n",
__func__, ret);
}
void ufs_sysfs_remove_nodes(struct device *dev)
{
sysfs_remove_groups(&dev->kobj, ufs_sysfs_groups);
}
| linux-master | drivers/ufs/core/ufs-sysfs.c |
// SPDX-License-Identifier: GPL-2.0
// Copyright (C) 2020 Intel Corporation
#include <linux/debugfs.h>
#include "ufs-debugfs.h"
#include <ufs/ufshcd.h>
#include "ufshcd-priv.h"
static struct dentry *ufs_debugfs_root;
struct ufs_debugfs_attr {
const char *name;
mode_t mode;
const struct file_operations *fops;
};
/* @file corresponds to a debugfs attribute in directory hba->debugfs_root. */
static inline struct ufs_hba *hba_from_file(const struct file *file)
{
return d_inode(file->f_path.dentry->d_parent)->i_private;
}
void __init ufs_debugfs_init(void)
{
ufs_debugfs_root = debugfs_create_dir("ufshcd", NULL);
}
void ufs_debugfs_exit(void)
{
debugfs_remove_recursive(ufs_debugfs_root);
}
static int ufs_debugfs_stats_show(struct seq_file *s, void *data)
{
struct ufs_hba *hba = hba_from_file(s->file);
struct ufs_event_hist *e = hba->ufs_stats.event;
#define PRT(fmt, typ) \
seq_printf(s, fmt, e[UFS_EVT_ ## typ].cnt)
PRT("PHY Adapter Layer errors (except LINERESET): %llu\n", PA_ERR);
PRT("Data Link Layer errors: %llu\n", DL_ERR);
PRT("Network Layer errors: %llu\n", NL_ERR);
PRT("Transport Layer errors: %llu\n", TL_ERR);
PRT("Generic DME errors: %llu\n", DME_ERR);
PRT("Auto-hibernate errors: %llu\n", AUTO_HIBERN8_ERR);
PRT("IS Fatal errors (CEFES, SBFES, HCFES, DFES): %llu\n", FATAL_ERR);
PRT("DME Link Startup errors: %llu\n", LINK_STARTUP_FAIL);
PRT("PM Resume errors: %llu\n", RESUME_ERR);
PRT("PM Suspend errors : %llu\n", SUSPEND_ERR);
PRT("Logical Unit Resets: %llu\n", DEV_RESET);
PRT("Host Resets: %llu\n", HOST_RESET);
PRT("SCSI command aborts: %llu\n", ABORT);
#undef PRT
return 0;
}
DEFINE_SHOW_ATTRIBUTE(ufs_debugfs_stats);
static int ee_usr_mask_get(void *data, u64 *val)
{
struct ufs_hba *hba = data;
*val = hba->ee_usr_mask;
return 0;
}
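/*
 * Gate debugfs accesses that talk to the device: take host_sem, bail out if
 * user access is not currently allowed, and hold a runtime PM reference for
 * the duration of the access.
 */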
static int ufs_debugfs_get_user_access(struct ufs_hba *hba)
__acquires(&hba->host_sem)
{
down(&hba->host_sem);
if (!ufshcd_is_user_access_allowed(hba)) {
up(&hba->host_sem);
return -EBUSY;
}
ufshcd_rpm_get_sync(hba);
return 0;
}
static void ufs_debugfs_put_user_access(struct ufs_hba *hba)
__releases(&hba->host_sem)
{
ufshcd_rpm_put_sync(hba);
up(&hba->host_sem);
}
static int ee_usr_mask_set(void *data, u64 val)
{
struct ufs_hba *hba = data;
int err;
if (val & ~(u64)MASK_EE_STATUS)
return -EINVAL;
err = ufs_debugfs_get_user_access(hba);
if (err)
return err;
err = ufshcd_update_ee_usr_mask(hba, val, MASK_EE_STATUS);
ufs_debugfs_put_user_access(hba);
return err;
}
DEFINE_DEBUGFS_ATTRIBUTE(ee_usr_mask_fops, ee_usr_mask_get, ee_usr_mask_set, "%#llx\n");
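/*
 * Rate-limit user-requested exception events: temporarily drop the bits that
 * just fired from the exception event control mask and schedule delayed work
 * to restore them after debugfs_ee_rate_limit_ms.
 */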
void ufs_debugfs_exception_event(struct ufs_hba *hba, u16 status)
{
bool chgd = false;
u16 ee_ctrl_mask;
int err = 0;
if (!hba->debugfs_ee_rate_limit_ms || !status)
return;
mutex_lock(&hba->ee_ctrl_mutex);
ee_ctrl_mask = hba->ee_drv_mask | (hba->ee_usr_mask & ~status);
chgd = ee_ctrl_mask != hba->ee_ctrl_mask;
if (chgd) {
err = __ufshcd_write_ee_control(hba, ee_ctrl_mask);
if (err)
dev_err(hba->dev, "%s: failed to write ee control %d\n",
__func__, err);
}
mutex_unlock(&hba->ee_ctrl_mutex);
if (chgd && !err) {
unsigned long delay = msecs_to_jiffies(hba->debugfs_ee_rate_limit_ms);
queue_delayed_work(system_freezable_wq, &hba->debugfs_ee_work, delay);
}
}
static void ufs_debugfs_restart_ee(struct work_struct *work)
{
struct ufs_hba *hba = container_of(work, struct ufs_hba, debugfs_ee_work.work);
if (!hba->ee_usr_mask || pm_runtime_suspended(hba->dev) ||
ufs_debugfs_get_user_access(hba))
return;
ufshcd_write_ee_control(hba);
ufs_debugfs_put_user_access(hba);
}
static int ufs_saved_err_show(struct seq_file *s, void *data)
{
struct ufs_debugfs_attr *attr = s->private;
struct ufs_hba *hba = hba_from_file(s->file);
const int *p;
if (strcmp(attr->name, "saved_err") == 0) {
p = &hba->saved_err;
} else if (strcmp(attr->name, "saved_uic_err") == 0) {
p = &hba->saved_uic_err;
} else {
return -ENOENT;
}
seq_printf(s, "%d\n", *p);
return 0;
}
static ssize_t ufs_saved_err_write(struct file *file, const char __user *buf,
size_t count, loff_t *ppos)
{
struct ufs_debugfs_attr *attr = file->f_inode->i_private;
struct ufs_hba *hba = hba_from_file(file);
char val_str[16] = { };
int val, ret;
if (count > sizeof(val_str))
return -EINVAL;
if (copy_from_user(val_str, buf, count))
return -EFAULT;
ret = kstrtoint(val_str, 0, &val);
if (ret < 0)
return ret;
spin_lock_irq(hba->host->host_lock);
if (strcmp(attr->name, "saved_err") == 0) {
hba->saved_err = val;
} else if (strcmp(attr->name, "saved_uic_err") == 0) {
hba->saved_uic_err = val;
} else {
ret = -ENOENT;
}
if (ret == 0)
ufshcd_schedule_eh_work(hba);
spin_unlock_irq(hba->host->host_lock);
return ret < 0 ? ret : count;
}
static int ufs_saved_err_open(struct inode *inode, struct file *file)
{
return single_open(file, ufs_saved_err_show, inode->i_private);
}
static const struct file_operations ufs_saved_err_fops = {
.owner = THIS_MODULE,
.open = ufs_saved_err_open,
.read = seq_read,
.write = ufs_saved_err_write,
.llseek = seq_lseek,
.release = single_release,
};
static const struct ufs_debugfs_attr ufs_attrs[] = {
{ "stats", 0400, &ufs_debugfs_stats_fops },
{ "saved_err", 0600, &ufs_saved_err_fops },
{ "saved_uic_err", 0600, &ufs_saved_err_fops },
{ }
};
void ufs_debugfs_hba_init(struct ufs_hba *hba)
{
const struct ufs_debugfs_attr *attr;
struct dentry *root;
/* Set default exception event rate limit period to 20ms */
hba->debugfs_ee_rate_limit_ms = 20;
INIT_DELAYED_WORK(&hba->debugfs_ee_work, ufs_debugfs_restart_ee);
root = debugfs_create_dir(dev_name(hba->dev), ufs_debugfs_root);
if (IS_ERR_OR_NULL(root))
return;
hba->debugfs_root = root;
d_inode(root)->i_private = hba;
for (attr = ufs_attrs; attr->name; attr++)
debugfs_create_file(attr->name, attr->mode, root, (void *)attr,
attr->fops);
debugfs_create_file("exception_event_mask", 0600, hba->debugfs_root,
hba, &ee_usr_mask_fops);
debugfs_create_u32("exception_event_rate_limit_ms", 0600, hba->debugfs_root,
&hba->debugfs_ee_rate_limit_ms);
}
void ufs_debugfs_hba_exit(struct ufs_hba *hba)
{
debugfs_remove_recursive(hba->debugfs_root);
cancel_delayed_work_sync(&hba->debugfs_ee_work);
}
| linux-master | drivers/ufs/core/ufs-debugfs.c |
// SPDX-License-Identifier: GPL-2.0
/*
* UFS hardware monitoring support
* Copyright (c) 2021, Western Digital Corporation
*/
#include <linux/hwmon.h>
#include <linux/units.h>
#include <ufs/ufshcd.h>
#include "ufshcd-priv.h"
struct ufs_hwmon_data {
struct ufs_hba *hba;
u8 mask;
};
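/*
 * Report whether temperature notifications are enabled, i.e. whether the
 * "too high" or "too low" exception event bit is set both in the caller's
 * mask and in the device's exception event control attribute.
 */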
static int ufs_read_temp_enable(struct ufs_hba *hba, u8 mask, long *val)
{
u32 ee_mask;
int err;
err = ufshcd_query_attr(hba, UPIU_QUERY_OPCODE_READ_ATTR, QUERY_ATTR_IDN_EE_CONTROL, 0, 0,
&ee_mask);
if (err)
return err;
*val = (mask & ee_mask & MASK_EE_TOO_HIGH_TEMP) || (mask & ee_mask & MASK_EE_TOO_LOW_TEMP);
return 0;
}
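/*
 * Read a temperature attribute from the device. A raw value of 0 means the
 * reading is not available; otherwise the value carries an offset of 80
 * degrees Celsius, which is removed before converting to millidegrees.
 */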
static int ufs_get_temp(struct ufs_hba *hba, enum attr_idn idn, long *val)
{
u32 value;
int err;
err = ufshcd_query_attr(hba, UPIU_QUERY_OPCODE_READ_ATTR, idn, 0, 0, &value);
if (err)
return err;
if (value == 0)
return -ENODATA;
*val = ((long)value - 80) * MILLIDEGREE_PER_DEGREE;
return 0;
}
static int ufs_hwmon_read(struct device *dev, enum hwmon_sensor_types type, u32 attr, int channel,
long *val)
{
struct ufs_hwmon_data *data = dev_get_drvdata(dev);
struct ufs_hba *hba = data->hba;
int err;
down(&hba->host_sem);
if (!ufshcd_is_user_access_allowed(hba)) {
up(&hba->host_sem);
return -EBUSY;
}
ufshcd_rpm_get_sync(hba);
switch (attr) {
case hwmon_temp_enable:
err = ufs_read_temp_enable(hba, data->mask, val);
break;
case hwmon_temp_crit:
err = ufs_get_temp(hba, QUERY_ATTR_IDN_HIGH_TEMP_BOUND, val);
break;
case hwmon_temp_lcrit:
err = ufs_get_temp(hba, QUERY_ATTR_IDN_LOW_TEMP_BOUND, val);
break;
case hwmon_temp_input:
err = ufs_get_temp(hba, QUERY_ATTR_IDN_CASE_ROUGH_TEMP, val);
break;
default:
err = -EOPNOTSUPP;
break;
}
ufshcd_rpm_put_sync(hba);
up(&hba->host_sem);
return err;
}
static int ufs_hwmon_write(struct device *dev, enum hwmon_sensor_types type, u32 attr, int channel,
long val)
{
struct ufs_hwmon_data *data = dev_get_drvdata(dev);
struct ufs_hba *hba = data->hba;
int err;
if (attr != hwmon_temp_enable)
return -EINVAL;
if (val != 0 && val != 1)
return -EINVAL;
down(&hba->host_sem);
if (!ufshcd_is_user_access_allowed(hba)) {
up(&hba->host_sem);
return -EBUSY;
}
ufshcd_rpm_get_sync(hba);
if (val == 1)
err = ufshcd_update_ee_usr_mask(hba, MASK_EE_URGENT_TEMP, 0);
else
err = ufshcd_update_ee_usr_mask(hba, 0, MASK_EE_URGENT_TEMP);
ufshcd_rpm_put_sync(hba);
up(&hba->host_sem);
return err;
}
static umode_t ufs_hwmon_is_visible(const void *data,
enum hwmon_sensor_types type, u32 attr,
int channel)
{
if (type != hwmon_temp)
return 0;
switch (attr) {
case hwmon_temp_enable:
return 0644;
case hwmon_temp_crit:
case hwmon_temp_lcrit:
case hwmon_temp_input:
return 0444;
default:
break;
}
return 0;
}
static const struct hwmon_channel_info *const ufs_hwmon_info[] = {
HWMON_CHANNEL_INFO(temp, HWMON_T_ENABLE | HWMON_T_INPUT | HWMON_T_CRIT | HWMON_T_LCRIT),
NULL
};
static const struct hwmon_ops ufs_hwmon_ops = {
.is_visible = ufs_hwmon_is_visible,
.read = ufs_hwmon_read,
.write = ufs_hwmon_write,
};
static const struct hwmon_chip_info ufs_hwmon_hba_info = {
.ops = &ufs_hwmon_ops,
.info = ufs_hwmon_info,
};
void ufs_hwmon_probe(struct ufs_hba *hba, u8 mask)
{
struct device *dev = hba->dev;
struct ufs_hwmon_data *data;
struct device *hwmon;
data = kzalloc(sizeof(*data), GFP_KERNEL);
if (!data)
return;
data->hba = hba;
data->mask = mask;
hwmon = hwmon_device_register_with_info(dev, "ufs", data, &ufs_hwmon_hba_info, NULL);
if (IS_ERR(hwmon)) {
dev_warn(dev, "Failed to instantiate hwmon device\n");
kfree(data);
return;
}
hba->hwmon_device = hwmon;
}
void ufs_hwmon_remove(struct ufs_hba *hba)
{
struct ufs_hwmon_data *data;
if (!hba->hwmon_device)
return;
data = dev_get_drvdata(hba->hwmon_device);
hwmon_device_unregister(hba->hwmon_device);
hba->hwmon_device = NULL;
kfree(data);
}
void ufs_hwmon_notify_event(struct ufs_hba *hba, u8 ee_mask)
{
if (!hba->hwmon_device)
return;
if (ee_mask & MASK_EE_TOO_HIGH_TEMP)
hwmon_notify_event(hba->hwmon_device, hwmon_temp, hwmon_temp_max_alarm, 0);
if (ee_mask & MASK_EE_TOO_LOW_TEMP)
hwmon_notify_event(hba->hwmon_device, hwmon_temp, hwmon_temp_min_alarm, 0);
}
| linux-master | drivers/ufs/core/ufs-hwmon.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* UNISOC UFS Host Controller driver
*
* Copyright (C) 2022 Unisoc, Inc.
* Author: Zhe Wang <[email protected]>
*/
#include <linux/arm-smccc.h>
#include <linux/mfd/syscon.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/reset.h>
#include <linux/regulator/consumer.h>
#include <ufs/ufshcd.h>
#include "ufshcd-pltfrm.h"
#include "ufs-sprd.h"
static const struct of_device_id ufs_sprd_of_match[];
static struct ufs_sprd_priv *ufs_sprd_get_priv_data(struct ufs_hba *hba)
{
struct ufs_sprd_host *host = ufshcd_get_variant(hba);
WARN_ON(!host->priv);
return host->priv;
}
static void ufs_sprd_regmap_update(struct ufs_sprd_priv *priv, unsigned int index,
unsigned int reg, unsigned int bits, unsigned int val)
{
regmap_update_bits(priv->sysci[index].regmap, reg, bits, val);
}
static void ufs_sprd_regmap_read(struct ufs_sprd_priv *priv, unsigned int index,
unsigned int reg, unsigned int *val)
{
regmap_read(priv->sysci[index].regmap, reg, val);
}
static void ufs_sprd_get_unipro_ver(struct ufs_hba *hba)
{
struct ufs_sprd_host *host = ufshcd_get_variant(hba);
if (ufshcd_dme_get(hba, UIC_ARG_MIB(PA_LOCALVERINFO), &host->unipro_ver))
host->unipro_ver = 0;
}
static void ufs_sprd_ctrl_uic_compl(struct ufs_hba *hba, bool enable)
{
u32 set = ufshcd_readl(hba, REG_INTERRUPT_ENABLE);
	if (enable)
set |= UIC_COMMAND_COMPL;
else
set &= ~UIC_COMMAND_COMPL;
ufshcd_writel(hba, set, REG_INTERRUPT_ENABLE);
}
static int ufs_sprd_get_reset_ctrl(struct device *dev, struct ufs_sprd_rst *rci)
{
rci->rc = devm_reset_control_get(dev, rci->name);
if (IS_ERR(rci->rc)) {
dev_err(dev, "failed to get reset ctrl:%s\n", rci->name);
return PTR_ERR(rci->rc);
}
return 0;
}
static int ufs_sprd_get_syscon_reg(struct device *dev, struct ufs_sprd_syscon *sysci)
{
sysci->regmap = syscon_regmap_lookup_by_phandle(dev->of_node, sysci->name);
if (IS_ERR(sysci->regmap)) {
dev_err(dev, "failed to get ufs syscon:%s\n", sysci->name);
return PTR_ERR(sysci->regmap);
}
return 0;
}
static int ufs_sprd_get_vreg(struct device *dev, struct ufs_sprd_vreg *vregi)
{
vregi->vreg = devm_regulator_get(dev, vregi->name);
if (IS_ERR(vregi->vreg)) {
dev_err(dev, "failed to get vreg:%s\n", vregi->name);
return PTR_ERR(vregi->vreg);
}
return 0;
}
static int ufs_sprd_parse_dt(struct device *dev, struct ufs_hba *hba, struct ufs_sprd_host *host)
{
u32 i;
struct ufs_sprd_priv *priv = host->priv;
int ret = 0;
/* Parse UFS reset ctrl info */
for (i = 0; i < SPRD_UFS_RST_MAX; i++) {
if (!priv->rci[i].name)
continue;
ret = ufs_sprd_get_reset_ctrl(dev, &priv->rci[i]);
if (ret)
goto out;
}
/* Parse UFS syscon reg info */
for (i = 0; i < SPRD_UFS_SYSCON_MAX; i++) {
if (!priv->sysci[i].name)
continue;
ret = ufs_sprd_get_syscon_reg(dev, &priv->sysci[i]);
if (ret)
goto out;
}
/* Parse UFS vreg info */
for (i = 0; i < SPRD_UFS_VREG_MAX; i++) {
if (!priv->vregi[i].name)
continue;
ret = ufs_sprd_get_vreg(dev, &priv->vregi[i]);
if (ret)
goto out;
}
out:
return ret;
}
static int ufs_sprd_common_init(struct ufs_hba *hba)
{
struct device *dev = hba->dev;
struct ufs_sprd_host *host;
struct platform_device __maybe_unused *pdev = to_platform_device(dev);
const struct of_device_id *of_id;
int ret = 0;
host = devm_kzalloc(dev, sizeof(*host), GFP_KERNEL);
if (!host)
return -ENOMEM;
of_id = of_match_node(ufs_sprd_of_match, pdev->dev.of_node);
if (of_id->data != NULL)
host->priv = container_of(of_id->data, struct ufs_sprd_priv,
ufs_hba_sprd_vops);
host->hba = hba;
ufshcd_set_variant(hba, host);
hba->caps |= UFSHCD_CAP_CLK_GATING |
UFSHCD_CAP_CRYPTO |
UFSHCD_CAP_WB_EN;
hba->quirks |= UFSHCD_QUIRK_DELAY_BEFORE_DME_CMDS;
ret = ufs_sprd_parse_dt(dev, hba, host);
return ret;
}
static int sprd_ufs_pwr_change_notify(struct ufs_hba *hba,
enum ufs_notify_change_status status,
struct ufs_pa_layer_attr *dev_max_params,
struct ufs_pa_layer_attr *dev_req_params)
{
struct ufs_sprd_host *host = ufshcd_get_variant(hba);
if (status == PRE_CHANGE) {
memcpy(dev_req_params, dev_max_params,
sizeof(struct ufs_pa_layer_attr));
if (host->unipro_ver >= UFS_UNIPRO_VER_1_8)
ufshcd_dme_configure_adapt(hba, dev_req_params->gear_tx,
PA_INITIAL_ADAPT);
}
return 0;
}
static int ufs_sprd_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op,
enum ufs_notify_change_status status)
{
unsigned long flags;
if (status == PRE_CHANGE) {
if (ufshcd_is_auto_hibern8_supported(hba)) {
spin_lock_irqsave(hba->host->host_lock, flags);
ufshcd_writel(hba, 0, REG_AUTO_HIBERNATE_IDLE_TIMER);
spin_unlock_irqrestore(hba->host->host_lock, flags);
}
}
return 0;
}
static void ufs_sprd_n6_host_reset(struct ufs_hba *hba)
{
struct ufs_sprd_priv *priv = ufs_sprd_get_priv_data(hba);
dev_info(hba->dev, "ufs host reset!\n");
reset_control_assert(priv->rci[SPRD_UFSHCI_SOFT_RST].rc);
usleep_range(1000, 1100);
reset_control_deassert(priv->rci[SPRD_UFSHCI_SOFT_RST].rc);
}
static int ufs_sprd_n6_device_reset(struct ufs_hba *hba)
{
struct ufs_sprd_priv *priv = ufs_sprd_get_priv_data(hba);
dev_info(hba->dev, "ufs device reset!\n");
reset_control_assert(priv->rci[SPRD_UFS_DEV_RST].rc);
usleep_range(1000, 1100);
reset_control_deassert(priv->rci[SPRD_UFS_DEV_RST].rc);
return 0;
}
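/*
 * Crypto key register access can only be enabled through the SMC call while
 * the host controller enable bit is set. Retry bringing HCE up a few times;
 * if that or the SMC fails, fall back to running without inline crypto.
 */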
static void ufs_sprd_n6_key_acc_enable(struct ufs_hba *hba)
{
u32 val;
u32 retry = 10;
struct arm_smccc_res res;
check_hce:
/* Key access only can be enabled under HCE enable */
val = ufshcd_readl(hba, REG_CONTROLLER_ENABLE);
if (!(val & CONTROLLER_ENABLE)) {
ufs_sprd_n6_host_reset(hba);
val |= CONTROLLER_ENABLE;
ufshcd_writel(hba, val, REG_CONTROLLER_ENABLE);
usleep_range(1000, 1100);
if (retry) {
retry--;
goto check_hce;
}
goto disable_crypto;
}
arm_smccc_smc(SPRD_SIP_SVC_STORAGE_UFS_CRYPTO_ENABLE,
0, 0, 0, 0, 0, 0, 0, &res);
if (!res.a0)
return;
disable_crypto:
dev_err(hba->dev, "key reg access enable fail, disable crypto\n");
hba->caps &= ~UFSHCD_CAP_CRYPTO;
}
static int ufs_sprd_n6_init(struct ufs_hba *hba)
{
struct ufs_sprd_priv *priv;
int ret = 0;
ret = ufs_sprd_common_init(hba);
if (ret != 0)
return ret;
priv = ufs_sprd_get_priv_data(hba);
ret = regulator_enable(priv->vregi[SPRD_UFS_VDD_MPHY].vreg);
if (ret)
return -ENODEV;
if (hba->caps & UFSHCD_CAP_CRYPTO)
ufs_sprd_n6_key_acc_enable(hba);
return 0;
}
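/*
 * Bring up the M-PHY: wait for the PHY SRAM initialization to complete, run
 * the AFE calibration writes for each lane, then signal that the external
 * load is done and take the M-PHY out of its disabled state.
 */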
static int ufs_sprd_n6_phy_init(struct ufs_hba *hba)
{
int ret = 0;
uint32_t val = 0;
uint32_t retry = 10;
uint32_t offset;
struct ufs_sprd_priv *priv = ufs_sprd_get_priv_data(hba);
ufshcd_dme_set(hba, UIC_ARG_MIB(CBREFCLKCTRL2), 0x90);
ufshcd_dme_set(hba, UIC_ARG_MIB(CBCRCTRL), 0x01);
ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(RXSQCONTROL,
UIC_ARG_MPHY_RX_GEN_SEL_INDEX(0)), 0x01);
ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(RXSQCONTROL,
UIC_ARG_MPHY_RX_GEN_SEL_INDEX(1)), 0x01);
ufshcd_dme_set(hba, UIC_ARG_MIB(VS_MPHYCFGUPDT), 0x01);
ufshcd_dme_set(hba, UIC_ARG_MIB(CBRATESEL), 0x01);
do {
/* phy_sram_init_done */
ufs_sprd_regmap_read(priv, SPRD_UFS_ANLG, 0xc, &val);
if ((val & 0x1) == 0x1) {
for (offset = 0x40; offset < 0x42; offset++) {
/* Lane afe calibration */
ufshcd_dme_set(hba, UIC_ARG_MIB(CBCREGADDRLSB), 0x1c);
ufshcd_dme_set(hba, UIC_ARG_MIB(CBCREGADDRMSB), offset);
ufshcd_dme_set(hba, UIC_ARG_MIB(CBCREGWRLSB), 0x04);
ufshcd_dme_set(hba, UIC_ARG_MIB(CBCREGWRMSB), 0x00);
ufshcd_dme_set(hba, UIC_ARG_MIB(CBCREGRDWRSEL), 0x01);
ufshcd_dme_set(hba, UIC_ARG_MIB(VS_MPHYCFGUPDT), 0x01);
}
goto update_phy;
}
udelay(1000);
retry--;
} while (retry > 0);
ret = -ETIMEDOUT;
goto out;
update_phy:
/* phy_sram_ext_ld_done */
ufs_sprd_regmap_update(priv, SPRD_UFS_ANLG, 0xc, 0x2, 0);
ufshcd_dme_set(hba, UIC_ARG_MIB(VS_MPHYCFGUPDT), 0x01);
ufshcd_dme_set(hba, UIC_ARG_MIB(VS_MPHYDISABLE), 0x0);
out:
return ret;
}
static int sprd_ufs_n6_hce_enable_notify(struct ufs_hba *hba,
enum ufs_notify_change_status status)
{
int err = 0;
struct ufs_sprd_priv *priv = ufs_sprd_get_priv_data(hba);
if (status == PRE_CHANGE) {
/* phy_sram_ext_ld_done */
ufs_sprd_regmap_update(priv, SPRD_UFS_ANLG, 0xc, 0x2, 0x2);
/* phy_sram_bypass */
ufs_sprd_regmap_update(priv, SPRD_UFS_ANLG, 0xc, 0x4, 0x4);
ufs_sprd_n6_host_reset(hba);
if (hba->caps & UFSHCD_CAP_CRYPTO)
ufs_sprd_n6_key_acc_enable(hba);
}
if (status == POST_CHANGE) {
err = ufs_sprd_n6_phy_init(hba);
if (err) {
dev_err(hba->dev, "Phy setup failed (%d)\n", err);
goto out;
}
ufs_sprd_get_unipro_ver(hba);
}
out:
return err;
}
static void sprd_ufs_n6_h8_notify(struct ufs_hba *hba,
enum uic_cmd_dme cmd,
enum ufs_notify_change_status status)
{
struct ufs_sprd_priv *priv = ufs_sprd_get_priv_data(hba);
if (status == PRE_CHANGE) {
if (cmd == UIC_CMD_DME_HIBER_ENTER)
/*
* Disable UIC COMPL INTR to prevent access to UFSHCI after
* checking HCS.UPMCRS
*/
ufs_sprd_ctrl_uic_compl(hba, false);
if (cmd == UIC_CMD_DME_HIBER_EXIT) {
ufs_sprd_regmap_update(priv, SPRD_UFS_AON_APB, APB_UFSDEV_REG,
APB_UFSDEV_REFCLK_EN, APB_UFSDEV_REFCLK_EN);
ufs_sprd_regmap_update(priv, SPRD_UFS_AON_APB, APB_USB31PLL_CTRL,
APB_USB31PLLV_REF2MPHY, APB_USB31PLLV_REF2MPHY);
}
}
if (status == POST_CHANGE) {
if (cmd == UIC_CMD_DME_HIBER_EXIT)
ufs_sprd_ctrl_uic_compl(hba, true);
if (cmd == UIC_CMD_DME_HIBER_ENTER) {
ufs_sprd_regmap_update(priv, SPRD_UFS_AON_APB, APB_UFSDEV_REG,
APB_UFSDEV_REFCLK_EN, 0);
ufs_sprd_regmap_update(priv, SPRD_UFS_AON_APB, APB_USB31PLL_CTRL,
APB_USB31PLLV_REF2MPHY, 0);
}
}
}
static struct ufs_sprd_priv n6_ufs = {
.rci[SPRD_UFSHCI_SOFT_RST] = { .name = "controller", },
.rci[SPRD_UFS_DEV_RST] = { .name = "device", },
.sysci[SPRD_UFS_ANLG] = { .name = "sprd,ufs-anlg-syscon", },
.sysci[SPRD_UFS_AON_APB] = { .name = "sprd,aon-apb-syscon", },
.vregi[SPRD_UFS_VDD_MPHY] = { .name = "vdd-mphy", },
.ufs_hba_sprd_vops = {
.name = "sprd,ums9620-ufs",
.init = ufs_sprd_n6_init,
.hce_enable_notify = sprd_ufs_n6_hce_enable_notify,
.pwr_change_notify = sprd_ufs_pwr_change_notify,
.hibern8_notify = sprd_ufs_n6_h8_notify,
.device_reset = ufs_sprd_n6_device_reset,
.suspend = ufs_sprd_suspend,
},
};
static const struct of_device_id __maybe_unused ufs_sprd_of_match[] = {
{ .compatible = "sprd,ums9620-ufs", .data = &n6_ufs.ufs_hba_sprd_vops},
{},
};
MODULE_DEVICE_TABLE(of, ufs_sprd_of_match);
static int ufs_sprd_probe(struct platform_device *pdev)
{
int err;
struct device *dev = &pdev->dev;
const struct of_device_id *of_id;
of_id = of_match_node(ufs_sprd_of_match, dev->of_node);
err = ufshcd_pltfrm_init(pdev, of_id->data);
if (err)
dev_err(dev, "ufshcd_pltfrm_init() failed %d\n", err);
return err;
}
static int ufs_sprd_remove(struct platform_device *pdev)
{
struct ufs_hba *hba = platform_get_drvdata(pdev);
pm_runtime_get_sync(&(pdev)->dev);
ufshcd_remove(hba);
return 0;
}
static const struct dev_pm_ops ufs_sprd_pm_ops = {
SET_SYSTEM_SLEEP_PM_OPS(ufshcd_system_suspend, ufshcd_system_resume)
SET_RUNTIME_PM_OPS(ufshcd_runtime_suspend, ufshcd_runtime_resume, NULL)
.prepare = ufshcd_suspend_prepare,
.complete = ufshcd_resume_complete,
};
static struct platform_driver ufs_sprd_pltform = {
.probe = ufs_sprd_probe,
.remove = ufs_sprd_remove,
.driver = {
.name = "ufshcd-sprd",
.pm = &ufs_sprd_pm_ops,
.of_match_table = of_match_ptr(ufs_sprd_of_match),
},
};
module_platform_driver(ufs_sprd_pltform);
MODULE_AUTHOR("Zhe Wang <[email protected]>");
MODULE_DESCRIPTION("Unisoc UFS Host Driver");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/ufs/host/ufs-sprd.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* HiSilicon Hixxxx UFS Driver
*
* Copyright (c) 2016-2017 Linaro Ltd.
* Copyright (c) 2016-2017 HiSilicon Technologies Co., Ltd.
*/
#include <linux/time.h>
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/dma-mapping.h>
#include <linux/platform_device.h>
#include <linux/reset.h>
#include <ufs/ufshcd.h>
#include "ufshcd-pltfrm.h"
#include <ufs/unipro.h>
#include "ufs-hisi.h"
#include <ufs/ufshci.h>
#include <ufs/ufs_quirks.h>
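/*
 * Poll the M-PHY TX FSM state of both lanes until they report HIBERN8, or
 * give up after HBRN8_POLL_TOUT_MS and log the offending state.
 */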
static int ufs_hisi_check_hibern8(struct ufs_hba *hba)
{
int err = 0;
u32 tx_fsm_val_0 = 0;
u32 tx_fsm_val_1 = 0;
unsigned long timeout = jiffies + msecs_to_jiffies(HBRN8_POLL_TOUT_MS);
do {
err = ufshcd_dme_get(hba, UIC_ARG_MIB_SEL(MPHY_TX_FSM_STATE, 0),
&tx_fsm_val_0);
err |= ufshcd_dme_get(hba,
UIC_ARG_MIB_SEL(MPHY_TX_FSM_STATE, 1), &tx_fsm_val_1);
if (err || (tx_fsm_val_0 == TX_FSM_HIBERN8 &&
tx_fsm_val_1 == TX_FSM_HIBERN8))
break;
/* sleep for max. 200us */
usleep_range(100, 200);
} while (time_before(jiffies, timeout));
/*
* we might have scheduled out for long during polling so
* check the state again.
*/
if (time_after(jiffies, timeout)) {
err = ufshcd_dme_get(hba, UIC_ARG_MIB_SEL(MPHY_TX_FSM_STATE, 0),
&tx_fsm_val_0);
err |= ufshcd_dme_get(hba,
UIC_ARG_MIB_SEL(MPHY_TX_FSM_STATE, 1), &tx_fsm_val_1);
}
if (err) {
dev_err(hba->dev, "%s: unable to get TX_FSM_STATE, err %d\n",
__func__, err);
} else if (tx_fsm_val_0 != TX_FSM_HIBERN8 ||
tx_fsm_val_1 != TX_FSM_HIBERN8) {
err = -1;
dev_err(hba->dev, "%s: invalid TX_FSM_STATE, lane0 = %d, lane1 = %d\n",
__func__, tx_fsm_val_0, tx_fsm_val_1);
}
return err;
}
static void ufs_hisi_clk_init(struct ufs_hba *hba)
{
struct ufs_hisi_host *host = ufshcd_get_variant(hba);
ufs_sys_ctrl_clr_bits(host, BIT_SYSCTRL_REF_CLOCK_EN, PHY_CLK_CTRL);
if (ufs_sys_ctrl_readl(host, PHY_CLK_CTRL) & BIT_SYSCTRL_REF_CLOCK_EN)
mdelay(1);
/* use abb clk */
ufs_sys_ctrl_clr_bits(host, BIT_UFS_REFCLK_SRC_SEl, UFS_SYSCTRL);
ufs_sys_ctrl_clr_bits(host, BIT_UFS_REFCLK_ISO_EN, PHY_ISO_EN);
/* open mphy ref clk */
ufs_sys_ctrl_set_bits(host, BIT_SYSCTRL_REF_CLOCK_EN, PHY_CLK_CTRL);
}
static void ufs_hisi_soc_init(struct ufs_hba *hba)
{
struct ufs_hisi_host *host = ufshcd_get_variant(hba);
u32 reg;
if (!IS_ERR(host->rst))
reset_control_assert(host->rst);
/* HC_PSW powerup */
ufs_sys_ctrl_set_bits(host, BIT_UFS_PSW_MTCMOS_EN, PSW_POWER_CTRL);
udelay(10);
/* notify PWR ready */
ufs_sys_ctrl_set_bits(host, BIT_SYSCTRL_PWR_READY, HC_LP_CTRL);
ufs_sys_ctrl_writel(host, MASK_UFS_DEVICE_RESET | 0,
UFS_DEVICE_RESET_CTRL);
reg = ufs_sys_ctrl_readl(host, PHY_CLK_CTRL);
reg = (reg & ~MASK_SYSCTRL_CFG_CLOCK_FREQ) | UFS_FREQ_CFG_CLK;
/* set cfg clk freq */
ufs_sys_ctrl_writel(host, reg, PHY_CLK_CTRL);
/* set ref clk freq */
ufs_sys_ctrl_clr_bits(host, MASK_SYSCTRL_REF_CLOCK_SEL, PHY_CLK_CTRL);
/* bypass ufs clk gate */
ufs_sys_ctrl_set_bits(host, MASK_UFS_CLK_GATE_BYPASS,
CLOCK_GATE_BYPASS);
ufs_sys_ctrl_set_bits(host, MASK_UFS_SYSCRTL_BYPASS, UFS_SYSCTRL);
/* open psw clk */
ufs_sys_ctrl_set_bits(host, BIT_SYSCTRL_PSW_CLK_EN, PSW_CLK_CTRL);
/* disable ufshc iso */
ufs_sys_ctrl_clr_bits(host, BIT_UFS_PSW_ISO_CTRL, PSW_POWER_CTRL);
/* disable phy iso */
ufs_sys_ctrl_clr_bits(host, BIT_UFS_PHY_ISO_CTRL, PHY_ISO_EN);
/* notice iso disable */
ufs_sys_ctrl_clr_bits(host, BIT_SYSCTRL_LP_ISOL_EN, HC_LP_CTRL);
/* disable lp_reset_n */
ufs_sys_ctrl_set_bits(host, BIT_SYSCTRL_LP_RESET_N, RESET_CTRL_EN);
mdelay(1);
ufs_sys_ctrl_writel(host, MASK_UFS_DEVICE_RESET | BIT_UFS_DEVICE_RESET,
UFS_DEVICE_RESET_CTRL);
msleep(20);
/*
* enable the fix of linereset recovery,
* and enable rx_reset/tx_rest beat
* enable ref_clk_en override(bit5) &
* override value = 1(bit4), with mask
*/
ufs_sys_ctrl_writel(host, 0x03300330, UFS_DEVICE_RESET_CTRL);
if (!IS_ERR(host->rst))
reset_control_deassert(host->rst);
}
static int ufs_hisi_link_startup_pre_change(struct ufs_hba *hba)
{
struct ufs_hisi_host *host = ufshcd_get_variant(hba);
int err;
uint32_t value;
uint32_t reg;
/* Unipro VS_mphy_disable */
ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0xD0C1, 0x0), 0x1);
/* PA_HSSeries */
ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x156A, 0x0), 0x2);
/* MPHY CBRATESEL */
ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x8114, 0x0), 0x1);
/* MPHY CBOVRCTRL2 */
ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x8121, 0x0), 0x2D);
/* MPHY CBOVRCTRL3 */
ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x8122, 0x0), 0x1);
if (host->caps & UFS_HISI_CAP_PHY10nm) {
/* MPHY CBOVRCTRL4 */
ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x8127, 0x0), 0x98);
/* MPHY CBOVRCTRL5 */
ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x8128, 0x0), 0x1);
}
/* Unipro VS_MphyCfgUpdt */
ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0xD085, 0x0), 0x1);
/* MPHY RXOVRCTRL4 rx0 */
ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x800D, 0x4), 0x58);
/* MPHY RXOVRCTRL4 rx1 */
ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x800D, 0x5), 0x58);
/* MPHY RXOVRCTRL5 rx0 */
ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x800E, 0x4), 0xB);
/* MPHY RXOVRCTRL5 rx1 */
ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x800E, 0x5), 0xB);
/* MPHY RXSQCONTROL rx0 */
ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x8009, 0x4), 0x1);
/* MPHY RXSQCONTROL rx1 */
ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x8009, 0x5), 0x1);
/* Unipro VS_MphyCfgUpdt */
ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0xD085, 0x0), 0x1);
ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x8113, 0x0), 0x1);
ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0xD085, 0x0), 0x1);
if (host->caps & UFS_HISI_CAP_PHY10nm) {
		/* RX_Hibern8Time_Capability */
ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x0092, 0x4), 0xA);
		/* RX_Hibern8Time_Capability */
ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x0092, 0x5), 0xA);
/* RX_Min_ActivateTime */
ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x008f, 0x4), 0xA);
		/* RX_Min_ActivateTime */
ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x008f, 0x5), 0xA);
} else {
/* Tactive RX */
ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x008F, 0x4), 0x7);
/* Tactive RX */
ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x008F, 0x5), 0x7);
}
/* Gear3 Synclength */
ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x0095, 0x4), 0x4F);
/* Gear3 Synclength */
ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x0095, 0x5), 0x4F);
/* Gear2 Synclength */
ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x0094, 0x4), 0x4F);
/* Gear2 Synclength */
ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x0094, 0x5), 0x4F);
/* Gear1 Synclength */
ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x008B, 0x4), 0x4F);
/* Gear1 Synclength */
ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x008B, 0x5), 0x4F);
/* Thibernate Tx */
ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x000F, 0x0), 0x5);
/* Thibernate Tx */
ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x000F, 0x1), 0x5);
ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0xD085, 0x0), 0x1);
/* Unipro VS_mphy_disable */
ufshcd_dme_get(hba, UIC_ARG_MIB_SEL(0xD0C1, 0x0), &value);
if (value != 0x1)
dev_info(hba->dev,
"Warring!!! Unipro VS_mphy_disable is 0x%x\n", value);
/* Unipro VS_mphy_disable */
ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0xD0C1, 0x0), 0x0);
err = ufs_hisi_check_hibern8(hba);
if (err)
dev_err(hba->dev, "ufs_hisi_check_hibern8 error\n");
if (!(host->caps & UFS_HISI_CAP_PHY10nm))
ufshcd_writel(hba, UFS_HCLKDIV_NORMAL_VALUE, UFS_REG_HCLKDIV);
/* disable auto H8 */
reg = ufshcd_readl(hba, REG_AUTO_HIBERNATE_IDLE_TIMER);
reg = reg & (~UFS_AHIT_AH8ITV_MASK);
ufshcd_writel(hba, reg, REG_AUTO_HIBERNATE_IDLE_TIMER);
/* Unipro PA_Local_TX_LCC_Enable */
ufshcd_disable_host_tx_lcc(hba);
/* close Unipro VS_Mk2ExtnSupport */
ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0xD0AB, 0x0), 0x0);
ufshcd_dme_get(hba, UIC_ARG_MIB_SEL(0xD0AB, 0x0), &value);
if (value != 0) {
/* Ensure close success */
dev_info(hba->dev, "WARN: close VS_Mk2ExtnSupport failed\n");
}
return err;
}
static int ufs_hisi_link_startup_post_change(struct ufs_hba *hba)
{
struct ufs_hisi_host *host = ufshcd_get_variant(hba);
/* Unipro DL_AFC0CreditThreshold */
ufshcd_dme_set(hba, UIC_ARG_MIB(0x2044), 0x0);
/* Unipro DL_TC0OutAckThreshold */
ufshcd_dme_set(hba, UIC_ARG_MIB(0x2045), 0x0);
/* Unipro DL_TC0TXFCThreshold */
ufshcd_dme_set(hba, UIC_ARG_MIB(0x2040), 0x9);
/* not bypass ufs clk gate */
ufs_sys_ctrl_clr_bits(host, MASK_UFS_CLK_GATE_BYPASS,
CLOCK_GATE_BYPASS);
ufs_sys_ctrl_clr_bits(host, MASK_UFS_SYSCRTL_BYPASS,
UFS_SYSCTRL);
/* select received symbol cnt */
ufshcd_dme_set(hba, UIC_ARG_MIB(0xd09a), 0x80000000);
/* reset counter0 and enable */
ufshcd_dme_set(hba, UIC_ARG_MIB(0xd09c), 0x00000005);
return 0;
}
static int ufs_hisi_link_startup_notify(struct ufs_hba *hba,
enum ufs_notify_change_status status)
{
int err = 0;
switch (status) {
case PRE_CHANGE:
err = ufs_hisi_link_startup_pre_change(hba);
break;
case POST_CHANGE:
err = ufs_hisi_link_startup_post_change(hba);
break;
default:
break;
}
return err;
}
static void ufs_hisi_set_dev_cap(struct ufs_dev_params *hisi_param)
{
ufshcd_init_pwr_dev_param(hisi_param);
}
static void ufs_hisi_pwr_change_pre_change(struct ufs_hba *hba)
{
struct ufs_hisi_host *host = ufshcd_get_variant(hba);
if (host->caps & UFS_HISI_CAP_PHY10nm) {
/*
		 * The Boston platform needs to set SaveConfigTime to 0x13,
		 * and change the sync length to its maximum value.
*/
/* VS_DebugSaveConfigTime */
ufshcd_dme_set(hba, UIC_ARG_MIB((u32)0xD0A0), 0x13);
/* g1 sync length */
ufshcd_dme_set(hba, UIC_ARG_MIB((u32)0x1552), 0x4f);
/* g2 sync length */
ufshcd_dme_set(hba, UIC_ARG_MIB((u32)0x1554), 0x4f);
/* g3 sync length */
ufshcd_dme_set(hba, UIC_ARG_MIB((u32)0x1556), 0x4f);
/* PA_Hibern8Time */
ufshcd_dme_set(hba, UIC_ARG_MIB((u32)0x15a7), 0xA);
/* PA_Tactivate */
ufshcd_dme_set(hba, UIC_ARG_MIB((u32)0x15a8), 0xA);
ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0xd085, 0x0), 0x01);
}
if (hba->dev_quirks & UFS_DEVICE_QUIRK_HOST_VS_DEBUGSAVECONFIGTIME) {
pr_info("ufs flash device must set VS_DebugSaveConfigTime 0x10\n");
/* VS_DebugSaveConfigTime */
ufshcd_dme_set(hba, UIC_ARG_MIB(0xD0A0), 0x10);
/* sync length */
ufshcd_dme_set(hba, UIC_ARG_MIB(0x1556), 0x48);
}
/* update */
ufshcd_dme_set(hba, UIC_ARG_MIB(0x15A8), 0x1);
/* PA_TxSkip */
ufshcd_dme_set(hba, UIC_ARG_MIB(0x155c), 0x0);
/*PA_PWRModeUserData0 = 8191, default is 0*/
ufshcd_dme_set(hba, UIC_ARG_MIB(0x15b0), SZ_8K - 1);
/*PA_PWRModeUserData1 = 65535, default is 0*/
ufshcd_dme_set(hba, UIC_ARG_MIB(0x15b1), SZ_64K - 1);
/*PA_PWRModeUserData2 = 32767, default is 0*/
ufshcd_dme_set(hba, UIC_ARG_MIB(0x15b2), SZ_32K - 1);
/*DME_FC0ProtectionTimeOutVal = 8191, default is 0*/
ufshcd_dme_set(hba, UIC_ARG_MIB(0xd041), SZ_8K - 1);
/*DME_TC0ReplayTimeOutVal = 65535, default is 0*/
ufshcd_dme_set(hba, UIC_ARG_MIB(0xd042), SZ_64K - 1);
/*DME_AFC0ReqTimeOutVal = 32767, default is 0*/
ufshcd_dme_set(hba, UIC_ARG_MIB(0xd043), SZ_32K - 1);
/*PA_PWRModeUserData3 = 8191, default is 0*/
ufshcd_dme_set(hba, UIC_ARG_MIB(0x15b3), SZ_8K - 1);
/*PA_PWRModeUserData4 = 65535, default is 0*/
ufshcd_dme_set(hba, UIC_ARG_MIB(0x15b4), SZ_64K - 1);
/*PA_PWRModeUserData5 = 32767, default is 0*/
ufshcd_dme_set(hba, UIC_ARG_MIB(0x15b5), SZ_32K - 1);
/*DME_FC1ProtectionTimeOutVal = 8191, default is 0*/
ufshcd_dme_set(hba, UIC_ARG_MIB(0xd044), SZ_8K - 1);
/*DME_TC1ReplayTimeOutVal = 65535, default is 0*/
ufshcd_dme_set(hba, UIC_ARG_MIB(0xd045), SZ_64K - 1);
/*DME_AFC1ReqTimeOutVal = 32767, default is 0*/
ufshcd_dme_set(hba, UIC_ARG_MIB(0xd046), SZ_32K - 1);
}
static int ufs_hisi_pwr_change_notify(struct ufs_hba *hba,
enum ufs_notify_change_status status,
struct ufs_pa_layer_attr *dev_max_params,
struct ufs_pa_layer_attr *dev_req_params)
{
struct ufs_dev_params ufs_hisi_cap;
int ret = 0;
if (!dev_req_params) {
dev_err(hba->dev,
"%s: incoming dev_req_params is NULL\n", __func__);
ret = -EINVAL;
goto out;
}
switch (status) {
case PRE_CHANGE:
ufs_hisi_set_dev_cap(&ufs_hisi_cap);
ret = ufshcd_get_pwr_dev_param(&ufs_hisi_cap,
dev_max_params, dev_req_params);
if (ret) {
dev_err(hba->dev,
"%s: failed to determine capabilities\n", __func__);
goto out;
}
ufs_hisi_pwr_change_pre_change(hba);
break;
case POST_CHANGE:
break;
default:
ret = -EINVAL;
break;
}
out:
return ret;
}
static int ufs_hisi_suspend_prepare(struct device *dev)
{
	/* RPM and SPM are different. Refer to ufs_hisi_suspend(). */
return __ufshcd_suspend_prepare(dev, false);
}
static int ufs_hisi_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op,
enum ufs_notify_change_status status)
{
struct ufs_hisi_host *host = ufshcd_get_variant(hba);
if (status == PRE_CHANGE)
return 0;
if (pm_op == UFS_RUNTIME_PM)
return 0;
if (host->in_suspend) {
WARN_ON(1);
return 0;
}
ufs_sys_ctrl_clr_bits(host, BIT_SYSCTRL_REF_CLOCK_EN, PHY_CLK_CTRL);
udelay(10);
/* set ref_dig_clk override of PHY PCS to 0 */
ufs_sys_ctrl_writel(host, 0x00100000, UFS_DEVICE_RESET_CTRL);
host->in_suspend = true;
return 0;
}
static int ufs_hisi_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op)
{
struct ufs_hisi_host *host = ufshcd_get_variant(hba);
if (!host->in_suspend)
return 0;
/* set ref_dig_clk override of PHY PCS to 1 */
ufs_sys_ctrl_writel(host, 0x00100010, UFS_DEVICE_RESET_CTRL);
udelay(10);
ufs_sys_ctrl_set_bits(host, BIT_SYSCTRL_REF_CLOCK_EN, PHY_CLK_CTRL);
host->in_suspend = false;
return 0;
}
static int ufs_hisi_get_resource(struct ufs_hisi_host *host)
{
struct device *dev = host->hba->dev;
struct platform_device *pdev = to_platform_device(dev);
/* get resource of ufs sys ctrl */
host->ufs_sys_ctrl = devm_platform_ioremap_resource(pdev, 1);
return PTR_ERR_OR_ZERO(host->ufs_sys_ctrl);
}
static void ufs_hisi_set_pm_lvl(struct ufs_hba *hba)
{
hba->rpm_lvl = UFS_PM_LVL_1;
hba->spm_lvl = UFS_PM_LVL_3;
}
/**
 * ufs_hisi_init_common - allocate and set up the HiSilicon host private data
 * @hba: host controller instance
 *
 * Return: 0 on success, a negative error code on failure.
 */
static int ufs_hisi_init_common(struct ufs_hba *hba)
{
int err = 0;
struct device *dev = hba->dev;
struct ufs_hisi_host *host;
host = devm_kzalloc(dev, sizeof(*host), GFP_KERNEL);
if (!host)
return -ENOMEM;
host->hba = hba;
ufshcd_set_variant(hba, host);
host->rst = devm_reset_control_get(dev, "rst");
if (IS_ERR(host->rst)) {
dev_err(dev, "%s: failed to get reset control\n", __func__);
err = PTR_ERR(host->rst);
goto error;
}
ufs_hisi_set_pm_lvl(hba);
err = ufs_hisi_get_resource(host);
if (err)
goto error;
return 0;
error:
ufshcd_set_variant(hba, NULL);
return err;
}
static int ufs_hi3660_init(struct ufs_hba *hba)
{
int ret = 0;
struct device *dev = hba->dev;
ret = ufs_hisi_init_common(hba);
if (ret) {
dev_err(dev, "%s: ufs common init fail\n", __func__);
return ret;
}
ufs_hisi_clk_init(hba);
ufs_hisi_soc_init(hba);
return 0;
}
static int ufs_hi3670_init(struct ufs_hba *hba)
{
int ret = 0;
struct device *dev = hba->dev;
struct ufs_hisi_host *host;
ret = ufs_hisi_init_common(hba);
if (ret) {
dev_err(dev, "%s: ufs common init fail\n", __func__);
return ret;
}
ufs_hisi_clk_init(hba);
ufs_hisi_soc_init(hba);
/* Add cap for 10nm PHY variant on HI3670 SoC */
host = ufshcd_get_variant(hba);
host->caps |= UFS_HISI_CAP_PHY10nm;
return 0;
}
static const struct ufs_hba_variant_ops ufs_hba_hi3660_vops = {
.name = "hi3660",
.init = ufs_hi3660_init,
.link_startup_notify = ufs_hisi_link_startup_notify,
.pwr_change_notify = ufs_hisi_pwr_change_notify,
.suspend = ufs_hisi_suspend,
.resume = ufs_hisi_resume,
};
static const struct ufs_hba_variant_ops ufs_hba_hi3670_vops = {
.name = "hi3670",
.init = ufs_hi3670_init,
.link_startup_notify = ufs_hisi_link_startup_notify,
.pwr_change_notify = ufs_hisi_pwr_change_notify,
.suspend = ufs_hisi_suspend,
.resume = ufs_hisi_resume,
};
static const struct of_device_id ufs_hisi_of_match[] = {
{ .compatible = "hisilicon,hi3660-ufs", .data = &ufs_hba_hi3660_vops },
{ .compatible = "hisilicon,hi3670-ufs", .data = &ufs_hba_hi3670_vops },
{},
};
MODULE_DEVICE_TABLE(of, ufs_hisi_of_match);
static int ufs_hisi_probe(struct platform_device *pdev)
{
const struct of_device_id *of_id;
of_id = of_match_node(ufs_hisi_of_match, pdev->dev.of_node);
return ufshcd_pltfrm_init(pdev, of_id->data);
}
static int ufs_hisi_remove(struct platform_device *pdev)
{
struct ufs_hba *hba = platform_get_drvdata(pdev);
ufshcd_remove(hba);
return 0;
}
static const struct dev_pm_ops ufs_hisi_pm_ops = {
SET_SYSTEM_SLEEP_PM_OPS(ufshcd_system_suspend, ufshcd_system_resume)
SET_RUNTIME_PM_OPS(ufshcd_runtime_suspend, ufshcd_runtime_resume, NULL)
.prepare = ufs_hisi_suspend_prepare,
.complete = ufshcd_resume_complete,
};
static struct platform_driver ufs_hisi_pltform = {
.probe = ufs_hisi_probe,
.remove = ufs_hisi_remove,
.driver = {
.name = "ufshcd-hisi",
.pm = &ufs_hisi_pm_ops,
.of_match_table = ufs_hisi_of_match,
},
};
module_platform_driver(ufs_hisi_pltform);
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:ufshcd-hisi");
MODULE_DESCRIPTION("HiSilicon Hixxxx UFS Driver");
| linux-master | drivers/ufs/host/ufs-hisi.c |
// SPDX-License-Identifier: GPL-2.0
//
// Copyright (C) 2019 Texas Instruments Incorporated - http://www.ti.com/
//
#include <linux/clk.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#define TI_UFS_SS_CTRL 0x4
#define TI_UFS_SS_RST_N_PCS BIT(0)
#define TI_UFS_SS_CLK_26MHZ BIT(4)
static int ti_j721e_ufs_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
unsigned long clk_rate;
void __iomem *regbase;
struct clk *clk;
u32 reg = 0;
int ret;
regbase = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(regbase))
return PTR_ERR(regbase);
pm_runtime_enable(dev);
ret = pm_runtime_resume_and_get(dev);
if (ret < 0)
goto disable_pm;
/* Select MPHY refclk frequency */
clk = devm_clk_get(dev, NULL);
if (IS_ERR(clk)) {
ret = PTR_ERR(clk);
dev_err(dev, "Cannot claim MPHY clock.\n");
goto clk_err;
}
clk_rate = clk_get_rate(clk);
if (clk_rate == 26000000)
reg |= TI_UFS_SS_CLK_26MHZ;
devm_clk_put(dev, clk);
/* Take UFS slave device out of reset */
reg |= TI_UFS_SS_RST_N_PCS;
writel(reg, regbase + TI_UFS_SS_CTRL);
ret = of_platform_populate(pdev->dev.of_node, NULL, NULL,
dev);
if (ret) {
dev_err(dev, "failed to populate child nodes %d\n", ret);
goto clk_err;
}
return ret;
clk_err:
pm_runtime_put_sync(dev);
disable_pm:
pm_runtime_disable(dev);
return ret;
}
static int ti_j721e_ufs_remove(struct platform_device *pdev)
{
of_platform_depopulate(&pdev->dev);
pm_runtime_put_sync(&pdev->dev);
pm_runtime_disable(&pdev->dev);
return 0;
}
static const struct of_device_id ti_j721e_ufs_of_match[] = {
{
.compatible = "ti,j721e-ufs",
},
{ },
};
MODULE_DEVICE_TABLE(of, ti_j721e_ufs_of_match);
static struct platform_driver ti_j721e_ufs_driver = {
.probe = ti_j721e_ufs_probe,
.remove = ti_j721e_ufs_remove,
.driver = {
.name = "ti-j721e-ufs",
.of_match_table = ti_j721e_ufs_of_match,
},
};
module_platform_driver(ti_j721e_ufs_driver);
MODULE_AUTHOR("Vignesh Raghavendra <[email protected]>");
MODULE_DESCRIPTION("TI UFS host controller glue driver");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/ufs/host/ti-j721e-ufs.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* UFS Host driver for Synopsys Designware Core
*
* Copyright (C) 2015-2016 Synopsys, Inc. (www.synopsys.com)
*
* Authors: Joao Pinto <[email protected]>
*/
#include <linux/module.h>
#include <ufs/ufshcd.h>
#include <ufs/unipro.h>
#include "ufshcd-dwc.h"
#include "ufshci-dwc.h"
int ufshcd_dwc_dme_set_attrs(struct ufs_hba *hba,
const struct ufshcd_dme_attr_val *v, int n)
{
int ret = 0;
int attr_node = 0;
for (attr_node = 0; attr_node < n; attr_node++) {
ret = ufshcd_dme_set_attr(hba, v[attr_node].attr_sel,
ATTR_SET_NOR, v[attr_node].mib_val, v[attr_node].peer);
if (ret)
return ret;
}
return 0;
}
EXPORT_SYMBOL(ufshcd_dwc_dme_set_attrs);
/**
* ufshcd_dwc_program_clk_div() - program clock divider.
* @hba: Private Structure pointer
* @divider_val: clock divider value to be programmed
*
*/
static void ufshcd_dwc_program_clk_div(struct ufs_hba *hba, u32 divider_val)
{
ufshcd_writel(hba, divider_val, DWC_UFS_REG_HCLKDIV);
}
/**
* ufshcd_dwc_link_is_up() - check if link is up.
* @hba: private structure pointer
*
* Return: 0 on success, non-zero value on failure.
*/
static int ufshcd_dwc_link_is_up(struct ufs_hba *hba)
{
int dme_result = 0;
ufshcd_dme_get(hba, UIC_ARG_MIB(VS_POWERSTATE), &dme_result);
if (dme_result == UFSHCD_LINK_IS_UP) {
ufshcd_set_link_active(hba);
return 0;
}
return 1;
}
/**
* ufshcd_dwc_connection_setup() - configure unipro attributes.
* @hba: pointer to drivers private data
*
* This function configures both the local side (host) and the peer side
* (device) unipro attributes to establish the connection to application/
* cport.
 * This function is not required if the hardware is already configured to
 * have this connection set up on reset. Invoking it anyway does no harm and
 * is safe with any UFS device.
*
* Return: 0 on success non-zero value on failure.
*/
static int ufshcd_dwc_connection_setup(struct ufs_hba *hba)
{
static const struct ufshcd_dme_attr_val setup_attrs[] = {
{ UIC_ARG_MIB(T_CONNECTIONSTATE), 0, DME_LOCAL },
{ UIC_ARG_MIB(N_DEVICEID), 0, DME_LOCAL },
{ UIC_ARG_MIB(N_DEVICEID_VALID), 0, DME_LOCAL },
{ UIC_ARG_MIB(T_PEERDEVICEID), 1, DME_LOCAL },
{ UIC_ARG_MIB(T_PEERCPORTID), 0, DME_LOCAL },
{ UIC_ARG_MIB(T_TRAFFICCLASS), 0, DME_LOCAL },
{ UIC_ARG_MIB(T_CPORTFLAGS), 0x6, DME_LOCAL },
{ UIC_ARG_MIB(T_CPORTMODE), 1, DME_LOCAL },
{ UIC_ARG_MIB(T_CONNECTIONSTATE), 1, DME_LOCAL },
{ UIC_ARG_MIB(T_CONNECTIONSTATE), 0, DME_PEER },
{ UIC_ARG_MIB(N_DEVICEID), 1, DME_PEER },
{ UIC_ARG_MIB(N_DEVICEID_VALID), 1, DME_PEER },
{ UIC_ARG_MIB(T_PEERDEVICEID), 1, DME_PEER },
{ UIC_ARG_MIB(T_PEERCPORTID), 0, DME_PEER },
{ UIC_ARG_MIB(T_TRAFFICCLASS), 0, DME_PEER },
{ UIC_ARG_MIB(T_CPORTFLAGS), 0x6, DME_PEER },
{ UIC_ARG_MIB(T_CPORTMODE), 1, DME_PEER },
{ UIC_ARG_MIB(T_CONNECTIONSTATE), 1, DME_PEER }
};
return ufshcd_dwc_dme_set_attrs(hba, setup_attrs, ARRAY_SIZE(setup_attrs));
}
/**
* ufshcd_dwc_link_startup_notify() - program clock divider.
* @hba: private structure pointer
* @status: Callback notify status
*
* Return: 0 on success, non-zero value on failure.
*/
int ufshcd_dwc_link_startup_notify(struct ufs_hba *hba,
enum ufs_notify_change_status status)
{
int err = 0;
if (status == PRE_CHANGE) {
ufshcd_dwc_program_clk_div(hba, DWC_UFS_REG_HCLKDIV_DIV_125);
err = ufshcd_vops_phy_initialization(hba);
if (err) {
dev_err(hba->dev, "Phy setup failed (%d)\n", err);
goto out;
}
} else { /* POST_CHANGE */
err = ufshcd_dwc_link_is_up(hba);
if (err) {
dev_err(hba->dev, "Link is not up\n");
goto out;
}
err = ufshcd_dwc_connection_setup(hba);
if (err)
dev_err(hba->dev, "Connection setup failed (%d)\n",
err);
}
out:
return err;
}
EXPORT_SYMBOL(ufshcd_dwc_link_startup_notify);
MODULE_AUTHOR("Joao Pinto <[email protected]>");
MODULE_DESCRIPTION("UFS Host driver for Synopsys Designware Core");
MODULE_LICENSE("Dual BSD/GPL");
| linux-master | drivers/ufs/host/ufshcd-dwc.c |
// SPDX-License-Identifier: GPL-2.0 OR MIT
/*
* Renesas UFS host controller driver
*
* Copyright (C) 2022 Renesas Electronics Corporation
*/
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/iopoll.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <ufs/ufshcd.h>
#include "ufshcd-pltfrm.h"
struct ufs_renesas_priv {
bool initialized; /* The hardware needs initialization once */
};
enum {
SET_PHY_INDEX_LO = 0,
SET_PHY_INDEX_HI,
TIMER_INDEX,
MAX_INDEX
};
enum ufs_renesas_init_param_mode {
MODE_RESTORE,
MODE_SET,
MODE_SAVE,
MODE_POLL,
MODE_WAIT,
MODE_WRITE,
};
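/*
 * The controller is initialized with a table of register operations (see
 * ufs_param[] below). The PARAM_* macros build the individual table entries:
 * plain writes, polls, delays, and save/set/restore of intermediate values.
 */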
#define PARAM_RESTORE(_reg, _index) \
{ .mode = MODE_RESTORE, .reg = _reg, .index = _index }
#define PARAM_SET(_index, _set) \
{ .mode = MODE_SET, .index = _index, .u.set = _set }
#define PARAM_SAVE(_reg, _mask, _index) \
{ .mode = MODE_SAVE, .reg = _reg, .mask = (u32)(_mask), \
.index = _index }
#define PARAM_POLL(_reg, _expected, _mask) \
{ .mode = MODE_POLL, .reg = _reg, .u.expected = _expected, \
.mask = (u32)(_mask) }
#define PARAM_WAIT(_delay_us) \
{ .mode = MODE_WAIT, .u.delay_us = _delay_us }
#define PARAM_WRITE(_reg, _val) \
{ .mode = MODE_WRITE, .reg = _reg, .u.val = _val }
#define PARAM_WRITE_D0_D4(_d0, _d4) \
PARAM_WRITE(0xd0, _d0), PARAM_WRITE(0xd4, _d4)
#define PARAM_WRITE_800_80C_POLL(_addr, _data_800) \
PARAM_WRITE_D0_D4(0x0000080c, 0x00000100), \
PARAM_WRITE_D0_D4(0x00000800, ((_data_800) << 16) | BIT(8) | (_addr)), \
PARAM_WRITE(0xd0, 0x0000080c), \
PARAM_POLL(0xd4, BIT(8), BIT(8))
#define PARAM_RESTORE_800_80C_POLL(_index) \
PARAM_WRITE_D0_D4(0x0000080c, 0x00000100), \
PARAM_WRITE(0xd0, 0x00000800), \
PARAM_RESTORE(0xd4, _index), \
PARAM_WRITE(0xd0, 0x0000080c), \
PARAM_POLL(0xd4, BIT(8), BIT(8))
#define PARAM_WRITE_804_80C_POLL(_addr, _data_804) \
PARAM_WRITE_D0_D4(0x0000080c, 0x00000100), \
PARAM_WRITE_D0_D4(0x00000804, ((_data_804) << 16) | BIT(8) | (_addr)), \
PARAM_WRITE(0xd0, 0x0000080c), \
PARAM_POLL(0xd4, BIT(8), BIT(8))
#define PARAM_WRITE_828_82C_POLL(_data_828) \
PARAM_WRITE_D0_D4(0x0000082c, 0x0f000000), \
PARAM_WRITE_D0_D4(0x00000828, _data_828), \
PARAM_WRITE(0xd0, 0x0000082c), \
PARAM_POLL(0xd4, _data_828, _data_828)
#define PARAM_WRITE_PHY(_addr16, _data16) \
PARAM_WRITE(0xf0, 1), \
PARAM_WRITE_800_80C_POLL(0x16, (_addr16) & 0xff), \
PARAM_WRITE_800_80C_POLL(0x17, ((_addr16) >> 8) & 0xff), \
PARAM_WRITE_800_80C_POLL(0x18, (_data16) & 0xff), \
PARAM_WRITE_800_80C_POLL(0x19, ((_data16) >> 8) & 0xff), \
PARAM_WRITE_800_80C_POLL(0x1c, 0x01), \
PARAM_WRITE_828_82C_POLL(0x0f000000), \
PARAM_WRITE(0xf0, 0)
#define PARAM_SET_PHY(_addr16, _data16) \
PARAM_WRITE(0xf0, 1), \
PARAM_WRITE_800_80C_POLL(0x16, (_addr16) & 0xff), \
PARAM_WRITE_800_80C_POLL(0x17, ((_addr16) >> 8) & 0xff), \
PARAM_WRITE_800_80C_POLL(0x1c, 0x01), \
PARAM_WRITE_828_82C_POLL(0x0f000000), \
PARAM_WRITE_804_80C_POLL(0x1a, 0), \
PARAM_WRITE(0xd0, 0x00000808), \
PARAM_SAVE(0xd4, 0xff, SET_PHY_INDEX_LO), \
PARAM_WRITE_804_80C_POLL(0x1b, 0), \
PARAM_WRITE(0xd0, 0x00000808), \
PARAM_SAVE(0xd4, 0xff, SET_PHY_INDEX_HI), \
PARAM_WRITE_828_82C_POLL(0x0f000000), \
PARAM_WRITE(0xf0, 0), \
PARAM_WRITE(0xf0, 1), \
PARAM_WRITE_800_80C_POLL(0x16, (_addr16) & 0xff), \
PARAM_WRITE_800_80C_POLL(0x17, ((_addr16) >> 8) & 0xff), \
PARAM_SET(SET_PHY_INDEX_LO, ((_data16 & 0xff) << 16) | BIT(8) | 0x18), \
PARAM_RESTORE_800_80C_POLL(SET_PHY_INDEX_LO), \
PARAM_SET(SET_PHY_INDEX_HI, (((_data16 >> 8) & 0xff) << 16) | BIT(8) | 0x19), \
PARAM_RESTORE_800_80C_POLL(SET_PHY_INDEX_HI), \
PARAM_WRITE_800_80C_POLL(0x1c, 0x01), \
PARAM_WRITE_828_82C_POLL(0x0f000000), \
PARAM_WRITE(0xf0, 0)
#define PARAM_INDIRECT_WRITE(_gpio, _addr, _data_800) \
PARAM_WRITE(0xf0, _gpio), \
PARAM_WRITE_800_80C_POLL(_addr, _data_800), \
PARAM_WRITE_828_82C_POLL(0x0f000000), \
PARAM_WRITE(0xf0, 0)
#define PARAM_INDIRECT_POLL(_gpio, _addr, _expected, _mask) \
PARAM_WRITE(0xf0, _gpio), \
PARAM_WRITE_800_80C_POLL(_addr, 0), \
PARAM_WRITE(0xd0, 0x00000808), \
PARAM_POLL(0xd4, _expected, _mask), \
PARAM_WRITE(0xf0, 0)
struct ufs_renesas_init_param {
enum ufs_renesas_init_param_mode mode;
u32 reg;
union {
u32 expected;
u32 delay_us;
u32 set;
u32 val;
} u;
u32 mask;
u32 index;
};
/* This setting is for SERIES B */
static const struct ufs_renesas_init_param ufs_param[] = {
PARAM_WRITE(0xc0, 0x49425308),
PARAM_WRITE_D0_D4(0x00000104, 0x00000002),
PARAM_WAIT(1),
PARAM_WRITE_D0_D4(0x00000828, 0x00000200),
PARAM_WAIT(1),
PARAM_WRITE_D0_D4(0x00000828, 0x00000000),
PARAM_WRITE_D0_D4(0x00000104, 0x00000001),
PARAM_WRITE_D0_D4(0x00000940, 0x00000001),
PARAM_WAIT(1),
PARAM_WRITE_D0_D4(0x00000940, 0x00000000),
PARAM_WRITE(0xc0, 0x49425308),
PARAM_WRITE(0xc0, 0x41584901),
PARAM_WRITE_D0_D4(0x0000080c, 0x00000100),
PARAM_WRITE_D0_D4(0x00000804, 0x00000000),
PARAM_WRITE(0xd0, 0x0000080c),
PARAM_POLL(0xd4, BIT(8), BIT(8)),
PARAM_WRITE(REG_CONTROLLER_ENABLE, 0x00000001),
PARAM_WRITE(0xd0, 0x00000804),
PARAM_POLL(0xd4, BIT(8) | BIT(6) | BIT(0), BIT(8) | BIT(6) | BIT(0)),
PARAM_WRITE(0xd0, 0x00000d00),
PARAM_SAVE(0xd4, 0x0000ffff, TIMER_INDEX),
PARAM_WRITE(0xd4, 0x00000000),
PARAM_WRITE_D0_D4(0x0000082c, 0x0f000000),
PARAM_WRITE_D0_D4(0x00000828, 0x08000000),
PARAM_WRITE(0xd0, 0x0000082c),
PARAM_POLL(0xd4, BIT(27), BIT(27)),
PARAM_WRITE(0xd0, 0x00000d2c),
PARAM_POLL(0xd4, BIT(0), BIT(0)),
/* phy setup */
PARAM_INDIRECT_WRITE(1, 0x01, 0x001f),
PARAM_INDIRECT_WRITE(7, 0x5d, 0x0014),
PARAM_INDIRECT_WRITE(7, 0x5e, 0x0014),
PARAM_INDIRECT_WRITE(7, 0x0d, 0x0003),
PARAM_INDIRECT_WRITE(7, 0x0e, 0x0007),
PARAM_INDIRECT_WRITE(7, 0x5f, 0x0003),
PARAM_INDIRECT_WRITE(7, 0x60, 0x0003),
PARAM_INDIRECT_WRITE(7, 0x5b, 0x00a6),
PARAM_INDIRECT_WRITE(7, 0x5c, 0x0003),
PARAM_INDIRECT_POLL(7, 0x3c, 0, BIT(7)),
PARAM_INDIRECT_POLL(7, 0x4c, 0, BIT(4)),
PARAM_INDIRECT_WRITE(1, 0x32, 0x0080),
PARAM_INDIRECT_WRITE(1, 0x1f, 0x0001),
PARAM_INDIRECT_WRITE(0, 0x2c, 0x0001),
PARAM_INDIRECT_WRITE(0, 0x32, 0x0087),
PARAM_INDIRECT_WRITE(1, 0x4d, 0x0061),
PARAM_INDIRECT_WRITE(4, 0x9b, 0x0009),
PARAM_INDIRECT_WRITE(4, 0xa6, 0x0005),
PARAM_INDIRECT_WRITE(4, 0xa5, 0x0058),
PARAM_INDIRECT_WRITE(1, 0x39, 0x0027),
PARAM_INDIRECT_WRITE(1, 0x47, 0x004c),
PARAM_INDIRECT_WRITE(7, 0x0d, 0x0002),
PARAM_INDIRECT_WRITE(7, 0x0e, 0x0007),
PARAM_WRITE_PHY(0x0028, 0x0061),
PARAM_WRITE_PHY(0x4014, 0x0061),
PARAM_SET_PHY(0x401c, BIT(2)),
PARAM_WRITE_PHY(0x4000, 0x0000),
PARAM_WRITE_PHY(0x4001, 0x0000),
PARAM_WRITE_PHY(0x10ae, 0x0001),
PARAM_WRITE_PHY(0x10ad, 0x0000),
PARAM_WRITE_PHY(0x10af, 0x0001),
PARAM_WRITE_PHY(0x10b6, 0x0001),
PARAM_WRITE_PHY(0x10ae, 0x0000),
PARAM_WRITE_PHY(0x10ae, 0x0001),
PARAM_WRITE_PHY(0x10ad, 0x0000),
PARAM_WRITE_PHY(0x10af, 0x0002),
PARAM_WRITE_PHY(0x10b6, 0x0001),
PARAM_WRITE_PHY(0x10ae, 0x0000),
PARAM_WRITE_PHY(0x10ae, 0x0001),
PARAM_WRITE_PHY(0x10ad, 0x0080),
PARAM_WRITE_PHY(0x10af, 0x0000),
PARAM_WRITE_PHY(0x10b6, 0x0001),
PARAM_WRITE_PHY(0x10ae, 0x0000),
PARAM_WRITE_PHY(0x10ae, 0x0001),
PARAM_WRITE_PHY(0x10ad, 0x0080),
PARAM_WRITE_PHY(0x10af, 0x001a),
PARAM_WRITE_PHY(0x10b6, 0x0001),
PARAM_WRITE_PHY(0x10ae, 0x0000),
PARAM_INDIRECT_WRITE(7, 0x70, 0x0016),
PARAM_INDIRECT_WRITE(7, 0x71, 0x0016),
PARAM_INDIRECT_WRITE(7, 0x72, 0x0014),
PARAM_INDIRECT_WRITE(7, 0x73, 0x0014),
PARAM_INDIRECT_WRITE(7, 0x74, 0x0000),
PARAM_INDIRECT_WRITE(7, 0x75, 0x0000),
PARAM_INDIRECT_WRITE(7, 0x76, 0x0010),
PARAM_INDIRECT_WRITE(7, 0x77, 0x0010),
PARAM_INDIRECT_WRITE(7, 0x78, 0x00ff),
PARAM_INDIRECT_WRITE(7, 0x79, 0x0000),
PARAM_INDIRECT_WRITE(7, 0x19, 0x0007),
PARAM_INDIRECT_WRITE(7, 0x1a, 0x0007),
PARAM_INDIRECT_WRITE(7, 0x24, 0x000c),
PARAM_INDIRECT_WRITE(7, 0x25, 0x000c),
PARAM_INDIRECT_WRITE(7, 0x62, 0x0000),
PARAM_INDIRECT_WRITE(7, 0x63, 0x0000),
PARAM_INDIRECT_WRITE(7, 0x5d, 0x0014),
PARAM_INDIRECT_WRITE(7, 0x5e, 0x0017),
PARAM_INDIRECT_WRITE(7, 0x5d, 0x0004),
PARAM_INDIRECT_WRITE(7, 0x5e, 0x0017),
PARAM_INDIRECT_POLL(7, 0x55, 0, BIT(6)),
PARAM_INDIRECT_POLL(7, 0x41, 0, BIT(7)),
/* end of phy setup */
PARAM_WRITE(0xf0, 0),
PARAM_WRITE(0xd0, 0x00000d00),
PARAM_RESTORE(0xd4, TIMER_INDEX),
};
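/* The table above is executed entry by entry from ufs_renesas_pre_init(). */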
static void ufs_renesas_dbg_register_dump(struct ufs_hba *hba)
{
ufshcd_dump_regs(hba, 0xc0, 0x40, "regs: 0xc0 + ");
}
static void ufs_renesas_reg_control(struct ufs_hba *hba,
const struct ufs_renesas_init_param *p)
{
static u32 save[MAX_INDEX];
int ret;
u32 val;
WARN_ON(p->index >= MAX_INDEX);
switch (p->mode) {
case MODE_RESTORE:
ufshcd_writel(hba, save[p->index], p->reg);
break;
case MODE_SET:
save[p->index] |= p->u.set;
break;
case MODE_SAVE:
save[p->index] = ufshcd_readl(hba, p->reg) & p->mask;
break;
case MODE_POLL:
ret = readl_poll_timeout_atomic(hba->mmio_base + p->reg,
val,
(val & p->mask) == p->u.expected,
10, 1000);
if (ret)
dev_err(hba->dev, "%s: poll failed %d (%08x, %08x, %08x)\n",
__func__, ret, val, p->mask, p->u.expected);
break;
case MODE_WAIT:
if (p->u.delay_us > 1000)
mdelay(DIV_ROUND_UP(p->u.delay_us, 1000));
else
udelay(p->u.delay_us);
break;
case MODE_WRITE:
ufshcd_writel(hba, p->u.val, p->reg);
break;
default:
break;
}
}
static void ufs_renesas_pre_init(struct ufs_hba *hba)
{
const struct ufs_renesas_init_param *p = ufs_param;
unsigned int i;
for (i = 0; i < ARRAY_SIZE(ufs_param); i++)
ufs_renesas_reg_control(hba, &p[i]);
}
static int ufs_renesas_hce_enable_notify(struct ufs_hba *hba,
enum ufs_notify_change_status status)
{
struct ufs_renesas_priv *priv = ufshcd_get_variant(hba);
if (priv->initialized)
return 0;
if (status == PRE_CHANGE)
ufs_renesas_pre_init(hba);
priv->initialized = true;
return 0;
}
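/*
 * Keep the controller runtime-resumed whenever its clocks are on: take a
 * runtime PM reference before the clocks are enabled (PRE_CHANGE of "on")
 * and drop it only after they have been disabled (POST_CHANGE of "off").
 */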
static int ufs_renesas_setup_clocks(struct ufs_hba *hba, bool on,
enum ufs_notify_change_status status)
{
if (on && status == PRE_CHANGE)
pm_runtime_get_sync(hba->dev);
else if (!on && status == POST_CHANGE)
pm_runtime_put(hba->dev);
return 0;
}
static int ufs_renesas_init(struct ufs_hba *hba)
{
struct ufs_renesas_priv *priv;
priv = devm_kzalloc(hba->dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
ufshcd_set_variant(hba, priv);
hba->quirks |= UFSHCD_QUIRK_BROKEN_64BIT_ADDRESS | UFSHCD_QUIRK_HIBERN_FASTAUTO;
return 0;
}
static const struct ufs_hba_variant_ops ufs_renesas_vops = {
.name = "renesas",
.init = ufs_renesas_init,
.setup_clocks = ufs_renesas_setup_clocks,
.hce_enable_notify = ufs_renesas_hce_enable_notify,
.dbg_register_dump = ufs_renesas_dbg_register_dump,
};
static const struct of_device_id __maybe_unused ufs_renesas_of_match[] = {
{ .compatible = "renesas,r8a779f0-ufs" },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, ufs_renesas_of_match);
static int ufs_renesas_probe(struct platform_device *pdev)
{
return ufshcd_pltfrm_init(pdev, &ufs_renesas_vops);
}
static int ufs_renesas_remove(struct platform_device *pdev)
{
struct ufs_hba *hba = platform_get_drvdata(pdev);
ufshcd_remove(hba);
return 0;
}
static struct platform_driver ufs_renesas_platform = {
.probe = ufs_renesas_probe,
.remove = ufs_renesas_remove,
.driver = {
.name = "ufshcd-renesas",
.of_match_table = of_match_ptr(ufs_renesas_of_match),
},
};
module_platform_driver(ufs_renesas_platform);
MODULE_AUTHOR("Yoshihiro Shimoda <[email protected]>");
MODULE_DESCRIPTION("Renesas UFS host controller driver");
MODULE_LICENSE("Dual MIT/GPL");
| linux-master | drivers/ufs/host/ufs-renesas.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Synopsys G210 Test Chip driver
*
* Copyright (C) 2015-2016 Synopsys, Inc. (www.synopsys.com)
*
* Authors: Joao Pinto <[email protected]>
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/of.h>
#include <linux/delay.h>
#include <linux/pm_runtime.h>
#include "ufshcd-pltfrm.h"
#include "ufshcd-dwc.h"
#include "tc-dwc-g210.h"
/*
* UFS DWC specific variant operations
*/
static struct ufs_hba_variant_ops tc_dwc_g210_20bit_pltfm_hba_vops = {
.name = "tc-dwc-g210-pltfm",
.link_startup_notify = ufshcd_dwc_link_startup_notify,
.phy_initialization = tc_dwc_g210_config_20_bit,
};
static struct ufs_hba_variant_ops tc_dwc_g210_40bit_pltfm_hba_vops = {
.name = "tc-dwc-g210-pltfm",
.link_startup_notify = ufshcd_dwc_link_startup_notify,
.phy_initialization = tc_dwc_g210_config_40_bit,
};
static const struct of_device_id tc_dwc_g210_pltfm_match[] = {
{
.compatible = "snps,g210-tc-6.00-20bit",
.data = &tc_dwc_g210_20bit_pltfm_hba_vops,
},
{
.compatible = "snps,g210-tc-6.00-40bit",
.data = &tc_dwc_g210_40bit_pltfm_hba_vops,
},
{ },
};
MODULE_DEVICE_TABLE(of, tc_dwc_g210_pltfm_match);
/**
* tc_dwc_g210_pltfm_probe() - probe routine of the driver
* @pdev: pointer to platform device structure
*
* Return: 0 on success, non-zero value on failure.
*/
static int tc_dwc_g210_pltfm_probe(struct platform_device *pdev)
{
int err;
const struct of_device_id *of_id;
struct ufs_hba_variant_ops *vops;
struct device *dev = &pdev->dev;
of_id = of_match_node(tc_dwc_g210_pltfm_match, dev->of_node);
vops = (struct ufs_hba_variant_ops *)of_id->data;
/* Perform generic probe */
err = ufshcd_pltfrm_init(pdev, vops);
if (err)
dev_err(dev, "ufshcd_pltfrm_init() failed %d\n", err);
return err;
}
/**
* tc_dwc_g210_pltfm_remove() - remove routine of the driver
* @pdev: pointer to platform device structure
*
* Return: always 0.
*/
static int tc_dwc_g210_pltfm_remove(struct platform_device *pdev)
{
struct ufs_hba *hba = platform_get_drvdata(pdev);
pm_runtime_get_sync(&(pdev)->dev);
ufshcd_remove(hba);
return 0;
}
static const struct dev_pm_ops tc_dwc_g210_pltfm_pm_ops = {
SET_SYSTEM_SLEEP_PM_OPS(ufshcd_system_suspend, ufshcd_system_resume)
SET_RUNTIME_PM_OPS(ufshcd_runtime_suspend, ufshcd_runtime_resume, NULL)
};
static struct platform_driver tc_dwc_g210_pltfm_driver = {
.probe = tc_dwc_g210_pltfm_probe,
.remove = tc_dwc_g210_pltfm_remove,
.driver = {
.name = "tc-dwc-g210-pltfm",
.pm = &tc_dwc_g210_pltfm_pm_ops,
.of_match_table = of_match_ptr(tc_dwc_g210_pltfm_match),
},
};
module_platform_driver(tc_dwc_g210_pltfm_driver);
MODULE_ALIAS("platform:tc-dwc-g210-pltfm");
MODULE_DESCRIPTION("Synopsys Test Chip G210 platform glue driver");
MODULE_AUTHOR("Joao Pinto <[email protected]>");
MODULE_LICENSE("Dual BSD/GPL");
| linux-master | drivers/ufs/host/tc-dwc-g210-pltfrm.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Synopsys G210 Test Chip driver
*
* Copyright (C) 2015-2016 Synopsys, Inc. (www.synopsys.com)
*
* Authors: Joao Pinto <[email protected]>
*/
#include <ufs/ufshcd.h>
#include "ufshcd-dwc.h"
#include "tc-dwc-g210.h"
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/pm_runtime.h>
/* Test Chip type expected values */
#define TC_G210_20BIT 20
#define TC_G210_40BIT 40
#define TC_G210_INV 0
static int tc_type = TC_G210_INV;
module_param(tc_type, int, 0);
MODULE_PARM_DESC(tc_type, "Test Chip Type (20 = 20-bit, 40 = 40-bit)");
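/*
 * Usage example (hypothetical invocation, assuming the module is built as
 * tc-dwc-g210-pci and the parameter follows the usual dash-to-underscore
 * conversion):
 *   modprobe tc-dwc-g210-pci tc_type=40
 * or, when built in, on the kernel command line:
 *   tc_dwc_g210_pci.tc_type=20
 */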
/*
* tc_dwc_g210_pci_hba_vops - UFS DWC specific variant operations
*/
static struct ufs_hba_variant_ops tc_dwc_g210_pci_hba_vops = {
.name = "tc-dwc-g210-pci",
.link_startup_notify = ufshcd_dwc_link_startup_notify,
};
/**
* tc_dwc_g210_pci_remove - de-allocate the PCI/SCSI host and host memory
* space data structures
* @pdev: pointer to PCI handle
*/
static void tc_dwc_g210_pci_remove(struct pci_dev *pdev)
{
struct ufs_hba *hba = pci_get_drvdata(pdev);
pm_runtime_forbid(&pdev->dev);
pm_runtime_get_noresume(&pdev->dev);
ufshcd_remove(hba);
}
/**
* tc_dwc_g210_pci_probe - probe routine of the driver
* @pdev: pointer to PCI device handle
* @id: PCI device id
*
* Return: 0 on success, non-zero value on failure.
*/
static int
tc_dwc_g210_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
struct ufs_hba *hba;
void __iomem *mmio_base;
int err;
/* Check Test Chip type and set the specific setup routine */
if (tc_type == TC_G210_20BIT) {
tc_dwc_g210_pci_hba_vops.phy_initialization =
tc_dwc_g210_config_20_bit;
} else if (tc_type == TC_G210_40BIT) {
tc_dwc_g210_pci_hba_vops.phy_initialization =
tc_dwc_g210_config_40_bit;
} else {
dev_err(&pdev->dev, "test chip version not specified\n");
return -EPERM;
}
err = pcim_enable_device(pdev);
if (err) {
dev_err(&pdev->dev, "pcim_enable_device failed\n");
return err;
}
pci_set_master(pdev);
err = pcim_iomap_regions(pdev, 1 << 0, UFSHCD);
if (err < 0) {
dev_err(&pdev->dev, "request and iomap failed\n");
return err;
}
mmio_base = pcim_iomap_table(pdev)[0];
err = ufshcd_alloc_host(&pdev->dev, &hba);
if (err) {
dev_err(&pdev->dev, "Allocation failed\n");
return err;
}
hba->vops = &tc_dwc_g210_pci_hba_vops;
err = ufshcd_init(hba, mmio_base, pdev->irq);
if (err) {
dev_err(&pdev->dev, "Initialization failed\n");
return err;
}
pm_runtime_put_noidle(&pdev->dev);
pm_runtime_allow(&pdev->dev);
return 0;
}
static const struct dev_pm_ops tc_dwc_g210_pci_pm_ops = {
SET_SYSTEM_SLEEP_PM_OPS(ufshcd_system_suspend, ufshcd_system_resume)
SET_RUNTIME_PM_OPS(ufshcd_runtime_suspend, ufshcd_runtime_resume, NULL)
.prepare = ufshcd_suspend_prepare,
.complete = ufshcd_resume_complete,
};
static const struct pci_device_id tc_dwc_g210_pci_tbl[] = {
{ PCI_VENDOR_ID_SYNOPSYS, 0xB101, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
{ PCI_VENDOR_ID_SYNOPSYS, 0xB102, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
{ } /* terminate list */
};
MODULE_DEVICE_TABLE(pci, tc_dwc_g210_pci_tbl);
static struct pci_driver tc_dwc_g210_pci_driver = {
.name = "tc-dwc-g210-pci",
.id_table = tc_dwc_g210_pci_tbl,
.probe = tc_dwc_g210_pci_probe,
.remove = tc_dwc_g210_pci_remove,
.driver = {
.pm = &tc_dwc_g210_pci_pm_ops
},
};
module_pci_driver(tc_dwc_g210_pci_driver);
MODULE_AUTHOR("Joao Pinto <[email protected]>");
MODULE_DESCRIPTION("Synopsys Test Chip G210 PCI glue driver");
MODULE_LICENSE("Dual BSD/GPL");
| linux-master | drivers/ufs/host/tc-dwc-g210-pci.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2013-2016, Linux Foundation. All rights reserved.
*/
#include <linux/acpi.h>
#include <linux/time.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/interconnect.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/phy/phy.h>
#include <linux/gpio/consumer.h>
#include <linux/reset-controller.h>
#include <linux/devfreq.h>
#include <soc/qcom/ice.h>
#include <ufs/ufshcd.h>
#include "ufshcd-pltfrm.h"
#include <ufs/unipro.h>
#include "ufs-qcom.h"
#include <ufs/ufshci.h>
#include <ufs/ufs_quirks.h>
#define MCQ_QCFGPTR_MASK GENMASK(7, 0)
#define MCQ_QCFGPTR_UNIT 0x200
#define MCQ_SQATTR_OFFSET(c) \
((((c) >> 16) & MCQ_QCFGPTR_MASK) * MCQ_QCFGPTR_UNIT)
#define MCQ_QCFG_SIZE 0x40
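/*
 * MCQ_SQATTR_OFFSET() extracts the QCFGPTR field (bits 23:16) of the MCQ
 * capability value and scales it by the 0x200-byte unit; for example, a
 * QCFGPTR of 2 yields a queue-config region offset of 0x400.
 */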
enum {
TSTBUS_UAWM,
TSTBUS_UARM,
TSTBUS_TXUC,
TSTBUS_RXUC,
TSTBUS_DFC,
TSTBUS_TRLUT,
TSTBUS_TMRLUT,
TSTBUS_OCSC,
TSTBUS_UTP_HCI,
TSTBUS_COMBINED,
TSTBUS_WRAPPER,
TSTBUS_UNIPRO,
TSTBUS_MAX,
};
#define QCOM_UFS_MAX_GEAR 4
#define QCOM_UFS_MAX_LANE 2
enum {
MODE_MIN,
MODE_PWM,
MODE_HS_RA,
MODE_HS_RB,
MODE_MAX,
};
static const struct __ufs_qcom_bw_table {
u32 mem_bw;
u32 cfg_bw;
} ufs_qcom_bw_table[MODE_MAX + 1][QCOM_UFS_MAX_GEAR + 1][QCOM_UFS_MAX_LANE + 1] = {
[MODE_MIN][0][0] = { 0, 0 }, /* Bandwidth values in KB/s */
[MODE_PWM][UFS_PWM_G1][UFS_LANE_1] = { 922, 1000 },
[MODE_PWM][UFS_PWM_G2][UFS_LANE_1] = { 1844, 1000 },
[MODE_PWM][UFS_PWM_G3][UFS_LANE_1] = { 3688, 1000 },
[MODE_PWM][UFS_PWM_G4][UFS_LANE_1] = { 7376, 1000 },
[MODE_PWM][UFS_PWM_G1][UFS_LANE_2] = { 1844, 1000 },
[MODE_PWM][UFS_PWM_G2][UFS_LANE_2] = { 3688, 1000 },
[MODE_PWM][UFS_PWM_G3][UFS_LANE_2] = { 7376, 1000 },
[MODE_PWM][UFS_PWM_G4][UFS_LANE_2] = { 14752, 1000 },
[MODE_HS_RA][UFS_HS_G1][UFS_LANE_1] = { 127796, 1000 },
[MODE_HS_RA][UFS_HS_G2][UFS_LANE_1] = { 255591, 1000 },
[MODE_HS_RA][UFS_HS_G3][UFS_LANE_1] = { 1492582, 102400 },
[MODE_HS_RA][UFS_HS_G4][UFS_LANE_1] = { 2915200, 204800 },
[MODE_HS_RA][UFS_HS_G1][UFS_LANE_2] = { 255591, 1000 },
[MODE_HS_RA][UFS_HS_G2][UFS_LANE_2] = { 511181, 1000 },
[MODE_HS_RA][UFS_HS_G3][UFS_LANE_2] = { 1492582, 204800 },
[MODE_HS_RA][UFS_HS_G4][UFS_LANE_2] = { 2915200, 409600 },
[MODE_HS_RB][UFS_HS_G1][UFS_LANE_1] = { 149422, 1000 },
[MODE_HS_RB][UFS_HS_G2][UFS_LANE_1] = { 298189, 1000 },
[MODE_HS_RB][UFS_HS_G3][UFS_LANE_1] = { 1492582, 102400 },
[MODE_HS_RB][UFS_HS_G4][UFS_LANE_1] = { 2915200, 204800 },
[MODE_HS_RB][UFS_HS_G1][UFS_LANE_2] = { 298189, 1000 },
[MODE_HS_RB][UFS_HS_G2][UFS_LANE_2] = { 596378, 1000 },
[MODE_HS_RB][UFS_HS_G3][UFS_LANE_2] = { 1492582, 204800 },
[MODE_HS_RB][UFS_HS_G4][UFS_LANE_2] = { 2915200, 409600 },
[MODE_MAX][0][0] = { 7643136, 307200 },
};
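/*
 * The table above is indexed as [mode][gear][lane] and looked up in
 * ufs_qcom_get_bw_table() from the negotiated power mode; for instance,
 * HS-G3 Rate B on two lanes maps to { 1492582, 204800 } KB/s for the
 * memory and config paths respectively.
 */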
static struct ufs_qcom_host *ufs_qcom_hosts[MAX_UFS_QCOM_HOSTS];
static void ufs_qcom_get_default_testbus_cfg(struct ufs_qcom_host *host);
static int ufs_qcom_set_dme_vs_core_clk_ctrl_clear_div(struct ufs_hba *hba,
u32 clk_cycles);
static struct ufs_qcom_host *rcdev_to_ufs_host(struct reset_controller_dev *rcd)
{
return container_of(rcd, struct ufs_qcom_host, rcdev);
}
#ifdef CONFIG_SCSI_UFS_CRYPTO
static inline void ufs_qcom_ice_enable(struct ufs_qcom_host *host)
{
if (host->hba->caps & UFSHCD_CAP_CRYPTO)
qcom_ice_enable(host->ice);
}
static int ufs_qcom_ice_init(struct ufs_qcom_host *host)
{
struct ufs_hba *hba = host->hba;
struct device *dev = hba->dev;
struct qcom_ice *ice;
ice = of_qcom_ice_get(dev);
if (ice == ERR_PTR(-EOPNOTSUPP)) {
dev_warn(dev, "Disabling inline encryption support\n");
ice = NULL;
}
if (IS_ERR_OR_NULL(ice))
return PTR_ERR_OR_ZERO(ice);
host->ice = ice;
hba->caps |= UFSHCD_CAP_CRYPTO;
return 0;
}
static inline int ufs_qcom_ice_resume(struct ufs_qcom_host *host)
{
if (host->hba->caps & UFSHCD_CAP_CRYPTO)
return qcom_ice_resume(host->ice);
return 0;
}
static inline int ufs_qcom_ice_suspend(struct ufs_qcom_host *host)
{
if (host->hba->caps & UFSHCD_CAP_CRYPTO)
return qcom_ice_suspend(host->ice);
return 0;
}
static int ufs_qcom_ice_program_key(struct ufs_hba *hba,
const union ufs_crypto_cfg_entry *cfg,
int slot)
{
struct ufs_qcom_host *host = ufshcd_get_variant(hba);
union ufs_crypto_cap_entry cap;
bool config_enable =
cfg->config_enable & UFS_CRYPTO_CONFIGURATION_ENABLE;
/* Only AES-256-XTS has been tested so far. */
cap = hba->crypto_cap_array[cfg->crypto_cap_idx];
if (cap.algorithm_id != UFS_CRYPTO_ALG_AES_XTS ||
cap.key_size != UFS_CRYPTO_KEY_SIZE_256)
return -EINVAL;
if (config_enable)
return qcom_ice_program_key(host->ice,
QCOM_ICE_CRYPTO_ALG_AES_XTS,
QCOM_ICE_CRYPTO_KEY_SIZE_256,
cfg->crypto_key,
cfg->data_unit_size, slot);
else
return qcom_ice_evict_key(host->ice, slot);
}
#else
#define ufs_qcom_ice_program_key NULL
static inline void ufs_qcom_ice_enable(struct ufs_qcom_host *host)
{
}
static int ufs_qcom_ice_init(struct ufs_qcom_host *host)
{
return 0;
}
static inline int ufs_qcom_ice_resume(struct ufs_qcom_host *host)
{
return 0;
}
static inline int ufs_qcom_ice_suspend(struct ufs_qcom_host *host)
{
return 0;
}
#endif
static int ufs_qcom_host_clk_get(struct device *dev,
const char *name, struct clk **clk_out, bool optional)
{
struct clk *clk;
int err = 0;
clk = devm_clk_get(dev, name);
if (!IS_ERR(clk)) {
*clk_out = clk;
return 0;
}
err = PTR_ERR(clk);
if (optional && err == -ENOENT) {
*clk_out = NULL;
return 0;
}
if (err != -EPROBE_DEFER)
dev_err(dev, "failed to get %s err %d\n", name, err);
return err;
}
static int ufs_qcom_host_clk_enable(struct device *dev,
const char *name, struct clk *clk)
{
int err = 0;
err = clk_prepare_enable(clk);
if (err)
dev_err(dev, "%s: %s enable failed %d\n", __func__, name, err);
return err;
}
static void ufs_qcom_disable_lane_clks(struct ufs_qcom_host *host)
{
if (!host->is_lane_clks_enabled)
return;
clk_disable_unprepare(host->tx_l1_sync_clk);
clk_disable_unprepare(host->tx_l0_sync_clk);
clk_disable_unprepare(host->rx_l1_sync_clk);
clk_disable_unprepare(host->rx_l0_sync_clk);
host->is_lane_clks_enabled = false;
}
static int ufs_qcom_enable_lane_clks(struct ufs_qcom_host *host)
{
int err;
struct device *dev = host->hba->dev;
if (host->is_lane_clks_enabled)
return 0;
err = ufs_qcom_host_clk_enable(dev, "rx_lane0_sync_clk",
host->rx_l0_sync_clk);
if (err)
return err;
err = ufs_qcom_host_clk_enable(dev, "tx_lane0_sync_clk",
host->tx_l0_sync_clk);
if (err)
goto disable_rx_l0;
err = ufs_qcom_host_clk_enable(dev, "rx_lane1_sync_clk",
host->rx_l1_sync_clk);
if (err)
goto disable_tx_l0;
err = ufs_qcom_host_clk_enable(dev, "tx_lane1_sync_clk",
host->tx_l1_sync_clk);
if (err)
goto disable_rx_l1;
host->is_lane_clks_enabled = true;
return 0;
disable_rx_l1:
clk_disable_unprepare(host->rx_l1_sync_clk);
disable_tx_l0:
clk_disable_unprepare(host->tx_l0_sync_clk);
disable_rx_l0:
clk_disable_unprepare(host->rx_l0_sync_clk);
return err;
}
static int ufs_qcom_init_lane_clks(struct ufs_qcom_host *host)
{
int err = 0;
struct device *dev = host->hba->dev;
if (has_acpi_companion(dev))
return 0;
err = ufs_qcom_host_clk_get(dev, "rx_lane0_sync_clk",
&host->rx_l0_sync_clk, false);
if (err)
return err;
err = ufs_qcom_host_clk_get(dev, "tx_lane0_sync_clk",
&host->tx_l0_sync_clk, false);
if (err)
return err;
/* In case of single lane per direction, don't read lane1 clocks */
if (host->hba->lanes_per_direction > 1) {
err = ufs_qcom_host_clk_get(dev, "rx_lane1_sync_clk",
&host->rx_l1_sync_clk, false);
if (err)
return err;
err = ufs_qcom_host_clk_get(dev, "tx_lane1_sync_clk",
&host->tx_l1_sync_clk, true);
}
return 0;
}
static int ufs_qcom_check_hibern8(struct ufs_hba *hba)
{
int err;
u32 tx_fsm_val = 0;
unsigned long timeout = jiffies + msecs_to_jiffies(HBRN8_POLL_TOUT_MS);
do {
err = ufshcd_dme_get(hba,
UIC_ARG_MIB_SEL(MPHY_TX_FSM_STATE,
UIC_ARG_MPHY_TX_GEN_SEL_INDEX(0)),
&tx_fsm_val);
if (err || tx_fsm_val == TX_FSM_HIBERN8)
break;
/* sleep for max. 200us */
usleep_range(100, 200);
} while (time_before(jiffies, timeout));
/*
* We might have been scheduled out for a long time during polling,
* so check the state again.
*/
if (time_after(jiffies, timeout))
err = ufshcd_dme_get(hba,
UIC_ARG_MIB_SEL(MPHY_TX_FSM_STATE,
UIC_ARG_MPHY_TX_GEN_SEL_INDEX(0)),
&tx_fsm_val);
if (err) {
dev_err(hba->dev, "%s: unable to get TX_FSM_STATE, err %d\n",
__func__, err);
} else if (tx_fsm_val != TX_FSM_HIBERN8) {
err = tx_fsm_val;
dev_err(hba->dev, "%s: invalid TX_FSM_STATE = %d\n",
__func__, err);
}
return err;
}
static void ufs_qcom_select_unipro_mode(struct ufs_qcom_host *host)
{
ufshcd_rmwl(host->hba, QUNIPRO_SEL,
ufs_qcom_cap_qunipro(host) ? QUNIPRO_SEL : 0,
REG_UFS_CFG1);
if (host->hw_ver.major >= 0x05)
ufshcd_rmwl(host->hba, QUNIPRO_G4_SEL, 0, REG_UFS_CFG0);
/* make sure above configuration is applied before we return */
mb();
}
/*
* ufs_qcom_host_reset - reset host controller and PHY
*/
static int ufs_qcom_host_reset(struct ufs_hba *hba)
{
int ret = 0;
struct ufs_qcom_host *host = ufshcd_get_variant(hba);
bool reenable_intr = false;
if (!host->core_reset) {
dev_warn(hba->dev, "%s: reset control not set\n", __func__);
return 0;
}
reenable_intr = hba->is_irq_enabled;
disable_irq(hba->irq);
hba->is_irq_enabled = false;
ret = reset_control_assert(host->core_reset);
if (ret) {
dev_err(hba->dev, "%s: core_reset assert failed, err = %d\n",
__func__, ret);
return ret;
}
/*
* The hardware requirement for delay between assert/deassert
* is at least 3-4 sleep clock (32.7KHz) cycles, which comes to
* ~125us (4/32768). To be on the safe side add 200us delay.
*/
usleep_range(200, 210);
ret = reset_control_deassert(host->core_reset);
if (ret)
dev_err(hba->dev, "%s: core_reset deassert failed, err = %d\n",
__func__, ret);
usleep_range(1000, 1100);
if (reenable_intr) {
enable_irq(hba->irq);
hba->is_irq_enabled = true;
}
return 0;
}
static u32 ufs_qcom_get_hs_gear(struct ufs_hba *hba)
{
struct ufs_qcom_host *host = ufshcd_get_variant(hba);
if (host->hw_ver.major == 0x1) {
/*
* HS-G3 operations may not reliably work on legacy QCOM
* UFS host controller hardware even though capability
* exchange during link startup phase may end up
* negotiating maximum supported gear as G3.
* Hence downgrade the maximum supported gear to HS-G2.
*/
return UFS_HS_G2;
} else if (host->hw_ver.major >= 0x4) {
return UFS_QCOM_MAX_GEAR(ufshcd_readl(hba, REG_UFS_PARAM0));
}
/* Default is HS-G3 */
return UFS_HS_G3;
}
static int ufs_qcom_power_up_sequence(struct ufs_hba *hba)
{
struct ufs_qcom_host *host = ufshcd_get_variant(hba);
struct phy *phy = host->generic_phy;
int ret;
/* Reset UFS Host Controller and PHY */
ret = ufs_qcom_host_reset(hba);
if (ret)
dev_warn(hba->dev, "%s: host reset returned %d\n",
__func__, ret);
/* phy initialization - calibrate the phy */
ret = phy_init(phy);
if (ret) {
dev_err(hba->dev, "%s: phy init failed, ret = %d\n",
__func__, ret);
return ret;
}
phy_set_mode_ext(phy, PHY_MODE_UFS_HS_B, host->hs_gear);
/* power on phy - start serdes and phy's power and clocks */
ret = phy_power_on(phy);
if (ret) {
dev_err(hba->dev, "%s: phy power on failed, ret = %d\n",
__func__, ret);
goto out_disable_phy;
}
ufs_qcom_select_unipro_mode(host);
return 0;
out_disable_phy:
phy_exit(phy);
return ret;
}
/*
* The UTP controller has a number of internal clock gating cells (CGCs).
* Internal hardware sub-modules within the UTP controller control the CGCs.
* Hardware CGCs disable the clock to inactivate UTP sub-modules not involved
* in a specific operation. The UTP controller CGCs are disabled by default,
* so this function enables them (after every UFS link startup) to save some
* power leakage.
*/
static void ufs_qcom_enable_hw_clk_gating(struct ufs_hba *hba)
{
ufshcd_writel(hba,
ufshcd_readl(hba, REG_UFS_CFG2) | REG_UFS_CFG2_CGC_EN_ALL,
REG_UFS_CFG2);
/* Ensure that HW clock gating is enabled before next operations */
mb();
}
static int ufs_qcom_hce_enable_notify(struct ufs_hba *hba,
enum ufs_notify_change_status status)
{
struct ufs_qcom_host *host = ufshcd_get_variant(hba);
int err = 0;
switch (status) {
case PRE_CHANGE:
ufs_qcom_power_up_sequence(hba);
/*
* The PHY PLL output is the source of tx/rx lane symbol
* clocks, hence, enable the lane clocks only after PHY
* is initialized.
*/
err = ufs_qcom_enable_lane_clks(host);
break;
case POST_CHANGE:
/* check if UFS PHY moved from DISABLED to HIBERN8 */
err = ufs_qcom_check_hibern8(hba);
ufs_qcom_enable_hw_clk_gating(hba);
ufs_qcom_ice_enable(host);
break;
default:
dev_err(hba->dev, "%s: invalid status %d\n", __func__, status);
err = -EINVAL;
break;
}
return err;
}
/*
* Return: zero for success and non-zero in case of a failure.
*/
static int ufs_qcom_cfg_timers(struct ufs_hba *hba, u32 gear,
u32 hs, u32 rate, bool update_link_startup_timer)
{
struct ufs_qcom_host *host = ufshcd_get_variant(hba);
struct ufs_clk_info *clki;
u32 core_clk_period_in_ns;
u32 tx_clk_cycles_per_us = 0;
unsigned long core_clk_rate = 0;
u32 core_clk_cycles_per_us = 0;
static u32 pwm_fr_table[][2] = {
{UFS_PWM_G1, 0x1},
{UFS_PWM_G2, 0x1},
{UFS_PWM_G3, 0x1},
{UFS_PWM_G4, 0x1},
};
static u32 hs_fr_table_rA[][2] = {
{UFS_HS_G1, 0x1F},
{UFS_HS_G2, 0x3e},
{UFS_HS_G3, 0x7D},
};
static u32 hs_fr_table_rB[][2] = {
{UFS_HS_G1, 0x24},
{UFS_HS_G2, 0x49},
{UFS_HS_G3, 0x92},
};
/*
* The Qunipro controller does not use the following registers:
* SYS1CLK_1US_REG, TX_SYMBOL_CLK_1US_REG, CLK_NS_REG &
* UFS_REG_PA_LINK_STARTUP_TIMER.
* However, the UTP controller does use the SYS1CLK_1US_REG register for
* Interrupt Aggregation logic.
*/
if (ufs_qcom_cap_qunipro(host) && !ufshcd_is_intr_aggr_allowed(hba))
return 0;
if (gear == 0) {
dev_err(hba->dev, "%s: invalid gear = %d\n", __func__, gear);
return -EINVAL;
}
list_for_each_entry(clki, &hba->clk_list_head, list) {
if (!strcmp(clki->name, "core_clk"))
core_clk_rate = clk_get_rate(clki->clk);
}
/* If frequency is smaller than 1MHz, set to 1MHz */
if (core_clk_rate < DEFAULT_CLK_RATE_HZ)
core_clk_rate = DEFAULT_CLK_RATE_HZ;
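/*
* Example: with core_clk running at 150 MHz,
* core_clk_cycles_per_us = 150000000 / 1000000 = 150.
*/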
core_clk_cycles_per_us = core_clk_rate / USEC_PER_SEC;
if (ufshcd_readl(hba, REG_UFS_SYS1CLK_1US) != core_clk_cycles_per_us) {
ufshcd_writel(hba, core_clk_cycles_per_us, REG_UFS_SYS1CLK_1US);
/*
* make sure above write gets applied before we return from
* this function.
*/
mb();
}
if (ufs_qcom_cap_qunipro(host))
return 0;
core_clk_period_in_ns = NSEC_PER_SEC / core_clk_rate;
core_clk_period_in_ns <<= OFFSET_CLK_NS_REG;
core_clk_period_in_ns &= MASK_CLK_NS_REG;
switch (hs) {
case FASTAUTO_MODE:
case FAST_MODE:
if (rate == PA_HS_MODE_A) {
if (gear > ARRAY_SIZE(hs_fr_table_rA)) {
dev_err(hba->dev,
"%s: index %d exceeds table size %zu\n",
__func__, gear,
ARRAY_SIZE(hs_fr_table_rA));
return -EINVAL;
}
tx_clk_cycles_per_us = hs_fr_table_rA[gear-1][1];
} else if (rate == PA_HS_MODE_B) {
if (gear > ARRAY_SIZE(hs_fr_table_rB)) {
dev_err(hba->dev,
"%s: index %d exceeds table size %zu\n",
__func__, gear,
ARRAY_SIZE(hs_fr_table_rB));
return -EINVAL;
}
tx_clk_cycles_per_us = hs_fr_table_rB[gear-1][1];
} else {
dev_err(hba->dev, "%s: invalid rate = %d\n",
__func__, rate);
return -EINVAL;
}
break;
case SLOWAUTO_MODE:
case SLOW_MODE:
if (gear > ARRAY_SIZE(pwm_fr_table)) {
dev_err(hba->dev,
"%s: index %d exceeds table size %zu\n",
__func__, gear,
ARRAY_SIZE(pwm_fr_table));
return -EINVAL;
}
tx_clk_cycles_per_us = pwm_fr_table[gear-1][1];
break;
case UNCHANGED:
default:
dev_err(hba->dev, "%s: invalid mode = %d\n", __func__, hs);
return -EINVAL;
}
if (ufshcd_readl(hba, REG_UFS_TX_SYMBOL_CLK_NS_US) !=
(core_clk_period_in_ns | tx_clk_cycles_per_us)) {
/* this register 2 fields shall be written at once */
ufshcd_writel(hba, core_clk_period_in_ns | tx_clk_cycles_per_us,
REG_UFS_TX_SYMBOL_CLK_NS_US);
/*
* make sure above write gets applied before we return from
* this function.
*/
mb();
}
if (update_link_startup_timer && host->hw_ver.major != 0x5) {
ufshcd_writel(hba, ((core_clk_rate / MSEC_PER_SEC) * 100),
REG_UFS_CFG0);
/*
* make sure that this configuration is applied before
* we return
*/
mb();
}
return 0;
}
static int ufs_qcom_link_startup_notify(struct ufs_hba *hba,
enum ufs_notify_change_status status)
{
int err = 0;
struct ufs_qcom_host *host = ufshcd_get_variant(hba);
switch (status) {
case PRE_CHANGE:
if (ufs_qcom_cfg_timers(hba, UFS_PWM_G1, SLOWAUTO_MODE,
0, true)) {
dev_err(hba->dev, "%s: ufs_qcom_cfg_timers() failed\n",
__func__);
return -EINVAL;
}
if (ufs_qcom_cap_qunipro(host))
/*
* set unipro core clock cycles to 150 & clear clock
* divider
*/
err = ufs_qcom_set_dme_vs_core_clk_ctrl_clear_div(hba,
150);
/*
* Some UFS devices (and maybe the host) have issues if LCC is
* enabled. So we are setting PA_Local_TX_LCC_Enable to 0
* before link startup which will make sure that both host
* and device TX LCC are disabled once link startup is
* completed.
*/
if (ufshcd_get_local_unipro_ver(hba) != UFS_UNIPRO_VER_1_41)
err = ufshcd_disable_host_tx_lcc(hba);
break;
default:
break;
}
return err;
}
static void ufs_qcom_device_reset_ctrl(struct ufs_hba *hba, bool asserted)
{
struct ufs_qcom_host *host = ufshcd_get_variant(hba);
/* reset gpio is optional */
if (!host->device_reset)
return;
gpiod_set_value_cansleep(host->device_reset, asserted);
}
static int ufs_qcom_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op,
enum ufs_notify_change_status status)
{
struct ufs_qcom_host *host = ufshcd_get_variant(hba);
struct phy *phy = host->generic_phy;
if (status == PRE_CHANGE)
return 0;
if (ufs_qcom_is_link_off(hba)) {
/*
* Disable the tx/rx lane symbol clocks before PHY is
* powered down as the PLL source should be disabled
* after downstream clocks are disabled.
*/
ufs_qcom_disable_lane_clks(host);
phy_power_off(phy);
/* reset the connected UFS device during power down */
ufs_qcom_device_reset_ctrl(hba, true);
} else if (!ufs_qcom_is_link_active(hba)) {
ufs_qcom_disable_lane_clks(host);
}
return ufs_qcom_ice_suspend(host);
}
static int ufs_qcom_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op)
{
struct ufs_qcom_host *host = ufshcd_get_variant(hba);
struct phy *phy = host->generic_phy;
int err;
if (ufs_qcom_is_link_off(hba)) {
err = phy_power_on(phy);
if (err) {
dev_err(hba->dev, "%s: failed PHY power on: %d\n",
__func__, err);
return err;
}
err = ufs_qcom_enable_lane_clks(host);
if (err)
return err;
} else if (!ufs_qcom_is_link_active(hba)) {
err = ufs_qcom_enable_lane_clks(host);
if (err)
return err;
}
return ufs_qcom_ice_resume(host);
}
static void ufs_qcom_dev_ref_clk_ctrl(struct ufs_qcom_host *host, bool enable)
{
if (host->dev_ref_clk_ctrl_mmio &&
(enable ^ host->is_dev_ref_clk_enabled)) {
u32 temp = readl_relaxed(host->dev_ref_clk_ctrl_mmio);
if (enable)
temp |= host->dev_ref_clk_en_mask;
else
temp &= ~host->dev_ref_clk_en_mask;
/*
* If we are here to disable this clock, it might be immediately
* after entering into hibern8, in which case we need to make
* sure that the device ref_clk stays active for a specific time
* after hibern8 entry.
*/
if (!enable) {
unsigned long gating_wait;
gating_wait = host->hba->dev_info.clk_gating_wait_us;
if (!gating_wait) {
udelay(1);
} else {
/*
* bRefClkGatingWaitTime defines the minimum
* time for which the reference clock is
* required by device during transition from
* HS-MODE to LS-MODE or HIBERN8 state. Give it
* more delay to be on the safe side.
*/
gating_wait += 10;
usleep_range(gating_wait, gating_wait + 10);
}
}
writel_relaxed(temp, host->dev_ref_clk_ctrl_mmio);
/*
* Make sure the write to ref_clk reaches the destination and
* not stored in a Write Buffer (WB).
*/
readl(host->dev_ref_clk_ctrl_mmio);
/*
* If we call hibern8 exit after this, we need to make sure that
* device ref_clk is stable for at least 1us before the hibern8
* exit command.
*/
if (enable)
udelay(1);
host->is_dev_ref_clk_enabled = enable;
}
}
static int ufs_qcom_icc_set_bw(struct ufs_qcom_host *host, u32 mem_bw, u32 cfg_bw)
{
struct device *dev = host->hba->dev;
int ret;
ret = icc_set_bw(host->icc_ddr, 0, mem_bw);
if (ret < 0) {
dev_err(dev, "failed to set bandwidth request: %d\n", ret);
return ret;
}
ret = icc_set_bw(host->icc_cpu, 0, cfg_bw);
if (ret < 0) {
dev_err(dev, "failed to set bandwidth request: %d\n", ret);
return ret;
}
return 0;
}
static struct __ufs_qcom_bw_table ufs_qcom_get_bw_table(struct ufs_qcom_host *host)
{
struct ufs_pa_layer_attr *p = &host->dev_req_params;
int gear = max_t(u32, p->gear_rx, p->gear_tx);
int lane = max_t(u32, p->lane_rx, p->lane_tx);
if (ufshcd_is_hs_mode(p)) {
if (p->hs_rate == PA_HS_MODE_B)
return ufs_qcom_bw_table[MODE_HS_RB][gear][lane];
else
return ufs_qcom_bw_table[MODE_HS_RA][gear][lane];
} else {
return ufs_qcom_bw_table[MODE_PWM][gear][lane];
}
}
static int ufs_qcom_icc_update_bw(struct ufs_qcom_host *host)
{
struct __ufs_qcom_bw_table bw_table;
bw_table = ufs_qcom_get_bw_table(host);
return ufs_qcom_icc_set_bw(host, bw_table.mem_bw, bw_table.cfg_bw);
}
static int ufs_qcom_pwr_change_notify(struct ufs_hba *hba,
enum ufs_notify_change_status status,
struct ufs_pa_layer_attr *dev_max_params,
struct ufs_pa_layer_attr *dev_req_params)
{
struct ufs_qcom_host *host = ufshcd_get_variant(hba);
struct ufs_dev_params ufs_qcom_cap;
int ret = 0;
if (!dev_req_params) {
pr_err("%s: incoming dev_req_params is NULL\n", __func__);
return -EINVAL;
}
switch (status) {
case PRE_CHANGE:
ufshcd_init_pwr_dev_param(&ufs_qcom_cap);
ufs_qcom_cap.hs_rate = UFS_QCOM_LIMIT_HS_RATE;
/* This driver only supports symmetric gear settings, i.e. hs_tx_gear == hs_rx_gear */
ufs_qcom_cap.hs_tx_gear = ufs_qcom_cap.hs_rx_gear = ufs_qcom_get_hs_gear(hba);
ret = ufshcd_get_pwr_dev_param(&ufs_qcom_cap,
dev_max_params,
dev_req_params);
if (ret) {
dev_err(hba->dev, "%s: failed to determine capabilities\n",
__func__);
return ret;
}
/* Use the agreed gear */
host->hs_gear = dev_req_params->gear_tx;
/* enable the device ref clock before changing to HS mode */
if (!ufshcd_is_hs_mode(&hba->pwr_info) &&
ufshcd_is_hs_mode(dev_req_params))
ufs_qcom_dev_ref_clk_ctrl(host, true);
if (host->hw_ver.major >= 0x4) {
ufshcd_dme_configure_adapt(hba,
dev_req_params->gear_tx,
PA_INITIAL_ADAPT);
}
break;
case POST_CHANGE:
if (ufs_qcom_cfg_timers(hba, dev_req_params->gear_rx,
dev_req_params->pwr_rx,
dev_req_params->hs_rate, false)) {
dev_err(hba->dev, "%s: ufs_qcom_cfg_timers() failed\n",
__func__);
/*
* we return error code at the end of the routine,
* but continue to configure UFS_PHY_TX_LANE_ENABLE
* and bus voting as usual
*/
ret = -EINVAL;
}
/* cache the power mode parameters to use internally */
memcpy(&host->dev_req_params,
dev_req_params, sizeof(*dev_req_params));
ufs_qcom_icc_update_bw(host);
/* disable the device ref clock if entered PWM mode */
if (ufshcd_is_hs_mode(&hba->pwr_info) &&
!ufshcd_is_hs_mode(dev_req_params))
ufs_qcom_dev_ref_clk_ctrl(host, false);
break;
default:
ret = -EINVAL;
break;
}
return ret;
}
static int ufs_qcom_quirk_host_pa_saveconfigtime(struct ufs_hba *hba)
{
int err;
u32 pa_vs_config_reg1;
err = ufshcd_dme_get(hba, UIC_ARG_MIB(PA_VS_CONFIG_REG1),
&pa_vs_config_reg1);
if (err)
return err;
/* Allow extension of MSB bits of PA_SaveConfigTime attribute */
return ufshcd_dme_set(hba, UIC_ARG_MIB(PA_VS_CONFIG_REG1),
(pa_vs_config_reg1 | (1 << 12)));
}
static int ufs_qcom_apply_dev_quirks(struct ufs_hba *hba)
{
int err = 0;
if (hba->dev_quirks & UFS_DEVICE_QUIRK_HOST_PA_SAVECONFIGTIME)
err = ufs_qcom_quirk_host_pa_saveconfigtime(hba);
if (hba->dev_info.wmanufacturerid == UFS_VENDOR_WDC)
hba->dev_quirks |= UFS_DEVICE_QUIRK_HOST_PA_TACTIVATE;
return err;
}
static u32 ufs_qcom_get_ufs_hci_version(struct ufs_hba *hba)
{
struct ufs_qcom_host *host = ufshcd_get_variant(hba);
if (host->hw_ver.major == 0x1)
return ufshci_version(1, 1);
else
return ufshci_version(2, 0);
}
/**
* ufs_qcom_advertise_quirks - advertise the known QCOM UFS controller quirks
* @hba: host controller instance
*
* The QCOM UFS host controller might have some non-standard behaviours
* (quirks) compared to what is specified by the UFSHCI specification.
* Advertise all such quirks to the standard UFS host controller driver so
* that it takes them into account.
*/
static void ufs_qcom_advertise_quirks(struct ufs_hba *hba)
{
struct ufs_qcom_host *host = ufshcd_get_variant(hba);
if (host->hw_ver.major == 0x01) {
hba->quirks |= UFSHCD_QUIRK_DELAY_BEFORE_DME_CMDS
| UFSHCD_QUIRK_BROKEN_PA_RXHSUNTERMCAP
| UFSHCD_QUIRK_DME_PEER_ACCESS_AUTO_MODE;
if (host->hw_ver.minor == 0x0001 && host->hw_ver.step == 0x0001)
hba->quirks |= UFSHCD_QUIRK_BROKEN_INTR_AGGR;
hba->quirks |= UFSHCD_QUIRK_BROKEN_LCC;
}
if (host->hw_ver.major == 0x2) {
hba->quirks |= UFSHCD_QUIRK_BROKEN_UFS_HCI_VERSION;
if (!ufs_qcom_cap_qunipro(host))
/* Legacy UniPro mode still need following quirks */
hba->quirks |= (UFSHCD_QUIRK_DELAY_BEFORE_DME_CMDS
| UFSHCD_QUIRK_DME_PEER_ACCESS_AUTO_MODE
| UFSHCD_QUIRK_BROKEN_PA_RXHSUNTERMCAP);
}
if (host->hw_ver.major > 0x3)
hba->quirks |= UFSHCD_QUIRK_REINIT_AFTER_MAX_GEAR_SWITCH;
}
static void ufs_qcom_set_caps(struct ufs_hba *hba)
{
struct ufs_qcom_host *host = ufshcd_get_variant(hba);
hba->caps |= UFSHCD_CAP_CLK_GATING | UFSHCD_CAP_HIBERN8_WITH_CLK_GATING;
hba->caps |= UFSHCD_CAP_CLK_SCALING | UFSHCD_CAP_WB_WITH_CLK_SCALING;
hba->caps |= UFSHCD_CAP_AUTO_BKOPS_SUSPEND;
hba->caps |= UFSHCD_CAP_WB_EN;
hba->caps |= UFSHCD_CAP_AGGR_POWER_COLLAPSE;
hba->caps |= UFSHCD_CAP_RPM_AUTOSUSPEND;
if (host->hw_ver.major >= 0x2) {
host->caps = UFS_QCOM_CAP_QUNIPRO |
UFS_QCOM_CAP_RETAIN_SEC_CFG_AFTER_PWR_COLLAPSE;
}
}
/**
* ufs_qcom_setup_clocks - enables/disable clocks
* @hba: host controller instance
* @on: If true, enable clocks else disable them.
* @status: PRE_CHANGE or POST_CHANGE notify
*
* Return: 0 on success, non-zero on failure.
*/
static int ufs_qcom_setup_clocks(struct ufs_hba *hba, bool on,
enum ufs_notify_change_status status)
{
struct ufs_qcom_host *host = ufshcd_get_variant(hba);
/*
* In case ufs_qcom_init() is not yet done, simply ignore.
* This ufs_qcom_setup_clocks() shall be called from
* ufs_qcom_init() after init is done.
*/
if (!host)
return 0;
switch (status) {
case PRE_CHANGE:
if (on) {
ufs_qcom_icc_update_bw(host);
} else {
if (!ufs_qcom_is_link_active(hba)) {
/* disable device ref_clk */
ufs_qcom_dev_ref_clk_ctrl(host, false);
}
}
break;
case POST_CHANGE:
if (on) {
/* enable the device ref clock for HS mode */
if (ufshcd_is_hs_mode(&hba->pwr_info))
ufs_qcom_dev_ref_clk_ctrl(host, true);
} else {
ufs_qcom_icc_set_bw(host, ufs_qcom_bw_table[MODE_MIN][0][0].mem_bw,
ufs_qcom_bw_table[MODE_MIN][0][0].cfg_bw);
}
break;
}
return 0;
}
static int
ufs_qcom_reset_assert(struct reset_controller_dev *rcdev, unsigned long id)
{
struct ufs_qcom_host *host = rcdev_to_ufs_host(rcdev);
ufs_qcom_assert_reset(host->hba);
/* provide 1ms delay to let the reset pulse propagate. */
usleep_range(1000, 1100);
return 0;
}
static int
ufs_qcom_reset_deassert(struct reset_controller_dev *rcdev, unsigned long id)
{
struct ufs_qcom_host *host = rcdev_to_ufs_host(rcdev);
ufs_qcom_deassert_reset(host->hba);
/*
* after reset deassertion, phy will need all ref clocks,
* voltage, current to settle down before starting serdes.
*/
usleep_range(1000, 1100);
return 0;
}
static const struct reset_control_ops ufs_qcom_reset_ops = {
.assert = ufs_qcom_reset_assert,
.deassert = ufs_qcom_reset_deassert,
};
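/*
 * This reset controller is consumed by the UFS PHY; a device tree user
 * would typically reference it with something like (illustrative only):
 *   resets = <&ufs_mem_hc 0>;
 */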
static int ufs_qcom_icc_init(struct ufs_qcom_host *host)
{
struct device *dev = host->hba->dev;
int ret;
host->icc_ddr = devm_of_icc_get(dev, "ufs-ddr");
if (IS_ERR(host->icc_ddr))
return dev_err_probe(dev, PTR_ERR(host->icc_ddr),
"failed to acquire interconnect path\n");
host->icc_cpu = devm_of_icc_get(dev, "cpu-ufs");
if (IS_ERR(host->icc_cpu))
return dev_err_probe(dev, PTR_ERR(host->icc_cpu),
"failed to acquire interconnect path\n");
/*
* Set Maximum bandwidth vote before initializing the UFS controller and
* device. Ideally, a minimal interconnect vote would suffice for the
* initialization, but a max vote would allow faster initialization.
*/
ret = ufs_qcom_icc_set_bw(host, ufs_qcom_bw_table[MODE_MAX][0][0].mem_bw,
ufs_qcom_bw_table[MODE_MAX][0][0].cfg_bw);
if (ret < 0)
return dev_err_probe(dev, ret, "failed to set bandwidth request\n");
return 0;
}
/**
* ufs_qcom_init - bind phy with controller
* @hba: host controller instance
*
* Binds PHY with controller and powers up PHY enabling clocks
* and regulators.
*
* Return: -EPROBE_DEFER if binding fails, returns negative error
* on phy power up failure and returns zero on success.
*/
static int ufs_qcom_init(struct ufs_hba *hba)
{
int err;
struct device *dev = hba->dev;
struct platform_device *pdev = to_platform_device(dev);
struct ufs_qcom_host *host;
struct resource *res;
struct ufs_clk_info *clki;
host = devm_kzalloc(dev, sizeof(*host), GFP_KERNEL);
if (!host) {
dev_err(dev, "%s: no memory for qcom ufs host\n", __func__);
return -ENOMEM;
}
/* Make a two way bind between the qcom host and the hba */
host->hba = hba;
ufshcd_set_variant(hba, host);
/* Setup the optional reset control of HCI */
host->core_reset = devm_reset_control_get_optional(hba->dev, "rst");
if (IS_ERR(host->core_reset)) {
err = dev_err_probe(dev, PTR_ERR(host->core_reset),
"Failed to get reset control\n");
goto out_variant_clear;
}
/* Fire up the reset controller. Failure here is non-fatal. */
host->rcdev.of_node = dev->of_node;
host->rcdev.ops = &ufs_qcom_reset_ops;
host->rcdev.owner = dev->driver->owner;
host->rcdev.nr_resets = 1;
err = devm_reset_controller_register(dev, &host->rcdev);
if (err)
dev_warn(dev, "Failed to register reset controller\n");
if (!has_acpi_companion(dev)) {
host->generic_phy = devm_phy_get(dev, "ufsphy");
if (IS_ERR(host->generic_phy)) {
err = dev_err_probe(dev, PTR_ERR(host->generic_phy), "Failed to get PHY\n");
goto out_variant_clear;
}
}
err = ufs_qcom_icc_init(host);
if (err)
goto out_variant_clear;
host->device_reset = devm_gpiod_get_optional(dev, "reset",
GPIOD_OUT_HIGH);
if (IS_ERR(host->device_reset)) {
err = PTR_ERR(host->device_reset);
if (err != -EPROBE_DEFER)
dev_err(dev, "failed to acquire reset gpio: %d\n", err);
goto out_variant_clear;
}
ufs_qcom_get_controller_revision(hba, &host->hw_ver.major,
&host->hw_ver.minor, &host->hw_ver.step);
/*
* For newer controllers, the device reference clock control bit has
* moved inside the UFS controller register address space itself.
*/
if (host->hw_ver.major >= 0x02) {
host->dev_ref_clk_ctrl_mmio = hba->mmio_base + REG_UFS_CFG1;
host->dev_ref_clk_en_mask = BIT(26);
} else {
/* "dev_ref_clk_ctrl_mem" is optional resource */
res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
"dev_ref_clk_ctrl_mem");
if (res) {
host->dev_ref_clk_ctrl_mmio =
devm_ioremap_resource(dev, res);
if (IS_ERR(host->dev_ref_clk_ctrl_mmio))
host->dev_ref_clk_ctrl_mmio = NULL;
host->dev_ref_clk_en_mask = BIT(5);
}
}
list_for_each_entry(clki, &hba->clk_list_head, list) {
if (!strcmp(clki->name, "core_clk_unipro"))
clki->keep_link_active = true;
}
err = ufs_qcom_init_lane_clks(host);
if (err)
goto out_variant_clear;
ufs_qcom_set_caps(hba);
ufs_qcom_advertise_quirks(hba);
err = ufs_qcom_ice_init(host);
if (err)
goto out_variant_clear;
ufs_qcom_setup_clocks(hba, true, POST_CHANGE);
if (hba->dev->id < MAX_UFS_QCOM_HOSTS)
ufs_qcom_hosts[hba->dev->id] = host;
ufs_qcom_get_default_testbus_cfg(host);
err = ufs_qcom_testbus_config(host);
if (err)
/* Failure is non-fatal */
dev_warn(dev, "%s: failed to configure the testbus %d\n",
__func__, err);
/*
* Power up the PHY using the minimum supported gear (UFS_HS_G2).
* Switching to max gear will be performed during reinit if supported.
*/
host->hs_gear = UFS_HS_G2;
return 0;
out_variant_clear:
ufshcd_set_variant(hba, NULL);
return err;
}
static void ufs_qcom_exit(struct ufs_hba *hba)
{
struct ufs_qcom_host *host = ufshcd_get_variant(hba);
ufs_qcom_disable_lane_clks(host);
phy_power_off(host->generic_phy);
phy_exit(host->generic_phy);
}
static int ufs_qcom_set_dme_vs_core_clk_ctrl_clear_div(struct ufs_hba *hba,
u32 clk_cycles)
{
int err;
u32 core_clk_ctrl_reg;
if (clk_cycles > DME_VS_CORE_CLK_CTRL_MAX_CORE_CLK_1US_CYCLES_MASK)
return -EINVAL;
err = ufshcd_dme_get(hba,
UIC_ARG_MIB(DME_VS_CORE_CLK_CTRL),
&core_clk_ctrl_reg);
if (err)
return err;
core_clk_ctrl_reg &= ~DME_VS_CORE_CLK_CTRL_MAX_CORE_CLK_1US_CYCLES_MASK;
core_clk_ctrl_reg |= clk_cycles;
/* Clear CORE_CLK_DIV_EN */
core_clk_ctrl_reg &= ~DME_VS_CORE_CLK_CTRL_CORE_CLK_DIV_EN_BIT;
return ufshcd_dme_set(hba,
UIC_ARG_MIB(DME_VS_CORE_CLK_CTRL),
core_clk_ctrl_reg);
}
static int ufs_qcom_clk_scale_up_pre_change(struct ufs_hba *hba)
{
/* nothing to do as of now */
return 0;
}
static int ufs_qcom_clk_scale_up_post_change(struct ufs_hba *hba)
{
struct ufs_qcom_host *host = ufshcd_get_variant(hba);
if (!ufs_qcom_cap_qunipro(host))
return 0;
/* set unipro core clock cycles to 150 and clear clock divider */
return ufs_qcom_set_dme_vs_core_clk_ctrl_clear_div(hba, 150);
}
static int ufs_qcom_clk_scale_down_pre_change(struct ufs_hba *hba)
{
struct ufs_qcom_host *host = ufshcd_get_variant(hba);
int err;
u32 core_clk_ctrl_reg;
if (!ufs_qcom_cap_qunipro(host))
return 0;
err = ufshcd_dme_get(hba,
UIC_ARG_MIB(DME_VS_CORE_CLK_CTRL),
&core_clk_ctrl_reg);
/* make sure CORE_CLK_DIV_EN is cleared */
if (!err &&
(core_clk_ctrl_reg & DME_VS_CORE_CLK_CTRL_CORE_CLK_DIV_EN_BIT)) {
core_clk_ctrl_reg &= ~DME_VS_CORE_CLK_CTRL_CORE_CLK_DIV_EN_BIT;
err = ufshcd_dme_set(hba,
UIC_ARG_MIB(DME_VS_CORE_CLK_CTRL),
core_clk_ctrl_reg);
}
return err;
}
static int ufs_qcom_clk_scale_down_post_change(struct ufs_hba *hba)
{
struct ufs_qcom_host *host = ufshcd_get_variant(hba);
if (!ufs_qcom_cap_qunipro(host))
return 0;
/* set unipro core clock cycles to 75 and clear clock divider */
return ufs_qcom_set_dme_vs_core_clk_ctrl_clear_div(hba, 75);
}
static int ufs_qcom_clk_scale_notify(struct ufs_hba *hba,
bool scale_up, enum ufs_notify_change_status status)
{
struct ufs_qcom_host *host = ufshcd_get_variant(hba);
struct ufs_pa_layer_attr *dev_req_params = &host->dev_req_params;
int err = 0;
/* check the host controller state before sending hibern8 cmd */
if (!ufshcd_is_hba_active(hba))
return 0;
if (status == PRE_CHANGE) {
err = ufshcd_uic_hibern8_enter(hba);
if (err)
return err;
if (scale_up)
err = ufs_qcom_clk_scale_up_pre_change(hba);
else
err = ufs_qcom_clk_scale_down_pre_change(hba);
if (err)
ufshcd_uic_hibern8_exit(hba);
} else {
if (scale_up)
err = ufs_qcom_clk_scale_up_post_change(hba);
else
err = ufs_qcom_clk_scale_down_post_change(hba);
if (err) {
ufshcd_uic_hibern8_exit(hba);
return err;
}
ufs_qcom_cfg_timers(hba,
dev_req_params->gear_rx,
dev_req_params->pwr_rx,
dev_req_params->hs_rate,
false);
ufs_qcom_icc_update_bw(host);
ufshcd_uic_hibern8_exit(hba);
}
return 0;
}
static void ufs_qcom_enable_test_bus(struct ufs_qcom_host *host)
{
ufshcd_rmwl(host->hba, UFS_REG_TEST_BUS_EN,
UFS_REG_TEST_BUS_EN, REG_UFS_CFG1);
ufshcd_rmwl(host->hba, TEST_BUS_EN, TEST_BUS_EN, REG_UFS_CFG1);
}
static void ufs_qcom_get_default_testbus_cfg(struct ufs_qcom_host *host)
{
/* provide a legal default configuration */
host->testbus.select_major = TSTBUS_UNIPRO;
host->testbus.select_minor = 37;
}
static bool ufs_qcom_testbus_cfg_is_ok(struct ufs_qcom_host *host)
{
if (host->testbus.select_major >= TSTBUS_MAX) {
dev_err(host->hba->dev,
"%s: UFS_CFG1[TEST_BUS_SEL} may not equal 0x%05X\n",
__func__, host->testbus.select_major);
return false;
}
return true;
}
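/*
 * Callers must populate host->testbus.select_major/select_minor before
 * invoking ufs_qcom_testbus_config(); ufs_qcom_get_default_testbus_cfg()
 * above provides a known-good default (UniPro test bus, minor 37).
 */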
int ufs_qcom_testbus_config(struct ufs_qcom_host *host)
{
int reg;
int offset;
u32 mask = TEST_BUS_SUB_SEL_MASK;
if (!host)
return -EINVAL;
if (!ufs_qcom_testbus_cfg_is_ok(host))
return -EPERM;
switch (host->testbus.select_major) {
case TSTBUS_UAWM:
reg = UFS_TEST_BUS_CTRL_0;
offset = 24;
break;
case TSTBUS_UARM:
reg = UFS_TEST_BUS_CTRL_0;
offset = 16;
break;
case TSTBUS_TXUC:
reg = UFS_TEST_BUS_CTRL_0;
offset = 8;
break;
case TSTBUS_RXUC:
reg = UFS_TEST_BUS_CTRL_0;
offset = 0;
break;
case TSTBUS_DFC:
reg = UFS_TEST_BUS_CTRL_1;
offset = 24;
break;
case TSTBUS_TRLUT:
reg = UFS_TEST_BUS_CTRL_1;
offset = 16;
break;
case TSTBUS_TMRLUT:
reg = UFS_TEST_BUS_CTRL_1;
offset = 8;
break;
case TSTBUS_OCSC:
reg = UFS_TEST_BUS_CTRL_1;
offset = 0;
break;
case TSTBUS_WRAPPER:
reg = UFS_TEST_BUS_CTRL_2;
offset = 16;
break;
case TSTBUS_COMBINED:
reg = UFS_TEST_BUS_CTRL_2;
offset = 8;
break;
case TSTBUS_UTP_HCI:
reg = UFS_TEST_BUS_CTRL_2;
offset = 0;
break;
case TSTBUS_UNIPRO:
reg = UFS_UNIPRO_CFG;
offset = 20;
mask = 0xFFF;
break;
/*
* No need for a default case, since
* ufs_qcom_testbus_cfg_is_ok() checks that the configuration
* is legal
*/
}
mask <<= offset;
ufshcd_rmwl(host->hba, TEST_BUS_SEL,
(u32)host->testbus.select_major << 19,
REG_UFS_CFG1);
ufshcd_rmwl(host->hba, mask,
(u32)host->testbus.select_minor << offset,
reg);
ufs_qcom_enable_test_bus(host);
/*
* Make sure the test bus configuration is
* committed before returning.
*/
mb();
return 0;
}
static void ufs_qcom_dump_dbg_regs(struct ufs_hba *hba)
{
u32 reg;
struct ufs_qcom_host *host;
host = ufshcd_get_variant(hba);
ufshcd_dump_regs(hba, REG_UFS_SYS1CLK_1US, 16 * 4,
"HCI Vendor Specific Registers ");
reg = ufs_qcom_get_debug_reg_offset(host, UFS_UFS_DBG_RD_REG_OCSC);
ufshcd_dump_regs(hba, reg, 44 * 4, "UFS_UFS_DBG_RD_REG_OCSC ");
reg = ufshcd_readl(hba, REG_UFS_CFG1);
reg |= UTP_DBG_RAMS_EN;
ufshcd_writel(hba, reg, REG_UFS_CFG1);
reg = ufs_qcom_get_debug_reg_offset(host, UFS_UFS_DBG_RD_EDTL_RAM);
ufshcd_dump_regs(hba, reg, 32 * 4, "UFS_UFS_DBG_RD_EDTL_RAM ");
reg = ufs_qcom_get_debug_reg_offset(host, UFS_UFS_DBG_RD_DESC_RAM);
ufshcd_dump_regs(hba, reg, 128 * 4, "UFS_UFS_DBG_RD_DESC_RAM ");
reg = ufs_qcom_get_debug_reg_offset(host, UFS_UFS_DBG_RD_PRDT_RAM);
ufshcd_dump_regs(hba, reg, 64 * 4, "UFS_UFS_DBG_RD_PRDT_RAM ");
/* clear bit 17 - UTP_DBG_RAMS_EN */
ufshcd_rmwl(hba, UTP_DBG_RAMS_EN, 0, REG_UFS_CFG1);
reg = ufs_qcom_get_debug_reg_offset(host, UFS_DBG_RD_REG_UAWM);
ufshcd_dump_regs(hba, reg, 4 * 4, "UFS_DBG_RD_REG_UAWM ");
reg = ufs_qcom_get_debug_reg_offset(host, UFS_DBG_RD_REG_UARM);
ufshcd_dump_regs(hba, reg, 4 * 4, "UFS_DBG_RD_REG_UARM ");
reg = ufs_qcom_get_debug_reg_offset(host, UFS_DBG_RD_REG_TXUC);
ufshcd_dump_regs(hba, reg, 48 * 4, "UFS_DBG_RD_REG_TXUC ");
reg = ufs_qcom_get_debug_reg_offset(host, UFS_DBG_RD_REG_RXUC);
ufshcd_dump_regs(hba, reg, 27 * 4, "UFS_DBG_RD_REG_RXUC ");
reg = ufs_qcom_get_debug_reg_offset(host, UFS_DBG_RD_REG_DFC);
ufshcd_dump_regs(hba, reg, 19 * 4, "UFS_DBG_RD_REG_DFC ");
reg = ufs_qcom_get_debug_reg_offset(host, UFS_DBG_RD_REG_TRLUT);
ufshcd_dump_regs(hba, reg, 34 * 4, "UFS_DBG_RD_REG_TRLUT ");
reg = ufs_qcom_get_debug_reg_offset(host, UFS_DBG_RD_REG_TMRLUT);
ufshcd_dump_regs(hba, reg, 9 * 4, "UFS_DBG_RD_REG_TMRLUT ");
}
/**
* ufs_qcom_device_reset() - toggle the (optional) device reset line
* @hba: per-adapter instance
*
* Toggles the (optional) reset line to reset the attached device.
*/
static int ufs_qcom_device_reset(struct ufs_hba *hba)
{
struct ufs_qcom_host *host = ufshcd_get_variant(hba);
/* reset gpio is optional */
if (!host->device_reset)
return -EOPNOTSUPP;
/*
* The UFS device shall detect reset pulses of 1us; sleep for 10us to
* be on the safe side.
*/
ufs_qcom_device_reset_ctrl(hba, true);
usleep_range(10, 15);
ufs_qcom_device_reset_ctrl(hba, false);
usleep_range(10, 15);
return 0;
}
#if IS_ENABLED(CONFIG_DEVFREQ_GOV_SIMPLE_ONDEMAND)
static void ufs_qcom_config_scaling_param(struct ufs_hba *hba,
struct devfreq_dev_profile *p,
struct devfreq_simple_ondemand_data *d)
{
p->polling_ms = 60;
p->timer = DEVFREQ_TIMER_DELAYED;
d->upthreshold = 70;
d->downdifferential = 5;
}
#else
static void ufs_qcom_config_scaling_param(struct ufs_hba *hba,
struct devfreq_dev_profile *p,
struct devfreq_simple_ondemand_data *data)
{
}
#endif
static void ufs_qcom_reinit_notify(struct ufs_hba *hba)
{
struct ufs_qcom_host *host = ufshcd_get_variant(hba);
phy_power_off(host->generic_phy);
}
/* Resources */
static const struct ufshcd_res_info ufs_res_info[RES_MAX] = {
{.name = "ufs_mem",},
{.name = "mcq",},
/* Submission Queue DAO */
{.name = "mcq_sqd",},
/* Submission Queue Interrupt Status */
{.name = "mcq_sqis",},
/* Completion Queue DAO */
{.name = "mcq_cqd",},
/* Completion Queue Interrupt Status */
{.name = "mcq_cqis",},
/* MCQ vendor specific */
{.name = "mcq_vs",},
};
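/*
 * The resource names above are matched by name against the platform
 * resources (i.e. typically the reg-names entries in the device tree).
 * Only "ufs_mem" is mandatory; missing MCQ regions are carved out of
 * "ufs_mem" by ufs_qcom_mcq_config_resource() below.
 */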
static int ufs_qcom_mcq_config_resource(struct ufs_hba *hba)
{
struct platform_device *pdev = to_platform_device(hba->dev);
struct ufshcd_res_info *res;
struct resource *res_mem, *res_mcq;
int i, ret = 0;
memcpy(hba->res, ufs_res_info, sizeof(ufs_res_info));
for (i = 0; i < RES_MAX; i++) {
res = &hba->res[i];
res->resource = platform_get_resource_byname(pdev,
IORESOURCE_MEM,
res->name);
if (!res->resource) {
dev_info(hba->dev, "Resource %s not provided\n", res->name);
if (i == RES_UFS)
return -ENOMEM;
continue;
} else if (i == RES_UFS) {
res_mem = res->resource;
res->base = hba->mmio_base;
continue;
}
res->base = devm_ioremap_resource(hba->dev, res->resource);
if (IS_ERR(res->base)) {
dev_err(hba->dev, "Failed to map res %s, err=%d\n",
res->name, (int)PTR_ERR(res->base));
ret = PTR_ERR(res->base);
res->base = NULL;
return ret;
}
}
/* MCQ resource provided in DT */
res = &hba->res[RES_MCQ];
/* Bail if MCQ resource is provided */
if (res->base)
goto out;
/* Explicitly allocate MCQ resource from ufs_mem */
res_mcq = devm_kzalloc(hba->dev, sizeof(*res_mcq), GFP_KERNEL);
if (!res_mcq)
return -ENOMEM;
res_mcq->start = res_mem->start +
MCQ_SQATTR_OFFSET(hba->mcq_capabilities);
res_mcq->end = res_mcq->start + hba->nr_hw_queues * MCQ_QCFG_SIZE - 1;
res_mcq->flags = res_mem->flags;
res_mcq->name = "mcq";
ret = insert_resource(&iomem_resource, res_mcq);
if (ret) {
dev_err(hba->dev, "Failed to insert MCQ resource, err=%d\n",
ret);
return ret;
}
res->base = devm_ioremap_resource(hba->dev, res_mcq);
if (IS_ERR(res->base)) {
dev_err(hba->dev, "MCQ registers mapping failed, err=%d\n",
(int)PTR_ERR(res->base));
ret = PTR_ERR(res->base);
goto ioremap_err;
}
out:
hba->mcq_base = res->base;
return 0;
ioremap_err:
res->base = NULL;
remove_resource(res_mcq);
return ret;
}
static int ufs_qcom_op_runtime_config(struct ufs_hba *hba)
{
struct ufshcd_res_info *mem_res, *sqdao_res;
struct ufshcd_mcq_opr_info_t *opr;
int i;
mem_res = &hba->res[RES_UFS];
sqdao_res = &hba->res[RES_MCQ_SQD];
if (!mem_res->base || !sqdao_res->base)
return -EINVAL;
for (i = 0; i < OPR_MAX; i++) {
opr = &hba->mcq_opr[i];
opr->offset = sqdao_res->resource->start -
mem_res->resource->start + 0x40 * i;
opr->stride = 0x100;
opr->base = sqdao_res->base + 0x40 * i;
}
return 0;
}
static int ufs_qcom_get_hba_mac(struct ufs_hba *hba)
{
/* Qualcomm HC supports up to 64 */
return MAX_SUPP_MAC;
}
static int ufs_qcom_get_outstanding_cqs(struct ufs_hba *hba,
unsigned long *ocqs)
{
struct ufshcd_res_info *mcq_vs_res = &hba->res[RES_MCQ_VS];
if (!mcq_vs_res->base)
return -EINVAL;
*ocqs = readl(mcq_vs_res->base + UFS_MEM_CQIS_VS);
return 0;
}
static void ufs_qcom_write_msi_msg(struct msi_desc *desc, struct msi_msg *msg)
{
struct device *dev = msi_desc_to_dev(desc);
struct ufs_hba *hba = dev_get_drvdata(dev);
ufshcd_mcq_config_esi(hba, msg);
}
static irqreturn_t ufs_qcom_mcq_esi_handler(int irq, void *data)
{
struct msi_desc *desc = data;
struct device *dev = msi_desc_to_dev(desc);
struct ufs_hba *hba = dev_get_drvdata(dev);
u32 id = desc->msi_index;
struct ufs_hw_queue *hwq = &hba->uhq[id];
ufshcd_mcq_write_cqis(hba, 0x1, id);
ufshcd_mcq_poll_cqe_lock(hba, hwq);
return IRQ_HANDLED;
}
static int ufs_qcom_config_esi(struct ufs_hba *hba)
{
struct ufs_qcom_host *host = ufshcd_get_variant(hba);
struct msi_desc *desc;
struct msi_desc *failed_desc = NULL;
int nr_irqs, ret;
if (host->esi_enabled)
return 0;
/*
* 1. We only handle CQs as of now.
* 2. Poll queues do not need ESI.
*/
nr_irqs = hba->nr_hw_queues - hba->nr_queues[HCTX_TYPE_POLL];
ret = platform_msi_domain_alloc_irqs(hba->dev, nr_irqs,
ufs_qcom_write_msi_msg);
if (ret) {
dev_err(hba->dev, "Failed to request Platform MSI %d\n", ret);
goto out;
}
msi_lock_descs(hba->dev);
msi_for_each_desc(desc, hba->dev, MSI_DESC_ALL) {
ret = devm_request_irq(hba->dev, desc->irq,
ufs_qcom_mcq_esi_handler,
IRQF_SHARED, "qcom-mcq-esi", desc);
if (ret) {
dev_err(hba->dev, "%s: Fail to request IRQ for %d, err = %d\n",
__func__, desc->irq, ret);
failed_desc = desc;
break;
}
}
msi_unlock_descs(hba->dev);
if (ret) {
/* Rewind */
msi_lock_descs(hba->dev);
msi_for_each_desc(desc, hba->dev, MSI_DESC_ALL) {
if (desc == failed_desc)
break;
devm_free_irq(hba->dev, desc->irq, hba);
}
msi_unlock_descs(hba->dev);
platform_msi_domain_free_irqs(hba->dev);
} else {
if (host->hw_ver.major == 6 && host->hw_ver.minor == 0 &&
host->hw_ver.step == 0) {
ufshcd_writel(hba,
ufshcd_readl(hba, REG_UFS_CFG3) | 0x1F000,
REG_UFS_CFG3);
}
ufshcd_mcq_enable_esi(hba);
}
out:
if (!ret)
host->esi_enabled = true;
return ret;
}
/*
* ufs_hba_qcom_vops - UFS QCOM specific variant operations
*
* The variant operations configure the necessary controller and PHY
* handshake during initialization.
*/
static const struct ufs_hba_variant_ops ufs_hba_qcom_vops = {
.name = "qcom",
.init = ufs_qcom_init,
.exit = ufs_qcom_exit,
.get_ufs_hci_version = ufs_qcom_get_ufs_hci_version,
.clk_scale_notify = ufs_qcom_clk_scale_notify,
.setup_clocks = ufs_qcom_setup_clocks,
.hce_enable_notify = ufs_qcom_hce_enable_notify,
.link_startup_notify = ufs_qcom_link_startup_notify,
.pwr_change_notify = ufs_qcom_pwr_change_notify,
.apply_dev_quirks = ufs_qcom_apply_dev_quirks,
.suspend = ufs_qcom_suspend,
.resume = ufs_qcom_resume,
.dbg_register_dump = ufs_qcom_dump_dbg_regs,
.device_reset = ufs_qcom_device_reset,
.config_scaling_param = ufs_qcom_config_scaling_param,
.program_key = ufs_qcom_ice_program_key,
.reinit_notify = ufs_qcom_reinit_notify,
.mcq_config_resource = ufs_qcom_mcq_config_resource,
.get_hba_mac = ufs_qcom_get_hba_mac,
.op_runtime_config = ufs_qcom_op_runtime_config,
.get_outstanding_cqs = ufs_qcom_get_outstanding_cqs,
.config_esi = ufs_qcom_config_esi,
};
/**
* ufs_qcom_probe - probe routine of the driver
* @pdev: pointer to Platform device handle
*
* Return: zero for success and non-zero for failure.
*/
static int ufs_qcom_probe(struct platform_device *pdev)
{
int err;
struct device *dev = &pdev->dev;
/* Perform generic probe */
err = ufshcd_pltfrm_init(pdev, &ufs_hba_qcom_vops);
if (err)
return dev_err_probe(dev, err, "ufshcd_pltfrm_init() failed\n");
return 0;
}
/**
* ufs_qcom_remove - set driver_data of the device to NULL
* @pdev: pointer to platform device handle
*
* Return: always 0.
*/
static int ufs_qcom_remove(struct platform_device *pdev)
{
struct ufs_hba *hba = platform_get_drvdata(pdev);
pm_runtime_get_sync(&(pdev)->dev);
ufshcd_remove(hba);
platform_msi_domain_free_irqs(hba->dev);
return 0;
}
static const struct of_device_id ufs_qcom_of_match[] __maybe_unused = {
{ .compatible = "qcom,ufshc"},
{},
};
MODULE_DEVICE_TABLE(of, ufs_qcom_of_match);
#ifdef CONFIG_ACPI
static const struct acpi_device_id ufs_qcom_acpi_match[] = {
{ "QCOM24A5" },
{ },
};
MODULE_DEVICE_TABLE(acpi, ufs_qcom_acpi_match);
#endif
static const struct dev_pm_ops ufs_qcom_pm_ops = {
SET_RUNTIME_PM_OPS(ufshcd_runtime_suspend, ufshcd_runtime_resume, NULL)
.prepare = ufshcd_suspend_prepare,
.complete = ufshcd_resume_complete,
#ifdef CONFIG_PM_SLEEP
.suspend = ufshcd_system_suspend,
.resume = ufshcd_system_resume,
.freeze = ufshcd_system_freeze,
.restore = ufshcd_system_restore,
.thaw = ufshcd_system_thaw,
#endif
};
static struct platform_driver ufs_qcom_pltform = {
.probe = ufs_qcom_probe,
.remove = ufs_qcom_remove,
.driver = {
.name = "ufshcd-qcom",
.pm = &ufs_qcom_pm_ops,
.of_match_table = of_match_ptr(ufs_qcom_of_match),
.acpi_match_table = ACPI_PTR(ufs_qcom_acpi_match),
},
};
module_platform_driver(ufs_qcom_pltform);
MODULE_LICENSE("GPL v2");
| linux-master | drivers/ufs/host/ufs-qcom.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2019 MediaTek Inc.
* Authors:
* Stanley Chu <[email protected]>
* Peter Wang <[email protected]>
*/
#include <linux/arm-smccc.h>
#include <linux/bitfield.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/of_platform.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
#include <linux/pm_qos.h>
#include <linux/regulator/consumer.h>
#include <linux/reset.h>
#include <linux/soc/mediatek/mtk_sip_svc.h>
#include <ufs/ufshcd.h>
#include "ufshcd-pltfrm.h"
#include <ufs/ufs_quirks.h>
#include <ufs/unipro.h>
#include "ufs-mediatek.h"
static int ufs_mtk_config_mcq(struct ufs_hba *hba, bool irq);
#define CREATE_TRACE_POINTS
#include "ufs-mediatek-trace.h"
#undef CREATE_TRACE_POINTS
#define MAX_SUPP_MAC 64
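/*
* The MCQ queue region offset is taken from the queue configuration
* pointer field (bits 23:16 of the MCQ capability value), expressed in
* 0x200-byte units per the UFSHCI MCQ register layout.
*/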
#define MCQ_QUEUE_OFFSET(c) ((((c) >> 16) & 0xFF) * 0x200)
static const struct ufs_dev_quirk ufs_mtk_dev_fixups[] = {
{ .wmanufacturerid = UFS_ANY_VENDOR,
.model = UFS_ANY_MODEL,
.quirk = UFS_DEVICE_QUIRK_DELAY_AFTER_LPM |
UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM },
{ .wmanufacturerid = UFS_VENDOR_SKHYNIX,
.model = "H9HQ21AFAMZDAR",
.quirk = UFS_DEVICE_QUIRK_SUPPORT_EXTENDED_FEATURES },
{}
};
static const struct of_device_id ufs_mtk_of_match[] = {
{ .compatible = "mediatek,mt8183-ufshci" },
{},
};
/*
* Details of UIC Errors
*/
static const char *const ufs_uic_err_str[] = {
"PHY Adapter Layer",
"Data Link Layer",
"Network Link Layer",
"Transport Link Layer",
"DME"
};
static const char *const ufs_uic_pa_err_str[] = {
"PHY error on Lane 0",
"PHY error on Lane 1",
"PHY error on Lane 2",
"PHY error on Lane 3",
"Generic PHY Adapter Error. This should be the LINERESET indication"
};
static const char *const ufs_uic_dl_err_str[] = {
"NAC_RECEIVED",
"TCx_REPLAY_TIMER_EXPIRED",
"AFCx_REQUEST_TIMER_EXPIRED",
"FCx_PROTECTION_TIMER_EXPIRED",
"CRC_ERROR",
"RX_BUFFER_OVERFLOW",
"MAX_FRAME_LENGTH_EXCEEDED",
"WRONG_SEQUENCE_NUMBER",
"AFC_FRAME_SYNTAX_ERROR",
"NAC_FRAME_SYNTAX_ERROR",
"EOF_SYNTAX_ERROR",
"FRAME_SYNTAX_ERROR",
"BAD_CTRL_SYMBOL_TYPE",
"PA_INIT_ERROR",
"PA_ERROR_IND_RECEIVED",
"PA_INIT"
};
static bool ufs_mtk_is_boost_crypt_enabled(struct ufs_hba *hba)
{
struct ufs_mtk_host *host = ufshcd_get_variant(hba);
return !!(host->caps & UFS_MTK_CAP_BOOST_CRYPT_ENGINE);
}
static bool ufs_mtk_is_va09_supported(struct ufs_hba *hba)
{
struct ufs_mtk_host *host = ufshcd_get_variant(hba);
return !!(host->caps & UFS_MTK_CAP_VA09_PWR_CTRL);
}
static bool ufs_mtk_is_broken_vcc(struct ufs_hba *hba)
{
struct ufs_mtk_host *host = ufshcd_get_variant(hba);
return !!(host->caps & UFS_MTK_CAP_BROKEN_VCC);
}
static bool ufs_mtk_is_pmc_via_fastauto(struct ufs_hba *hba)
{
struct ufs_mtk_host *host = ufshcd_get_variant(hba);
return !!(host->caps & UFS_MTK_CAP_PMC_VIA_FASTAUTO);
}
static void ufs_mtk_cfg_unipro_cg(struct ufs_hba *hba, bool enable)
{
u32 tmp;
if (enable) {
ufshcd_dme_get(hba,
UIC_ARG_MIB(VS_SAVEPOWERCONTROL), &tmp);
tmp = tmp |
(1 << RX_SYMBOL_CLK_GATE_EN) |
(1 << SYS_CLK_GATE_EN) |
(1 << TX_CLK_GATE_EN);
ufshcd_dme_set(hba,
UIC_ARG_MIB(VS_SAVEPOWERCONTROL), tmp);
ufshcd_dme_get(hba,
UIC_ARG_MIB(VS_DEBUGCLOCKENABLE), &tmp);
tmp = tmp & ~(1 << TX_SYMBOL_CLK_REQ_FORCE);
ufshcd_dme_set(hba,
UIC_ARG_MIB(VS_DEBUGCLOCKENABLE), tmp);
} else {
ufshcd_dme_get(hba,
UIC_ARG_MIB(VS_SAVEPOWERCONTROL), &tmp);
tmp = tmp & ~((1 << RX_SYMBOL_CLK_GATE_EN) |
(1 << SYS_CLK_GATE_EN) |
(1 << TX_CLK_GATE_EN));
ufshcd_dme_set(hba,
UIC_ARG_MIB(VS_SAVEPOWERCONTROL), tmp);
ufshcd_dme_get(hba,
UIC_ARG_MIB(VS_DEBUGCLOCKENABLE), &tmp);
tmp = tmp | (1 << TX_SYMBOL_CLK_REQ_FORCE);
ufshcd_dme_set(hba,
UIC_ARG_MIB(VS_DEBUGCLOCKENABLE), tmp);
}
}
static void ufs_mtk_crypto_enable(struct ufs_hba *hba)
{
struct arm_smccc_res res;
ufs_mtk_crypto_ctrl(res, 1);
if (res.a0) {
dev_info(hba->dev, "%s: crypto enable failed, err: %lu\n",
__func__, res.a0);
hba->caps &= ~UFSHCD_CAP_CRYPTO;
}
}
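/*
* Fully reset the host IP: assert the HCI, crypto and UniPro resets,
* hold them briefly, then release them in reverse order.
*/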
static void ufs_mtk_host_reset(struct ufs_hba *hba)
{
struct ufs_mtk_host *host = ufshcd_get_variant(hba);
reset_control_assert(host->hci_reset);
reset_control_assert(host->crypto_reset);
reset_control_assert(host->unipro_reset);
usleep_range(100, 110);
reset_control_deassert(host->unipro_reset);
reset_control_deassert(host->crypto_reset);
reset_control_deassert(host->hci_reset);
}
static void ufs_mtk_init_reset_control(struct ufs_hba *hba,
struct reset_control **rc,
char *str)
{
*rc = devm_reset_control_get(hba->dev, str);
if (IS_ERR(*rc)) {
dev_info(hba->dev, "Failed to get reset control %s: %ld\n",
str, PTR_ERR(*rc));
*rc = NULL;
}
}
static void ufs_mtk_init_reset(struct ufs_hba *hba)
{
struct ufs_mtk_host *host = ufshcd_get_variant(hba);
ufs_mtk_init_reset_control(hba, &host->hci_reset,
"hci_rst");
ufs_mtk_init_reset_control(hba, &host->unipro_reset,
"unipro_rst");
ufs_mtk_init_reset_control(hba, &host->crypto_reset,
"crypto_rst");
}
static int ufs_mtk_hce_enable_notify(struct ufs_hba *hba,
enum ufs_notify_change_status status)
{
struct ufs_mtk_host *host = ufshcd_get_variant(hba);
if (status == PRE_CHANGE) {
if (host->unipro_lpm) {
hba->vps->hba_enable_delay_us = 0;
} else {
hba->vps->hba_enable_delay_us = 600;
ufs_mtk_host_reset(hba);
}
if (hba->caps & UFSHCD_CAP_CRYPTO)
ufs_mtk_crypto_enable(hba);
if (host->caps & UFS_MTK_CAP_DISABLE_AH8) {
ufshcd_writel(hba, 0,
REG_AUTO_HIBERNATE_IDLE_TIMER);
hba->capabilities &= ~MASK_AUTO_HIBERN8_SUPPORT;
hba->ahit = 0;
}
/*
* Turn on CLK_CG early to bypass the abnormal ERR_CHK signal
* and prevent a host hang.
*/
ufshcd_writel(hba,
ufshcd_readl(hba, REG_UFS_XOUFS_CTRL) | 0x80,
REG_UFS_XOUFS_CTRL);
}
return 0;
}
static int ufs_mtk_bind_mphy(struct ufs_hba *hba)
{
struct ufs_mtk_host *host = ufshcd_get_variant(hba);
struct device *dev = hba->dev;
struct device_node *np = dev->of_node;
int err = 0;
host->mphy = devm_of_phy_get_by_index(dev, np, 0);
if (host->mphy == ERR_PTR(-EPROBE_DEFER)) {
/*
* The UFS driver might be probed before the phy driver is.
* In that case, return -EPROBE_DEFER so probing is retried later.
*/
err = -EPROBE_DEFER;
dev_info(dev,
"%s: required phy hasn't probed yet. err = %d\n",
__func__, err);
} else if (IS_ERR(host->mphy)) {
err = PTR_ERR(host->mphy);
if (err != -ENODEV) {
dev_info(dev, "%s: PHY get failed %d\n", __func__,
err);
}
}
if (err)
host->mphy = NULL;
/*
* Allow unbound mphy because not every platform needs specific
* mphy control.
*/
if (err == -ENODEV)
err = 0;
return err;
}
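/*
* Reference-clock control is a request/ack handshake through
* REG_UFS_REFCLK_CTRL: write the request bit, then poll until the
* ack bit reported by the platform matches the requested state.
*/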
static int ufs_mtk_setup_ref_clk(struct ufs_hba *hba, bool on)
{
struct ufs_mtk_host *host = ufshcd_get_variant(hba);
struct arm_smccc_res res;
ktime_t timeout, time_checked;
u32 value;
if (host->ref_clk_enabled == on)
return 0;
ufs_mtk_ref_clk_notify(on, PRE_CHANGE, res);
if (on) {
ufshcd_writel(hba, REFCLK_REQUEST, REG_UFS_REFCLK_CTRL);
} else {
ufshcd_delay_us(host->ref_clk_gating_wait_us, 10);
ufshcd_writel(hba, REFCLK_RELEASE, REG_UFS_REFCLK_CTRL);
}
/* Wait for ack */
timeout = ktime_add_us(ktime_get(), REFCLK_REQ_TIMEOUT_US);
do {
time_checked = ktime_get();
value = ufshcd_readl(hba, REG_UFS_REFCLK_CTRL);
/* Wait until ack bit equals to req bit */
if (((value & REFCLK_ACK) >> 1) == (value & REFCLK_REQUEST))
goto out;
usleep_range(100, 200);
} while (ktime_before(time_checked, timeout));
dev_err(hba->dev, "missing ack of refclk req, reg: 0x%x\n", value);
ufs_mtk_ref_clk_notify(host->ref_clk_enabled, POST_CHANGE, res);
return -ETIMEDOUT;
out:
host->ref_clk_enabled = on;
if (on)
ufshcd_delay_us(host->ref_clk_ungating_wait_us, 10);
ufs_mtk_ref_clk_notify(on, POST_CHANGE, res);
return 0;
}
static void ufs_mtk_setup_ref_clk_wait_us(struct ufs_hba *hba,
u16 gating_us)
{
struct ufs_mtk_host *host = ufshcd_get_variant(hba);
if (hba->dev_info.clk_gating_wait_us) {
host->ref_clk_gating_wait_us =
hba->dev_info.clk_gating_wait_us;
} else {
host->ref_clk_gating_wait_us = gating_us;
}
host->ref_clk_ungating_wait_us = REFCLK_DEFAULT_WAIT_US;
}
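/*
* Route internal debug signals to REG_UFS_PROBE. The selector values
* are hardware specific and differ between IP versions (0x36 and
* later use the extended selector banks).
*/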
static void ufs_mtk_dbg_sel(struct ufs_hba *hba)
{
struct ufs_mtk_host *host = ufshcd_get_variant(hba);
if (((host->ip_ver >> 16) & 0xFF) >= 0x36) {
ufshcd_writel(hba, 0x820820, REG_UFS_DEBUG_SEL);
ufshcd_writel(hba, 0x0, REG_UFS_DEBUG_SEL_B0);
ufshcd_writel(hba, 0x55555555, REG_UFS_DEBUG_SEL_B1);
ufshcd_writel(hba, 0xaaaaaaaa, REG_UFS_DEBUG_SEL_B2);
ufshcd_writel(hba, 0xffffffff, REG_UFS_DEBUG_SEL_B3);
} else {
ufshcd_writel(hba, 0x20, REG_UFS_DEBUG_SEL);
}
}
static void ufs_mtk_wait_idle_state(struct ufs_hba *hba,
unsigned long retry_ms)
{
u64 timeout, time_checked;
u32 val, sm;
bool wait_idle;
/* cannot use plain ktime_get() in suspend */
timeout = ktime_get_mono_fast_ns() + retry_ms * 1000000UL;
/* wait a specific time after check base */
udelay(10);
wait_idle = false;
do {
time_checked = ktime_get_mono_fast_ns();
ufs_mtk_dbg_sel(hba);
val = ufshcd_readl(hba, REG_UFS_PROBE);
sm = val & 0x1f;
/*
* if state is in H8 enter and H8 enter confirm
* wait until return to idle state.
*/
if ((sm >= VS_HIB_ENTER) && (sm <= VS_HIB_EXIT)) {
wait_idle = true;
udelay(50);
continue;
} else if (!wait_idle)
break;
if (wait_idle && (sm == VS_HCE_BASE))
break;
} while (time_checked < timeout);
if (wait_idle && sm != VS_HCE_BASE)
dev_info(hba->dev, "wait idle tmo: 0x%x\n", val);
}
static int ufs_mtk_wait_link_state(struct ufs_hba *hba, u32 state,
unsigned long max_wait_ms)
{
ktime_t timeout, time_checked;
u32 val;
timeout = ktime_add_ms(ktime_get(), max_wait_ms);
do {
time_checked = ktime_get();
ufs_mtk_dbg_sel(hba);
val = ufshcd_readl(hba, REG_UFS_PROBE);
val = val >> 28;
if (val == state)
return 0;
/* Sleep for max. 200us */
usleep_range(100, 200);
} while (ktime_before(time_checked, timeout));
return -ETIMEDOUT;
}
static int ufs_mtk_mphy_power_on(struct ufs_hba *hba, bool on)
{
struct ufs_mtk_host *host = ufshcd_get_variant(hba);
struct phy *mphy = host->mphy;
struct arm_smccc_res res;
int ret = 0;
if (!mphy || !(on ^ host->mphy_powered_on))
return 0;
if (on) {
if (ufs_mtk_is_va09_supported(hba)) {
ret = regulator_enable(host->reg_va09);
if (ret < 0)
goto out;
/* wait 200 us to stabilize VA09 */
usleep_range(200, 210);
ufs_mtk_va09_pwr_ctrl(res, 1);
}
phy_power_on(mphy);
} else {
phy_power_off(mphy);
if (ufs_mtk_is_va09_supported(hba)) {
ufs_mtk_va09_pwr_ctrl(res, 0);
ret = regulator_disable(host->reg_va09);
}
}
out:
if (ret) {
dev_info(hba->dev,
"failed to %s va09: %d\n",
on ? "enable" : "disable",
ret);
} else {
host->mphy_powered_on = on;
}
return ret;
}
static int ufs_mtk_get_host_clk(struct device *dev, const char *name,
struct clk **clk_out)
{
struct clk *clk;
int err = 0;
clk = devm_clk_get(dev, name);
if (IS_ERR(clk))
err = PTR_ERR(clk);
else
*clk_out = clk;
return err;
}
static void ufs_mtk_boost_crypt(struct ufs_hba *hba, bool boost)
{
struct ufs_mtk_host *host = ufshcd_get_variant(hba);
struct ufs_mtk_crypt_cfg *cfg;
struct regulator *reg;
int volt, ret;
if (!ufs_mtk_is_boost_crypt_enabled(hba))
return;
cfg = host->crypt;
volt = cfg->vcore_volt;
reg = cfg->reg_vcore;
ret = clk_prepare_enable(cfg->clk_crypt_mux);
if (ret) {
dev_info(hba->dev, "clk_prepare_enable(): %d\n",
ret);
return;
}
if (boost) {
ret = regulator_set_voltage(reg, volt, INT_MAX);
if (ret) {
dev_info(hba->dev,
"failed to set vcore to %d\n", volt);
goto out;
}
ret = clk_set_parent(cfg->clk_crypt_mux,
cfg->clk_crypt_perf);
if (ret) {
dev_info(hba->dev,
"failed to set clk_crypt_perf\n");
regulator_set_voltage(reg, 0, INT_MAX);
goto out;
}
} else {
ret = clk_set_parent(cfg->clk_crypt_mux,
cfg->clk_crypt_lp);
if (ret) {
dev_info(hba->dev,
"failed to set clk_crypt_lp\n");
goto out;
}
ret = regulator_set_voltage(reg, 0, INT_MAX);
if (ret) {
dev_info(hba->dev,
"failed to set vcore to MIN\n");
}
}
out:
clk_disable_unprepare(cfg->clk_crypt_mux);
}
static int ufs_mtk_init_host_clk(struct ufs_hba *hba, const char *name,
struct clk **clk)
{
int ret;
ret = ufs_mtk_get_host_clk(hba->dev, name, clk);
if (ret) {
dev_info(hba->dev, "%s: failed to get %s: %d", __func__,
name, ret);
}
return ret;
}
static void ufs_mtk_init_boost_crypt(struct ufs_hba *hba)
{
struct ufs_mtk_host *host = ufshcd_get_variant(hba);
struct ufs_mtk_crypt_cfg *cfg;
struct device *dev = hba->dev;
struct regulator *reg;
u32 volt;
host->crypt = devm_kzalloc(dev, sizeof(*(host->crypt)),
GFP_KERNEL);
if (!host->crypt)
goto disable_caps;
reg = devm_regulator_get_optional(dev, "dvfsrc-vcore");
if (IS_ERR(reg)) {
dev_info(dev, "failed to get dvfsrc-vcore: %ld",
PTR_ERR(reg));
goto disable_caps;
}
if (of_property_read_u32(dev->of_node, "boost-crypt-vcore-min",
&volt)) {
dev_info(dev, "failed to get boost-crypt-vcore-min");
goto disable_caps;
}
cfg = host->crypt;
if (ufs_mtk_init_host_clk(hba, "crypt_mux",
&cfg->clk_crypt_mux))
goto disable_caps;
if (ufs_mtk_init_host_clk(hba, "crypt_lp",
&cfg->clk_crypt_lp))
goto disable_caps;
if (ufs_mtk_init_host_clk(hba, "crypt_perf",
&cfg->clk_crypt_perf))
goto disable_caps;
cfg->reg_vcore = reg;
cfg->vcore_volt = volt;
host->caps |= UFS_MTK_CAP_BOOST_CRYPT_ENGINE;
disable_caps:
return;
}
static void ufs_mtk_init_va09_pwr_ctrl(struct ufs_hba *hba)
{
struct ufs_mtk_host *host = ufshcd_get_variant(hba);
host->reg_va09 = regulator_get(hba->dev, "va09");
if (IS_ERR(host->reg_va09))
dev_info(hba->dev, "failed to get va09");
else
host->caps |= UFS_MTK_CAP_VA09_PWR_CTRL;
}
static void ufs_mtk_init_host_caps(struct ufs_hba *hba)
{
struct ufs_mtk_host *host = ufshcd_get_variant(hba);
struct device_node *np = hba->dev->of_node;
if (of_property_read_bool(np, "mediatek,ufs-boost-crypt"))
ufs_mtk_init_boost_crypt(hba);
if (of_property_read_bool(np, "mediatek,ufs-support-va09"))
ufs_mtk_init_va09_pwr_ctrl(hba);
if (of_property_read_bool(np, "mediatek,ufs-disable-ah8"))
host->caps |= UFS_MTK_CAP_DISABLE_AH8;
if (of_property_read_bool(np, "mediatek,ufs-broken-vcc"))
host->caps |= UFS_MTK_CAP_BROKEN_VCC;
if (of_property_read_bool(np, "mediatek,ufs-pmc-via-fastauto"))
host->caps |= UFS_MTK_CAP_PMC_VIA_FASTAUTO;
dev_info(hba->dev, "caps: 0x%x", host->caps);
}
static void ufs_mtk_boost_pm_qos(struct ufs_hba *hba, bool boost)
{
struct ufs_mtk_host *host = ufshcd_get_variant(hba);
if (!host || !host->pm_qos_init)
return;
cpu_latency_qos_update_request(&host->pm_qos_req,
boost ? 0 : PM_QOS_DEFAULT_VALUE);
}
static void ufs_mtk_scale_perf(struct ufs_hba *hba, bool scale_up)
{
ufs_mtk_boost_crypt(hba, scale_up);
ufs_mtk_boost_pm_qos(hba, scale_up);
}
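/*
* Power up the M-PHY first, then the reference clock, then boost
* performance; power down in the reverse order.
*/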
static void ufs_mtk_pwr_ctrl(struct ufs_hba *hba, bool on)
{
struct ufs_mtk_host *host = ufshcd_get_variant(hba);
if (on) {
phy_power_on(host->mphy);
ufs_mtk_setup_ref_clk(hba, on);
if (!ufshcd_is_clkscaling_supported(hba))
ufs_mtk_scale_perf(hba, on);
} else {
if (!ufshcd_is_clkscaling_supported(hba))
ufs_mtk_scale_perf(hba, on);
ufs_mtk_setup_ref_clk(hba, on);
phy_power_off(host->mphy);
}
}
/**
* ufs_mtk_setup_clocks - enables/disable clocks
* @hba: host controller instance
* @on: If true, enable clocks else disable them.
* @status: PRE_CHANGE or POST_CHANGE notify
*
* Return: 0 on success, non-zero on failure.
*/
static int ufs_mtk_setup_clocks(struct ufs_hba *hba, bool on,
enum ufs_notify_change_status status)
{
struct ufs_mtk_host *host = ufshcd_get_variant(hba);
bool clk_pwr_off = false;
int ret = 0;
/*
* If ufs_mtk_init() has not completed yet, simply ignore this call.
* ufs_mtk_setup_clocks() will be called again from ufs_mtk_init()
* once initialization is done.
*/
if (!host)
return 0;
if (!on && status == PRE_CHANGE) {
if (ufshcd_is_link_off(hba)) {
clk_pwr_off = true;
} else if (ufshcd_is_link_hibern8(hba) ||
(!ufshcd_can_hibern8_during_gating(hba) &&
ufshcd_is_auto_hibern8_enabled(hba))) {
/*
* Gate ref-clk and poweroff mphy if link state is in
* OFF or Hibern8 by either Auto-Hibern8 or
* ufshcd_link_state_transition().
*/
ret = ufs_mtk_wait_link_state(hba,
VS_LINK_HIBERN8,
15);
if (!ret)
clk_pwr_off = true;
}
if (clk_pwr_off)
ufs_mtk_pwr_ctrl(hba, false);
} else if (on && status == POST_CHANGE) {
ufs_mtk_pwr_ctrl(hba, true);
}
return ret;
}
static void ufs_mtk_get_controller_version(struct ufs_hba *hba)
{
struct ufs_mtk_host *host = ufshcd_get_variant(hba);
int ret, ver = 0;
if (host->hw_ver.major)
return;
/* Set default (minimum) version anyway */
host->hw_ver.major = 2;
ret = ufshcd_dme_get(hba, UIC_ARG_MIB(PA_LOCALVERINFO), &ver);
if (!ret) {
if (ver >= UFS_UNIPRO_VER_1_8) {
host->hw_ver.major = 3;
/*
* Fix HCI version for some platforms with
* incorrect version
*/
if (hba->ufs_version < ufshci_version(3, 0))
hba->ufs_version = ufshci_version(3, 0);
}
}
}
static u32 ufs_mtk_get_ufs_hci_version(struct ufs_hba *hba)
{
return hba->ufs_version;
}
/**
* ufs_mtk_init_clocks - Init mtk driver private clocks
*
* @hba: per adapter instance
*/
static void ufs_mtk_init_clocks(struct ufs_hba *hba)
{
struct ufs_mtk_host *host = ufshcd_get_variant(hba);
struct list_head *head = &hba->clk_list_head;
struct ufs_mtk_clk *mclk = &host->mclk;
struct ufs_clk_info *clki, *clki_tmp;
/*
* Find private clocks and store them in struct ufs_mtk_clk.
* Remove "ufs_sel_min_src" and "ufs_sel_min_src" from list to avoid
* being switched on/off in clock gating.
*/
list_for_each_entry_safe(clki, clki_tmp, head, list) {
if (!strcmp(clki->name, "ufs_sel")) {
host->mclk.ufs_sel_clki = clki;
} else if (!strcmp(clki->name, "ufs_sel_max_src")) {
host->mclk.ufs_sel_max_clki = clki;
clk_disable_unprepare(clki->clk);
list_del(&clki->list);
} else if (!strcmp(clki->name, "ufs_sel_min_src")) {
host->mclk.ufs_sel_min_clki = clki;
clk_disable_unprepare(clki->clk);
list_del(&clki->list);
}
}
if (!mclk->ufs_sel_clki || !mclk->ufs_sel_max_clki ||
!mclk->ufs_sel_min_clki) {
hba->caps &= ~UFSHCD_CAP_CLK_SCALING;
dev_info(hba->dev,
"%s: Clk-scaling not ready. Feature disabled.",
__func__);
}
}
#define MAX_VCC_NAME 30
static int ufs_mtk_vreg_fix_vcc(struct ufs_hba *hba)
{
struct ufs_vreg_info *info = &hba->vreg_info;
struct device_node *np = hba->dev->of_node;
struct device *dev = hba->dev;
char vcc_name[MAX_VCC_NAME];
struct arm_smccc_res res;
int err, ver;
if (hba->vreg_info.vcc)
return 0;
if (of_property_read_bool(np, "mediatek,ufs-vcc-by-num")) {
ufs_mtk_get_vcc_num(res);
if (res.a1 > UFS_VCC_NONE && res.a1 < UFS_VCC_MAX)
snprintf(vcc_name, MAX_VCC_NAME, "vcc-opt%lu", res.a1);
else
return -ENODEV;
} else if (of_property_read_bool(np, "mediatek,ufs-vcc-by-ver")) {
ver = (hba->dev_info.wspecversion & 0xF00) >> 8;
snprintf(vcc_name, MAX_VCC_NAME, "vcc-ufs%u", ver);
} else {
return 0;
}
err = ufshcd_populate_vreg(dev, vcc_name, &info->vcc);
if (err)
return err;
err = ufshcd_get_vreg(dev, info->vcc);
if (err)
return err;
err = regulator_enable(info->vcc->reg);
if (!err) {
info->vcc->enabled = true;
dev_info(dev, "%s: %s enabled\n", __func__, vcc_name);
}
return err;
}
static void ufs_mtk_vreg_fix_vccqx(struct ufs_hba *hba)
{
struct ufs_vreg_info *info = &hba->vreg_info;
struct ufs_vreg **vreg_on, **vreg_off;
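/*
* UFS 3.x devices (wspecversion >= 0x0300) keep VCCQ while earlier
* devices keep VCCQ2: mark the rail in use as always-on and release
* the unused one.
*/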
if (hba->dev_info.wspecversion >= 0x0300) {
vreg_on = &info->vccq;
vreg_off = &info->vccq2;
} else {
vreg_on = &info->vccq2;
vreg_off = &info->vccq;
}
if (*vreg_on)
(*vreg_on)->always_on = true;
if (*vreg_off) {
regulator_disable((*vreg_off)->reg);
devm_kfree(hba->dev, (*vreg_off)->name);
devm_kfree(hba->dev, *vreg_off);
*vreg_off = NULL;
}
}
static void ufs_mtk_init_mcq_irq(struct ufs_hba *hba)
{
struct ufs_mtk_host *host = ufshcd_get_variant(hba);
struct platform_device *pdev;
int i;
int irq;
host->mcq_nr_intr = UFSHCD_MAX_Q_NR;
pdev = container_of(hba->dev, struct platform_device, dev);
for (i = 0; i < host->mcq_nr_intr; i++) {
/* irq index 0 is legacy irq, sq/cq irq start from index 1 */
irq = platform_get_irq(pdev, i + 1);
if (irq < 0) {
host->mcq_intr_info[i].irq = MTK_MCQ_INVALID_IRQ;
goto failed;
}
host->mcq_intr_info[i].hba = hba;
host->mcq_intr_info[i].irq = irq;
dev_info(hba->dev, "get platform mcq irq: %d, %d\n", i, irq);
}
return;
failed:
/* invalidate irq info */
for (i = 0; i < host->mcq_nr_intr; i++)
host->mcq_intr_info[i].irq = MTK_MCQ_INVALID_IRQ;
host->mcq_nr_intr = 0;
}
/**
* ufs_mtk_init - find other essential mmio bases
* @hba: host controller instance
*
* Binds PHY with controller and powers up PHY enabling clocks
* and regulators.
*
* Return: -EPROBE_DEFER if binding fails, returns negative error
* on phy power up failure and returns zero on success.
*/
static int ufs_mtk_init(struct ufs_hba *hba)
{
const struct of_device_id *id;
struct device *dev = hba->dev;
struct ufs_mtk_host *host;
int err = 0;
host = devm_kzalloc(dev, sizeof(*host), GFP_KERNEL);
if (!host) {
err = -ENOMEM;
dev_info(dev, "%s: no memory for mtk ufs host\n", __func__);
goto out;
}
host->hba = hba;
ufshcd_set_variant(hba, host);
id = of_match_device(ufs_mtk_of_match, dev);
if (!id) {
err = -EINVAL;
goto out;
}
/* Initialize host capability */
ufs_mtk_init_host_caps(hba);
ufs_mtk_init_mcq_irq(hba);
err = ufs_mtk_bind_mphy(hba);
if (err)
goto out_variant_clear;
ufs_mtk_init_reset(hba);
/* Enable runtime autosuspend */
hba->caps |= UFSHCD_CAP_RPM_AUTOSUSPEND;
/* Enable clock-gating */
hba->caps |= UFSHCD_CAP_CLK_GATING;
/* Enable inline encryption */
hba->caps |= UFSHCD_CAP_CRYPTO;
/* Enable WriteBooster */
hba->caps |= UFSHCD_CAP_WB_EN;
/* Enable clk scaling*/
hba->caps |= UFSHCD_CAP_CLK_SCALING;
hba->quirks |= UFSHCI_QUIRK_SKIP_MANUAL_WB_FLUSH_CTRL;
hba->quirks |= UFSHCD_QUIRK_MCQ_BROKEN_INTR;
hba->quirks |= UFSHCD_QUIRK_MCQ_BROKEN_RTC;
hba->vps->wb_flush_threshold = UFS_WB_BUF_REMAIN_PERCENT(80);
if (host->caps & UFS_MTK_CAP_DISABLE_AH8)
hba->caps |= UFSHCD_CAP_HIBERN8_WITH_CLK_GATING;
ufs_mtk_init_clocks(hba);
/*
* ufshcd_vops_init() is invoked after
* ufshcd_setup_clock(true) in ufshcd_hba_init() thus
* phy clock setup is skipped.
*
* Enable phy clocks specifically here.
*/
ufs_mtk_mphy_power_on(hba, true);
ufs_mtk_setup_clocks(hba, true, POST_CHANGE);
host->ip_ver = ufshcd_readl(hba, REG_UFS_MTK_IP_VER);
/* Initialize pm-qos request */
cpu_latency_qos_add_request(&host->pm_qos_req, PM_QOS_DEFAULT_VALUE);
host->pm_qos_init = true;
goto out;
out_variant_clear:
ufshcd_set_variant(hba, NULL);
out:
return err;
}
static bool ufs_mtk_pmc_via_fastauto(struct ufs_hba *hba,
struct ufs_pa_layer_attr *dev_req_params)
{
if (!ufs_mtk_is_pmc_via_fastauto(hba))
return false;
if (dev_req_params->hs_rate == hba->pwr_info.hs_rate)
return false;
if (dev_req_params->pwr_tx != FAST_MODE &&
dev_req_params->gear_tx < UFS_HS_G4)
return false;
if (dev_req_params->pwr_rx != FAST_MODE &&
dev_req_params->gear_rx < UFS_HS_G4)
return false;
return true;
}
static int ufs_mtk_pre_pwr_change(struct ufs_hba *hba,
struct ufs_pa_layer_attr *dev_max_params,
struct ufs_pa_layer_attr *dev_req_params)
{
struct ufs_mtk_host *host = ufshcd_get_variant(hba);
struct ufs_dev_params host_cap;
int ret;
ufshcd_init_pwr_dev_param(&host_cap);
host_cap.hs_rx_gear = UFS_HS_G5;
host_cap.hs_tx_gear = UFS_HS_G5;
ret = ufshcd_get_pwr_dev_param(&host_cap,
dev_max_params,
dev_req_params);
if (ret) {
pr_info("%s: failed to determine capabilities\n",
__func__);
}
if (ufs_mtk_pmc_via_fastauto(hba, dev_req_params)) {
ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXTERMINATION), true);
ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXGEAR), UFS_HS_G1);
ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXTERMINATION), true);
ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXGEAR), UFS_HS_G1);
ufshcd_dme_set(hba, UIC_ARG_MIB(PA_ACTIVETXDATALANES),
dev_req_params->lane_tx);
ufshcd_dme_set(hba, UIC_ARG_MIB(PA_ACTIVERXDATALANES),
dev_req_params->lane_rx);
ufshcd_dme_set(hba, UIC_ARG_MIB(PA_HSSERIES),
dev_req_params->hs_rate);
ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXHSADAPTTYPE),
PA_NO_ADAPT);
ret = ufshcd_uic_change_pwr_mode(hba,
FASTAUTO_MODE << 4 | FASTAUTO_MODE);
if (ret) {
dev_err(hba->dev, "%s: HSG1B FASTAUTO failed ret=%d\n",
__func__, ret);
}
}
if (host->hw_ver.major >= 3) {
ret = ufshcd_dme_configure_adapt(hba,
dev_req_params->gear_tx,
PA_INITIAL_ADAPT);
}
return ret;
}
static int ufs_mtk_pwr_change_notify(struct ufs_hba *hba,
enum ufs_notify_change_status stage,
struct ufs_pa_layer_attr *dev_max_params,
struct ufs_pa_layer_attr *dev_req_params)
{
int ret = 0;
switch (stage) {
case PRE_CHANGE:
ret = ufs_mtk_pre_pwr_change(hba, dev_max_params,
dev_req_params);
break;
case POST_CHANGE:
break;
default:
ret = -EINVAL;
break;
}
return ret;
}
static int ufs_mtk_unipro_set_lpm(struct ufs_hba *hba, bool lpm)
{
int ret;
struct ufs_mtk_host *host = ufshcd_get_variant(hba);
ret = ufshcd_dme_set(hba,
UIC_ARG_MIB_SEL(VS_UNIPROPOWERDOWNCONTROL, 0),
lpm ? 1 : 0);
if (!ret || !lpm) {
/*
* If the UIC command fails, keep the host marked as non-LPM so that
* the default hba_enable_delay_us value is used when re-enabling
* the host.
*/
host->unipro_lpm = lpm;
}
return ret;
}
static int ufs_mtk_pre_link(struct ufs_hba *hba)
{
int ret;
u32 tmp;
ufs_mtk_get_controller_version(hba);
ret = ufs_mtk_unipro_set_lpm(hba, false);
if (ret)
return ret;
/*
* Setting PA_Local_TX_LCC_Enable to 0 before link startup
* to make sure that both host and device TX LCC are disabled
* once link startup is completed.
*/
ret = ufshcd_disable_host_tx_lcc(hba);
if (ret)
return ret;
/* disable deep stall */
ret = ufshcd_dme_get(hba, UIC_ARG_MIB(VS_SAVEPOWERCONTROL), &tmp);
if (ret)
return ret;
tmp &= ~(1 << 6);
ret = ufshcd_dme_set(hba, UIC_ARG_MIB(VS_SAVEPOWERCONTROL), tmp);
return ret;
}
static void ufs_mtk_setup_clk_gating(struct ufs_hba *hba)
{
u32 ah_ms;
if (ufshcd_is_clkgating_allowed(hba)) {
if (ufshcd_is_auto_hibern8_supported(hba) && hba->ahit)
ah_ms = FIELD_GET(UFSHCI_AHIBERN8_TIMER_MASK,
hba->ahit);
else
ah_ms = 10;
ufshcd_clkgate_delay_set(hba->dev, ah_ms + 5);
}
}
static void ufs_mtk_post_link(struct ufs_hba *hba)
{
/* enable unipro clock gating feature */
ufs_mtk_cfg_unipro_cg(hba, true);
/* will be configured during hba probe */
if (ufshcd_is_auto_hibern8_supported(hba))
hba->ahit = FIELD_PREP(UFSHCI_AHIBERN8_TIMER_MASK, 10) |
FIELD_PREP(UFSHCI_AHIBERN8_SCALE_MASK, 3);
ufs_mtk_setup_clk_gating(hba);
}
static int ufs_mtk_link_startup_notify(struct ufs_hba *hba,
enum ufs_notify_change_status stage)
{
int ret = 0;
switch (stage) {
case PRE_CHANGE:
ret = ufs_mtk_pre_link(hba);
break;
case POST_CHANGE:
ufs_mtk_post_link(hba);
break;
default:
ret = -EINVAL;
break;
}
return ret;
}
static int ufs_mtk_device_reset(struct ufs_hba *hba)
{
struct arm_smccc_res res;
/* disable hba before device reset */
ufshcd_hba_stop(hba);
ufs_mtk_device_reset_ctrl(0, res);
/*
* The reset signal is active low. UFS devices shall detect a
* positive or negative RST_n pulse width of 1 us or more.
*
* To be on the safe side, keep the reset low for at least 10 us.
*/
usleep_range(10, 15);
ufs_mtk_device_reset_ctrl(1, res);
/* Some devices may need time to respond to rst_n */
usleep_range(10000, 15000);
dev_info(hba->dev, "device reset done\n");
return 0;
}
static int ufs_mtk_link_set_hpm(struct ufs_hba *hba)
{
int err;
err = ufshcd_hba_enable(hba);
if (err)
return err;
err = ufs_mtk_unipro_set_lpm(hba, false);
if (err)
return err;
err = ufshcd_uic_hibern8_exit(hba);
if (!err)
ufshcd_set_link_active(hba);
else
return err;
if (!hba->mcq_enabled) {
err = ufshcd_make_hba_operational(hba);
} else {
ufs_mtk_config_mcq(hba, false);
ufshcd_mcq_make_queues_operational(hba);
ufshcd_mcq_config_mac(hba, hba->nutrs);
/* Enable MCQ mode */
ufshcd_writel(hba, ufshcd_readl(hba, REG_UFS_MEM_CFG) | 0x1,
REG_UFS_MEM_CFG);
}
if (err)
return err;
return 0;
}
static int ufs_mtk_link_set_lpm(struct ufs_hba *hba)
{
int err;
/* Disable reset confirm feature by UniPro */
ufshcd_writel(hba,
(ufshcd_readl(hba, REG_UFS_XOUFS_CTRL) & ~0x100),
REG_UFS_XOUFS_CTRL);
err = ufs_mtk_unipro_set_lpm(hba, true);
if (err) {
/* Resume UniPro state for following error recovery */
ufs_mtk_unipro_set_lpm(hba, false);
return err;
}
return 0;
}
static void ufs_mtk_vccqx_set_lpm(struct ufs_hba *hba, bool lpm)
{
struct ufs_vreg *vccqx = NULL;
if (hba->vreg_info.vccq)
vccqx = hba->vreg_info.vccq;
else
vccqx = hba->vreg_info.vccq2;
regulator_set_mode(vccqx->reg,
lpm ? REGULATOR_MODE_IDLE : REGULATOR_MODE_NORMAL);
}
static void ufs_mtk_vsx_set_lpm(struct ufs_hba *hba, bool lpm)
{
struct arm_smccc_res res;
ufs_mtk_device_pwr_ctrl(!lpm,
(unsigned long)hba->dev_info.wspecversion,
res);
}
static void ufs_mtk_dev_vreg_set_lpm(struct ufs_hba *hba, bool lpm)
{
if (!hba->vreg_info.vccq && !hba->vreg_info.vccq2)
return;
/* Skip if VCC is assumed always-on */
if (!hba->vreg_info.vcc)
return;
/* Bypass LPM when device is still active */
if (lpm && ufshcd_is_ufs_dev_active(hba))
return;
/* Bypass LPM if VCC is enabled */
if (lpm && hba->vreg_info.vcc->enabled)
return;
if (lpm) {
ufs_mtk_vccqx_set_lpm(hba, lpm);
ufs_mtk_vsx_set_lpm(hba, lpm);
} else {
ufs_mtk_vsx_set_lpm(hba, lpm);
ufs_mtk_vccqx_set_lpm(hba, lpm);
}
}
static void ufs_mtk_auto_hibern8_disable(struct ufs_hba *hba)
{
int ret;
/* disable auto-hibern8 */
ufshcd_writel(hba, 0, REG_AUTO_HIBERNATE_IDLE_TIMER);
/* wait host return to idle state when auto-hibern8 off */
ufs_mtk_wait_idle_state(hba, 5);
ret = ufs_mtk_wait_link_state(hba, VS_LINK_UP, 100);
if (ret)
dev_warn(hba->dev, "exit h8 state fail, ret=%d\n", ret);
}
static int ufs_mtk_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op,
enum ufs_notify_change_status status)
{
int err;
struct arm_smccc_res res;
if (status == PRE_CHANGE) {
if (ufshcd_is_auto_hibern8_supported(hba))
ufs_mtk_auto_hibern8_disable(hba);
return 0;
}
if (ufshcd_is_link_hibern8(hba)) {
err = ufs_mtk_link_set_lpm(hba);
if (err)
goto fail;
}
if (!ufshcd_is_link_active(hba)) {
/*
* Make sure no error will be returned to prevent
* ufshcd_suspend() re-enabling regulators while vreg is still
* in low-power mode.
*/
err = ufs_mtk_mphy_power_on(hba, false);
if (err)
goto fail;
}
if (ufshcd_is_link_off(hba))
ufs_mtk_device_reset_ctrl(0, res);
ufs_mtk_host_pwr_ctrl(HOST_PWR_HCI, false, res);
return 0;
fail:
/*
* Set link as off state enforcedly to trigger
* ufshcd_host_reset_and_restore() in ufshcd_suspend()
* for completed host reset.
*/
ufshcd_set_link_off(hba);
return -EAGAIN;
}
static int ufs_mtk_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op)
{
int err;
struct arm_smccc_res res;
if (hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL)
ufs_mtk_dev_vreg_set_lpm(hba, false);
ufs_mtk_host_pwr_ctrl(HOST_PWR_HCI, true, res);
err = ufs_mtk_mphy_power_on(hba, true);
if (err)
goto fail;
if (ufshcd_is_link_hibern8(hba)) {
err = ufs_mtk_link_set_hpm(hba);
if (err)
goto fail;
}
return 0;
fail:
return ufshcd_link_recovery(hba);
}
static void ufs_mtk_dbg_register_dump(struct ufs_hba *hba)
{
/* Dump ufshci register 0x140 ~ 0x14C */
ufshcd_dump_regs(hba, REG_UFS_XOUFS_CTRL, 0x10,
"XOUFS Ctrl (0x140): ");
ufshcd_dump_regs(hba, REG_UFS_EXTREG, 0x4, "Ext Reg ");
/* Dump ufshci register 0x2200 ~ 0x22AC */
ufshcd_dump_regs(hba, REG_UFS_MPHYCTRL,
REG_UFS_REJECT_MON - REG_UFS_MPHYCTRL + 4,
"MPHY Ctrl (0x2200): ");
/* Direct debugging information to REG_MTK_PROBE */
ufs_mtk_dbg_sel(hba);
ufshcd_dump_regs(hba, REG_UFS_PROBE, 0x4, "Debug Probe ");
}
static int ufs_mtk_apply_dev_quirks(struct ufs_hba *hba)
{
struct ufs_dev_info *dev_info = &hba->dev_info;
u16 mid = dev_info->wmanufacturerid;
if (mid == UFS_VENDOR_SAMSUNG) {
ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TACTIVATE), 6);
ufshcd_dme_set(hba, UIC_ARG_MIB(PA_HIBERN8TIME), 10);
}
/*
* Decide waiting time before gating reference clock and
* after ungating reference clock according to vendors'
* requirements.
*/
if (mid == UFS_VENDOR_SAMSUNG)
ufs_mtk_setup_ref_clk_wait_us(hba, 1);
else if (mid == UFS_VENDOR_SKHYNIX)
ufs_mtk_setup_ref_clk_wait_us(hba, 30);
else if (mid == UFS_VENDOR_TOSHIBA)
ufs_mtk_setup_ref_clk_wait_us(hba, 100);
else
ufs_mtk_setup_ref_clk_wait_us(hba,
REFCLK_DEFAULT_WAIT_US);
return 0;
}
static void ufs_mtk_fixup_dev_quirks(struct ufs_hba *hba)
{
ufshcd_fixup_dev_quirks(hba, ufs_mtk_dev_fixups);
if (ufs_mtk_is_broken_vcc(hba) && hba->vreg_info.vcc &&
(hba->dev_quirks & UFS_DEVICE_QUIRK_DELAY_AFTER_LPM)) {
hba->vreg_info.vcc->always_on = true;
/*
* VCC will be kept always-on thus we don't
* need any delay during regulator operations
*/
hba->dev_quirks &= ~(UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM |
UFS_DEVICE_QUIRK_DELAY_AFTER_LPM);
}
ufs_mtk_vreg_fix_vcc(hba);
ufs_mtk_vreg_fix_vccqx(hba);
}
static void ufs_mtk_event_notify(struct ufs_hba *hba,
enum ufs_event_type evt, void *data)
{
unsigned int val = *(u32 *)data;
unsigned long reg;
u8 bit;
trace_ufs_mtk_event(evt, val);
/* Print details of UIC Errors */
if (evt <= UFS_EVT_DME_ERR) {
dev_info(hba->dev,
"Host UIC Error Code (%s): %08x\n",
ufs_uic_err_str[evt], val);
reg = val;
}
if (evt == UFS_EVT_PA_ERR) {
for_each_set_bit(bit, ®, ARRAY_SIZE(ufs_uic_pa_err_str))
dev_info(hba->dev, "%s\n", ufs_uic_pa_err_str[bit]);
}
if (evt == UFS_EVT_DL_ERR) {
for_each_set_bit(bit, ®, ARRAY_SIZE(ufs_uic_dl_err_str))
dev_info(hba->dev, "%s\n", ufs_uic_dl_err_str[bit]);
}
}
static void ufs_mtk_config_scaling_param(struct ufs_hba *hba,
struct devfreq_dev_profile *profile,
struct devfreq_simple_ondemand_data *data)
{
/* Customize min gear in clk scaling */
hba->clk_scaling.min_gear = UFS_HS_G4;
hba->vps->devfreq_profile.polling_ms = 200;
hba->vps->ondemand_data.upthreshold = 50;
hba->vps->ondemand_data.downdifferential = 20;
}
/**
* ufs_mtk_clk_scale - Internal clk scaling operation
*
* The MTK platform supports clock scaling by switching the parent of the
* ufs_sel mux. ufs_sel feeds ufs_ck, which in turn clocks the UFS hardware
* directly. The max and min rates of ufs_sel defined in the DT should match
* the rates of "ufs_sel_max_src" and "ufs_sel_min_src" respectively.
* This avoids changing the rate of a PLL clock that is shared between modules.
*
* @hba: per adapter instance
* @scale_up: True for scaling up and false for scaling down
*/
static void ufs_mtk_clk_scale(struct ufs_hba *hba, bool scale_up)
{
struct ufs_mtk_host *host = ufshcd_get_variant(hba);
struct ufs_mtk_clk *mclk = &host->mclk;
struct ufs_clk_info *clki = mclk->ufs_sel_clki;
int ret = 0;
ret = clk_prepare_enable(clki->clk);
if (ret) {
dev_info(hba->dev,
"clk_prepare_enable() fail, ret: %d\n", ret);
return;
}
if (scale_up) {
ret = clk_set_parent(clki->clk, mclk->ufs_sel_max_clki->clk);
clki->curr_freq = clki->max_freq;
} else {
ret = clk_set_parent(clki->clk, mclk->ufs_sel_min_clki->clk);
clki->curr_freq = clki->min_freq;
}
if (ret) {
dev_info(hba->dev,
"Failed to set ufs_sel_clki, ret: %d\n", ret);
}
clk_disable_unprepare(clki->clk);
trace_ufs_mtk_clk_scale(clki->name, scale_up, clk_get_rate(clki->clk));
}
static int ufs_mtk_clk_scale_notify(struct ufs_hba *hba, bool scale_up,
enum ufs_notify_change_status status)
{
if (!ufshcd_is_clkscaling_supported(hba))
return 0;
if (status == PRE_CHANGE) {
/* Switch parent before clk_set_rate() */
ufs_mtk_clk_scale(hba, scale_up);
} else {
/* Request interrupt latency QoS accordingly */
ufs_mtk_scale_perf(hba, scale_up);
}
return 0;
}
static int ufs_mtk_get_hba_mac(struct ufs_hba *hba)
{
return MAX_SUPP_MAC;
}
static int ufs_mtk_op_runtime_config(struct ufs_hba *hba)
{
struct ufshcd_mcq_opr_info_t *opr;
int i;
hba->mcq_opr[OPR_SQD].offset = REG_UFS_MTK_SQD;
hba->mcq_opr[OPR_SQIS].offset = REG_UFS_MTK_SQIS;
hba->mcq_opr[OPR_CQD].offset = REG_UFS_MTK_CQD;
hba->mcq_opr[OPR_CQIS].offset = REG_UFS_MTK_CQIS;
for (i = 0; i < OPR_MAX; i++) {
opr = &hba->mcq_opr[i];
opr->stride = REG_UFS_MCQ_STRIDE;
opr->base = hba->mmio_base + opr->offset;
}
return 0;
}
static int ufs_mtk_mcq_config_resource(struct ufs_hba *hba)
{
struct ufs_mtk_host *host = ufshcd_get_variant(hba);
/* fail mcq initialization if interrupt is not filled properly */
if (!host->mcq_nr_intr) {
dev_info(hba->dev, "IRQs not ready. MCQ disabled.");
return -EINVAL;
}
hba->mcq_base = hba->mmio_base + MCQ_QUEUE_OFFSET(hba->mcq_capabilities);
return 0;
}
static irqreturn_t ufs_mtk_mcq_intr(int irq, void *__intr_info)
{
struct ufs_mtk_mcq_intr_info *mcq_intr_info = __intr_info;
struct ufs_hba *hba = mcq_intr_info->hba;
struct ufs_hw_queue *hwq;
u32 events;
int qid = mcq_intr_info->qid;
hwq = &hba->uhq[qid];
events = ufshcd_mcq_read_cqis(hba, qid);
if (events)
ufshcd_mcq_write_cqis(hba, events, qid);
if (events & UFSHCD_MCQ_CQIS_TAIL_ENT_PUSH_STS)
ufshcd_mcq_poll_cqe_lock(hba, hwq);
return IRQ_HANDLED;
}
static int ufs_mtk_config_mcq_irq(struct ufs_hba *hba)
{
struct ufs_mtk_host *host = ufshcd_get_variant(hba);
u32 irq, i;
int ret;
for (i = 0; i < host->mcq_nr_intr; i++) {
irq = host->mcq_intr_info[i].irq;
if (irq == MTK_MCQ_INVALID_IRQ) {
dev_err(hba->dev, "invalid irq. %d\n", i);
return -ENOPARAM;
}
host->mcq_intr_info[i].qid = i;
ret = devm_request_irq(hba->dev, irq, ufs_mtk_mcq_intr, 0, UFSHCD,
&host->mcq_intr_info[i]);
dev_dbg(hba->dev, "request irq %d intr %s\n", irq, ret ? "failed" : "");
if (ret) {
dev_err(hba->dev, "Cannot request irq %d\n", ret);
return ret;
}
}
return 0;
}
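/*
* Configure MCQ interrupt delivery: the per-queue interrupts are
* requested only once, after which the AH8 and multi-interrupt enable
* bits are set in the MMIO option register.
*/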
static int ufs_mtk_config_mcq(struct ufs_hba *hba, bool irq)
{
struct ufs_mtk_host *host = ufshcd_get_variant(hba);
int ret = 0;
if (!host->mcq_set_intr) {
/* Disable irq option register */
ufshcd_rmwl(hba, MCQ_INTR_EN_MSK, 0, REG_UFS_MMIO_OPT_CTRL_0);
if (irq) {
ret = ufs_mtk_config_mcq_irq(hba);
if (ret)
return ret;
}
host->mcq_set_intr = true;
}
ufshcd_rmwl(hba, MCQ_AH8, MCQ_AH8, REG_UFS_MMIO_OPT_CTRL_0);
ufshcd_rmwl(hba, MCQ_INTR_EN_MSK, MCQ_MULTI_INTR_EN, REG_UFS_MMIO_OPT_CTRL_0);
return 0;
}
static int ufs_mtk_config_esi(struct ufs_hba *hba)
{
return ufs_mtk_config_mcq(hba, true);
}
/*
* struct ufs_hba_mtk_vops - UFS MTK specific variant operations
*
* The variant operations configure the necessary controller and PHY
* handshake during initialization.
*/
static const struct ufs_hba_variant_ops ufs_hba_mtk_vops = {
.name = "mediatek.ufshci",
.init = ufs_mtk_init,
.get_ufs_hci_version = ufs_mtk_get_ufs_hci_version,
.setup_clocks = ufs_mtk_setup_clocks,
.hce_enable_notify = ufs_mtk_hce_enable_notify,
.link_startup_notify = ufs_mtk_link_startup_notify,
.pwr_change_notify = ufs_mtk_pwr_change_notify,
.apply_dev_quirks = ufs_mtk_apply_dev_quirks,
.fixup_dev_quirks = ufs_mtk_fixup_dev_quirks,
.suspend = ufs_mtk_suspend,
.resume = ufs_mtk_resume,
.dbg_register_dump = ufs_mtk_dbg_register_dump,
.device_reset = ufs_mtk_device_reset,
.event_notify = ufs_mtk_event_notify,
.config_scaling_param = ufs_mtk_config_scaling_param,
.clk_scale_notify = ufs_mtk_clk_scale_notify,
/* mcq vops */
.get_hba_mac = ufs_mtk_get_hba_mac,
.op_runtime_config = ufs_mtk_op_runtime_config,
.mcq_config_resource = ufs_mtk_mcq_config_resource,
.config_esi = ufs_mtk_config_esi,
};
/**
* ufs_mtk_probe - probe routine of the driver
* @pdev: pointer to Platform device handle
*
* Return: zero for success and non-zero for failure.
*/
static int ufs_mtk_probe(struct platform_device *pdev)
{
int err;
struct device *dev = &pdev->dev;
struct device_node *reset_node;
struct platform_device *reset_pdev;
struct device_link *link;
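/*
* The UFS reset on these platforms is provided by a "ti,syscon-reset"
* node; add a device link to that provider so probing is deferred
* until the reset controller is ready.
*/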
reset_node = of_find_compatible_node(NULL, NULL,
"ti,syscon-reset");
if (!reset_node) {
dev_notice(dev, "find ti,syscon-reset fail\n");
goto skip_reset;
}
reset_pdev = of_find_device_by_node(reset_node);
if (!reset_pdev) {
dev_notice(dev, "find reset_pdev fail\n");
goto skip_reset;
}
link = device_link_add(dev, &reset_pdev->dev,
DL_FLAG_AUTOPROBE_CONSUMER);
put_device(&reset_pdev->dev);
if (!link) {
dev_notice(dev, "add reset device_link fail\n");
goto skip_reset;
}
/* supplier is not probed */
if (link->status == DL_STATE_DORMANT) {
err = -EPROBE_DEFER;
goto out;
}
skip_reset:
/* perform generic probe */
err = ufshcd_pltfrm_init(pdev, &ufs_hba_mtk_vops);
out:
if (err)
dev_err(dev, "probe failed %d\n", err);
of_node_put(reset_node);
return err;
}
/**
* ufs_mtk_remove - set driver_data of the device to NULL
* @pdev: pointer to platform device handle
*
* Always returns 0
*/
static int ufs_mtk_remove(struct platform_device *pdev)
{
struct ufs_hba *hba = platform_get_drvdata(pdev);
pm_runtime_get_sync(&(pdev)->dev);
ufshcd_remove(hba);
return 0;
}
#ifdef CONFIG_PM_SLEEP
static int ufs_mtk_system_suspend(struct device *dev)
{
struct ufs_hba *hba = dev_get_drvdata(dev);
int ret;
ret = ufshcd_system_suspend(dev);
if (ret)
return ret;
ufs_mtk_dev_vreg_set_lpm(hba, true);
return 0;
}
static int ufs_mtk_system_resume(struct device *dev)
{
struct ufs_hba *hba = dev_get_drvdata(dev);
ufs_mtk_dev_vreg_set_lpm(hba, false);
return ufshcd_system_resume(dev);
}
#endif
#ifdef CONFIG_PM
static int ufs_mtk_runtime_suspend(struct device *dev)
{
struct ufs_hba *hba = dev_get_drvdata(dev);
int ret = 0;
ret = ufshcd_runtime_suspend(dev);
if (ret)
return ret;
ufs_mtk_dev_vreg_set_lpm(hba, true);
return 0;
}
static int ufs_mtk_runtime_resume(struct device *dev)
{
struct ufs_hba *hba = dev_get_drvdata(dev);
ufs_mtk_dev_vreg_set_lpm(hba, false);
return ufshcd_runtime_resume(dev);
}
#endif
static const struct dev_pm_ops ufs_mtk_pm_ops = {
SET_SYSTEM_SLEEP_PM_OPS(ufs_mtk_system_suspend,
ufs_mtk_system_resume)
SET_RUNTIME_PM_OPS(ufs_mtk_runtime_suspend,
ufs_mtk_runtime_resume, NULL)
.prepare = ufshcd_suspend_prepare,
.complete = ufshcd_resume_complete,
};
static struct platform_driver ufs_mtk_pltform = {
.probe = ufs_mtk_probe,
.remove = ufs_mtk_remove,
.driver = {
.name = "ufshcd-mtk",
.pm = &ufs_mtk_pm_ops,
.of_match_table = ufs_mtk_of_match,
},
};
MODULE_AUTHOR("Stanley Chu <[email protected]>");
MODULE_AUTHOR("Peter Wang <[email protected]>");
MODULE_DESCRIPTION("MediaTek UFS Host Driver");
MODULE_LICENSE("GPL v2");
module_platform_driver(ufs_mtk_pltform);
| linux-master | drivers/ufs/host/ufs-mediatek.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Universal Flash Storage Host controller Platform bus based glue driver
* Copyright (C) 2011-2013 Samsung India Software Operations
*
* Authors:
* Santosh Yaraganavi <[email protected]>
* Vinayak Holikatti <[email protected]>
*/
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/of.h>
#include <ufs/ufshcd.h>
#include "ufshcd-pltfrm.h"
#include <ufs/unipro.h>
#define UFSHCD_DEFAULT_LANES_PER_DIRECTION 2
static int ufshcd_parse_clock_info(struct ufs_hba *hba)
{
int ret = 0;
int cnt;
int i;
struct device *dev = hba->dev;
struct device_node *np = dev->of_node;
const char *name;
u32 *clkfreq = NULL;
struct ufs_clk_info *clki;
int len = 0;
size_t sz = 0;
if (!np)
goto out;
cnt = of_property_count_strings(np, "clock-names");
if (!cnt || (cnt == -EINVAL)) {
dev_info(dev, "%s: Unable to find clocks, assuming enabled\n",
__func__);
} else if (cnt < 0) {
dev_err(dev, "%s: count clock strings failed, err %d\n",
__func__, cnt);
ret = cnt;
}
if (cnt <= 0)
goto out;
if (!of_get_property(np, "freq-table-hz", &len)) {
dev_info(dev, "freq-table-hz property not specified\n");
goto out;
}
if (len <= 0)
goto out;
sz = len / sizeof(*clkfreq);
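/* freq-table-hz holds one <min max> pair per "clock-names" entry */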
if (sz != 2 * cnt) {
dev_err(dev, "%s len mismatch\n", "freq-table-hz");
ret = -EINVAL;
goto out;
}
clkfreq = devm_kcalloc(dev, sz, sizeof(*clkfreq),
GFP_KERNEL);
if (!clkfreq) {
ret = -ENOMEM;
goto out;
}
ret = of_property_read_u32_array(np, "freq-table-hz",
clkfreq, sz);
if (ret && (ret != -EINVAL)) {
dev_err(dev, "%s: error reading array %d\n",
"freq-table-hz", ret);
return ret;
}
for (i = 0; i < sz; i += 2) {
ret = of_property_read_string_index(np, "clock-names", i/2,
&name);
if (ret)
goto out;
clki = devm_kzalloc(dev, sizeof(*clki), GFP_KERNEL);
if (!clki) {
ret = -ENOMEM;
goto out;
}
clki->min_freq = clkfreq[i];
clki->max_freq = clkfreq[i+1];
clki->name = devm_kstrdup(dev, name, GFP_KERNEL);
if (!clki->name) {
ret = -ENOMEM;
goto out;
}
if (!strcmp(name, "ref_clk"))
clki->keep_link_active = true;
dev_dbg(dev, "%s: min %u max %u name %s\n", "freq-table-hz",
clki->min_freq, clki->max_freq, clki->name);
list_add_tail(&clki->list, &hba->clk_list_head);
}
out:
return ret;
}
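/*
* Check whether a phandle property is present; the reference taken by
* of_parse_phandle() is dropped immediately since only existence
* matters here.
*/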
static bool phandle_exists(const struct device_node *np,
const char *phandle_name, int index)
{
struct device_node *parse_np = of_parse_phandle(np, phandle_name, index);
if (parse_np)
of_node_put(parse_np);
return parse_np != NULL;
}
#define MAX_PROP_SIZE 32
int ufshcd_populate_vreg(struct device *dev, const char *name,
struct ufs_vreg **out_vreg)
{
char prop_name[MAX_PROP_SIZE];
struct ufs_vreg *vreg = NULL;
struct device_node *np = dev->of_node;
if (!np) {
dev_err(dev, "%s: non DT initialization\n", __func__);
goto out;
}
snprintf(prop_name, MAX_PROP_SIZE, "%s-supply", name);
if (!phandle_exists(np, prop_name, 0)) {
dev_info(dev, "%s: Unable to find %s regulator, assuming enabled\n",
__func__, prop_name);
goto out;
}
vreg = devm_kzalloc(dev, sizeof(*vreg), GFP_KERNEL);
if (!vreg)
return -ENOMEM;
vreg->name = devm_kstrdup(dev, name, GFP_KERNEL);
if (!vreg->name)
return -ENOMEM;
snprintf(prop_name, MAX_PROP_SIZE, "%s-max-microamp", name);
if (of_property_read_u32(np, prop_name, &vreg->max_uA)) {
dev_info(dev, "%s: unable to find %s\n", __func__, prop_name);
vreg->max_uA = 0;
}
out:
*out_vreg = vreg;
return 0;
}
EXPORT_SYMBOL_GPL(ufshcd_populate_vreg);
/**
* ufshcd_parse_regulator_info - get regulator info from device tree
* @hba: per adapter instance
*
* Get regulator info from device tree for vcc, vccq, vccq2 power supplies.
* If any of the supplies are not defined it is assumed that they are always-on
* and hence zero is returned. If the property is defined but parsing fails,
* the corresponding error is returned.
*
* Return: 0 upon success; < 0 upon failure.
*/
static int ufshcd_parse_regulator_info(struct ufs_hba *hba)
{
int err;
struct device *dev = hba->dev;
struct ufs_vreg_info *info = &hba->vreg_info;
err = ufshcd_populate_vreg(dev, "vdd-hba", &info->vdd_hba);
if (err)
goto out;
err = ufshcd_populate_vreg(dev, "vcc", &info->vcc);
if (err)
goto out;
err = ufshcd_populate_vreg(dev, "vccq", &info->vccq);
if (err)
goto out;
err = ufshcd_populate_vreg(dev, "vccq2", &info->vccq2);
out:
return err;
}
static void ufshcd_init_lanes_per_dir(struct ufs_hba *hba)
{
struct device *dev = hba->dev;
int ret;
ret = of_property_read_u32(dev->of_node, "lanes-per-direction",
&hba->lanes_per_direction);
if (ret) {
dev_dbg(hba->dev,
"%s: failed to read lanes-per-direction, ret=%d\n",
__func__, ret);
hba->lanes_per_direction = UFSHCD_DEFAULT_LANES_PER_DIRECTION;
}
}
/**
* ufshcd_get_pwr_dev_param - get finally agreed attributes for
* power mode change
* @pltfrm_param: pointer to platform parameters
* @dev_max: pointer to device attributes
* @agreed_pwr: returned agreed attributes
*
* Return: 0 on success, non-zero value on failure.
*/
int ufshcd_get_pwr_dev_param(const struct ufs_dev_params *pltfrm_param,
const struct ufs_pa_layer_attr *dev_max,
struct ufs_pa_layer_attr *agreed_pwr)
{
int min_pltfrm_gear;
int min_dev_gear;
bool is_dev_sup_hs = false;
bool is_pltfrm_max_hs = false;
if (dev_max->pwr_rx == FAST_MODE)
is_dev_sup_hs = true;
if (pltfrm_param->desired_working_mode == UFS_HS_MODE) {
is_pltfrm_max_hs = true;
min_pltfrm_gear = min_t(u32, pltfrm_param->hs_rx_gear,
pltfrm_param->hs_tx_gear);
} else {
min_pltfrm_gear = min_t(u32, pltfrm_param->pwm_rx_gear,
pltfrm_param->pwm_tx_gear);
}
/*
* device doesn't support HS but
* pltfrm_param->desired_working_mode is HS,
* thus device and pltfrm_param don't agree
*/
if (!is_dev_sup_hs && is_pltfrm_max_hs) {
pr_info("%s: device doesn't support HS\n",
__func__);
return -ENOTSUPP;
} else if (is_dev_sup_hs && is_pltfrm_max_hs) {
/*
* since device supports HS, it supports FAST_MODE.
* since pltfrm_param->desired_working_mode is also HS
* then final decision (FAST/FASTAUTO) is done according
* to pltfrm_params as it is the restricting factor
*/
agreed_pwr->pwr_rx = pltfrm_param->rx_pwr_hs;
agreed_pwr->pwr_tx = agreed_pwr->pwr_rx;
} else {
/*
* here pltfrm_param->desired_working_mode is PWM.
* it doesn't matter whether device supports HS or PWM,
* in both cases pltfrm_param->desired_working_mode will
* determine the mode
*/
agreed_pwr->pwr_rx = pltfrm_param->rx_pwr_pwm;
agreed_pwr->pwr_tx = agreed_pwr->pwr_rx;
}
/*
* we would like tx to work in the minimum number of lanes
* between device capability and vendor preferences.
* the same decision will be made for rx
*/
agreed_pwr->lane_tx = min_t(u32, dev_max->lane_tx,
pltfrm_param->tx_lanes);
agreed_pwr->lane_rx = min_t(u32, dev_max->lane_rx,
pltfrm_param->rx_lanes);
/* device maximum gear is the minimum between device rx and tx gears */
min_dev_gear = min_t(u32, dev_max->gear_rx, dev_max->gear_tx);
/*
* If the device capabilities and the vendor pre-defined preferences are
* both HS or both PWM, use the minimum gear as the chosen working gear.
* If one is PWM and one is HS, the PWM side gets to decide the gear, as
* it is also the side that decided above which power mode the device
* will be configured to.
*/
if ((is_dev_sup_hs && is_pltfrm_max_hs) ||
(!is_dev_sup_hs && !is_pltfrm_max_hs)) {
agreed_pwr->gear_rx =
min_t(u32, min_dev_gear, min_pltfrm_gear);
} else if (!is_dev_sup_hs) {
agreed_pwr->gear_rx = min_dev_gear;
} else {
agreed_pwr->gear_rx = min_pltfrm_gear;
}
agreed_pwr->gear_tx = agreed_pwr->gear_rx;
agreed_pwr->hs_rate = pltfrm_param->hs_rate;
return 0;
}
EXPORT_SYMBOL_GPL(ufshcd_get_pwr_dev_param);
void ufshcd_init_pwr_dev_param(struct ufs_dev_params *dev_param)
{
*dev_param = (struct ufs_dev_params){
.tx_lanes = UFS_LANE_2,
.rx_lanes = UFS_LANE_2,
.hs_rx_gear = UFS_HS_G3,
.hs_tx_gear = UFS_HS_G3,
.pwm_rx_gear = UFS_PWM_G4,
.pwm_tx_gear = UFS_PWM_G4,
.rx_pwr_pwm = SLOW_MODE,
.tx_pwr_pwm = SLOW_MODE,
.rx_pwr_hs = FAST_MODE,
.tx_pwr_hs = FAST_MODE,
.hs_rate = PA_HS_MODE_B,
.desired_working_mode = UFS_HS_MODE,
};
}
EXPORT_SYMBOL_GPL(ufshcd_init_pwr_dev_param);
/**
* ufshcd_pltfrm_init - probe routine of the driver
* @pdev: pointer to Platform device handle
* @vops: pointer to variant ops
*
* Return: 0 on success, non-zero value on failure.
*/
int ufshcd_pltfrm_init(struct platform_device *pdev,
const struct ufs_hba_variant_ops *vops)
{
struct ufs_hba *hba;
void __iomem *mmio_base;
int irq, err;
struct device *dev = &pdev->dev;
mmio_base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(mmio_base)) {
err = PTR_ERR(mmio_base);
goto out;
}
irq = platform_get_irq(pdev, 0);
if (irq < 0) {
err = irq;
goto out;
}
err = ufshcd_alloc_host(dev, &hba);
if (err) {
dev_err(dev, "Allocation failed\n");
goto out;
}
hba->vops = vops;
err = ufshcd_parse_clock_info(hba);
if (err) {
dev_err(dev, "%s: clock parse failed %d\n",
__func__, err);
goto dealloc_host;
}
err = ufshcd_parse_regulator_info(hba);
if (err) {
dev_err(dev, "%s: regulator init failed %d\n",
__func__, err);
goto dealloc_host;
}
ufshcd_init_lanes_per_dir(hba);
err = ufshcd_init(hba, mmio_base, irq);
if (err) {
dev_err_probe(dev, err, "Initialization failed with error %d\n",
err);
goto dealloc_host;
}
pm_runtime_set_active(dev);
pm_runtime_enable(dev);
return 0;
dealloc_host:
ufshcd_dealloc_host(hba);
out:
return err;
}
EXPORT_SYMBOL_GPL(ufshcd_pltfrm_init);
MODULE_AUTHOR("Santosh Yaragnavi <[email protected]>");
MODULE_AUTHOR("Vinayak Holikatti <[email protected]>");
MODULE_DESCRIPTION("UFS host controller Platform bus based glue driver");
MODULE_LICENSE("GPL");
| linux-master | drivers/ufs/host/ufshcd-pltfrm.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Platform UFS Host driver for Cadence controller
*
* Copyright (C) 2018 Cadence Design Systems, Inc.
*
* Authors:
* Jan Kotas <[email protected]>
*
*/
#include <linux/clk.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/of.h>
#include <linux/time.h>
#include "ufshcd-pltfrm.h"
#define CDNS_UFS_REG_HCLKDIV 0xFC
#define CDNS_UFS_REG_PHY_XCFGD1 0x113C
#define CDNS_UFS_MAX_L4_ATTRS 12
struct cdns_ufs_host {
/**
* cdns_ufs_dme_attr_val - for storing L4 attributes
*/
u32 cdns_ufs_dme_attr_val[CDNS_UFS_MAX_L4_ATTRS];
};
/**
* cdns_ufs_get_l4_attr - get L4 attributes on local side
* @hba: per adapter instance
*
*/
static void cdns_ufs_get_l4_attr(struct ufs_hba *hba)
{
struct cdns_ufs_host *host = ufshcd_get_variant(hba);
ufshcd_dme_get(hba, UIC_ARG_MIB(T_PEERDEVICEID),
&host->cdns_ufs_dme_attr_val[0]);
ufshcd_dme_get(hba, UIC_ARG_MIB(T_PEERCPORTID),
&host->cdns_ufs_dme_attr_val[1]);
ufshcd_dme_get(hba, UIC_ARG_MIB(T_TRAFFICCLASS),
&host->cdns_ufs_dme_attr_val[2]);
ufshcd_dme_get(hba, UIC_ARG_MIB(T_PROTOCOLID),
&host->cdns_ufs_dme_attr_val[3]);
ufshcd_dme_get(hba, UIC_ARG_MIB(T_CPORTFLAGS),
&host->cdns_ufs_dme_attr_val[4]);
ufshcd_dme_get(hba, UIC_ARG_MIB(T_TXTOKENVALUE),
&host->cdns_ufs_dme_attr_val[5]);
ufshcd_dme_get(hba, UIC_ARG_MIB(T_RXTOKENVALUE),
&host->cdns_ufs_dme_attr_val[6]);
ufshcd_dme_get(hba, UIC_ARG_MIB(T_LOCALBUFFERSPACE),
&host->cdns_ufs_dme_attr_val[7]);
ufshcd_dme_get(hba, UIC_ARG_MIB(T_PEERBUFFERSPACE),
&host->cdns_ufs_dme_attr_val[8]);
ufshcd_dme_get(hba, UIC_ARG_MIB(T_CREDITSTOSEND),
&host->cdns_ufs_dme_attr_val[9]);
ufshcd_dme_get(hba, UIC_ARG_MIB(T_CPORTMODE),
&host->cdns_ufs_dme_attr_val[10]);
ufshcd_dme_get(hba, UIC_ARG_MIB(T_CONNECTIONSTATE),
&host->cdns_ufs_dme_attr_val[11]);
}
/**
* cdns_ufs_set_l4_attr - set L4 attributes on local side
* @hba: per adapter instance
*
*/
static void cdns_ufs_set_l4_attr(struct ufs_hba *hba)
{
struct cdns_ufs_host *host = ufshcd_get_variant(hba);
ufshcd_dme_set(hba, UIC_ARG_MIB(T_CONNECTIONSTATE), 0);
ufshcd_dme_set(hba, UIC_ARG_MIB(T_PEERDEVICEID),
host->cdns_ufs_dme_attr_val[0]);
ufshcd_dme_set(hba, UIC_ARG_MIB(T_PEERCPORTID),
host->cdns_ufs_dme_attr_val[1]);
ufshcd_dme_set(hba, UIC_ARG_MIB(T_TRAFFICCLASS),
host->cdns_ufs_dme_attr_val[2]);
ufshcd_dme_set(hba, UIC_ARG_MIB(T_PROTOCOLID),
host->cdns_ufs_dme_attr_val[3]);
ufshcd_dme_set(hba, UIC_ARG_MIB(T_CPORTFLAGS),
host->cdns_ufs_dme_attr_val[4]);
ufshcd_dme_set(hba, UIC_ARG_MIB(T_TXTOKENVALUE),
host->cdns_ufs_dme_attr_val[5]);
ufshcd_dme_set(hba, UIC_ARG_MIB(T_RXTOKENVALUE),
host->cdns_ufs_dme_attr_val[6]);
ufshcd_dme_set(hba, UIC_ARG_MIB(T_LOCALBUFFERSPACE),
host->cdns_ufs_dme_attr_val[7]);
ufshcd_dme_set(hba, UIC_ARG_MIB(T_PEERBUFFERSPACE),
host->cdns_ufs_dme_attr_val[8]);
ufshcd_dme_set(hba, UIC_ARG_MIB(T_CREDITSTOSEND),
host->cdns_ufs_dme_attr_val[9]);
ufshcd_dme_set(hba, UIC_ARG_MIB(T_CPORTMODE),
host->cdns_ufs_dme_attr_val[10]);
ufshcd_dme_set(hba, UIC_ARG_MIB(T_CONNECTIONSTATE),
host->cdns_ufs_dme_attr_val[11]);
}
/**
* cdns_ufs_set_hclkdiv() - set HCLKDIV register value based on the core_clk.
* @hba: host controller instance
*
* Return: zero for success and non-zero for failure.
*/
static int cdns_ufs_set_hclkdiv(struct ufs_hba *hba)
{
struct ufs_clk_info *clki;
struct list_head *head = &hba->clk_list_head;
unsigned long core_clk_rate = 0;
u32 core_clk_div = 0;
if (list_empty(head))
return 0;
list_for_each_entry(clki, head, list) {
if (IS_ERR_OR_NULL(clki->clk))
continue;
if (!strcmp(clki->name, "core_clk"))
core_clk_rate = clk_get_rate(clki->clk);
}
if (!core_clk_rate) {
dev_err(hba->dev, "%s: unable to find core_clk rate\n",
__func__);
return -EINVAL;
}
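/* HCLKDIV takes the core clock rate in MHz (clock ticks per microsecond) */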
core_clk_div = core_clk_rate / USEC_PER_SEC;
ufshcd_writel(hba, core_clk_div, CDNS_UFS_REG_HCLKDIV);
/**
* Make sure the register was updated,
* UniPro layer will not work with an incorrect value.
*/
mb();
return 0;
}
/**
* cdns_ufs_hce_enable_notify() - set HCLKDIV register
* @hba: host controller instance
* @status: notify stage (pre, post change)
*
* Return: zero for success and non-zero for failure.
*/
static int cdns_ufs_hce_enable_notify(struct ufs_hba *hba,
enum ufs_notify_change_status status)
{
if (status != PRE_CHANGE)
return 0;
return cdns_ufs_set_hclkdiv(hba);
}
/**
* cdns_ufs_hibern8_notify() - save and restore L4 attributes.
* @hba: host controller instance
* @cmd: UIC Command
* @status: notify stage (pre, post change)
*/
static void cdns_ufs_hibern8_notify(struct ufs_hba *hba, enum uic_cmd_dme cmd,
enum ufs_notify_change_status status)
{
if (status == PRE_CHANGE && cmd == UIC_CMD_DME_HIBER_ENTER)
cdns_ufs_get_l4_attr(hba);
if (status == POST_CHANGE && cmd == UIC_CMD_DME_HIBER_EXIT)
cdns_ufs_set_l4_attr(hba);
}
/**
* cdns_ufs_link_startup_notify() - handle link startup.
* @hba: host controller instance
* @status: notify stage (pre, post change)
*
* Return: zero for success and non-zero for failure.
*/
static int cdns_ufs_link_startup_notify(struct ufs_hba *hba,
enum ufs_notify_change_status status)
{
if (status != PRE_CHANGE)
return 0;
/*
* Some UFS devices have issues if LCC is enabled.
* So we are setting PA_Local_TX_LCC_Enable to 0
* before link startup which will make sure that both host
* and device TX LCC are disabled once link startup is
* completed.
*/
ufshcd_disable_host_tx_lcc(hba);
/*
* Disable the Auto-Hibern8 feature in the Cadence UFS controller
* to mask unexpected interrupt triggers.
*/
hba->ahit = 0;
return 0;
}
/**
* cdns_ufs_init - performs additional ufs initialization
* @hba: host controller instance
*
* Return: status of initialization.
*/
static int cdns_ufs_init(struct ufs_hba *hba)
{
int status = 0;
struct cdns_ufs_host *host;
struct device *dev = hba->dev;
host = devm_kzalloc(dev, sizeof(*host), GFP_KERNEL);
if (!host)
return -ENOMEM;
ufshcd_set_variant(hba, host);
status = ufshcd_vops_phy_initialization(hba);
return status;
}
/**
* cdns_ufs_m31_16nm_phy_initialization - performs m31 phy initialization
* @hba: host controller instance
*
* Return: 0 (success).
*/
static int cdns_ufs_m31_16nm_phy_initialization(struct ufs_hba *hba)
{
u32 data;
/* Increase RX_Advanced_Min_ActivateTime_Capability */
data = ufshcd_readl(hba, CDNS_UFS_REG_PHY_XCFGD1);
data |= BIT(24);
ufshcd_writel(hba, data, CDNS_UFS_REG_PHY_XCFGD1);
return 0;
}
static const struct ufs_hba_variant_ops cdns_ufs_pltfm_hba_vops = {
.name = "cdns-ufs-pltfm",
.init = cdns_ufs_init,
.hce_enable_notify = cdns_ufs_hce_enable_notify,
.link_startup_notify = cdns_ufs_link_startup_notify,
.hibern8_notify = cdns_ufs_hibern8_notify,
};
static const struct ufs_hba_variant_ops cdns_ufs_m31_16nm_pltfm_hba_vops = {
.name = "cdns-ufs-pltfm",
.init = cdns_ufs_init,
.hce_enable_notify = cdns_ufs_hce_enable_notify,
.link_startup_notify = cdns_ufs_link_startup_notify,
.phy_initialization = cdns_ufs_m31_16nm_phy_initialization,
.hibern8_notify = cdns_ufs_hibern8_notify,
};
static const struct of_device_id cdns_ufs_of_match[] = {
{
.compatible = "cdns,ufshc",
.data = &cdns_ufs_pltfm_hba_vops,
},
{
.compatible = "cdns,ufshc-m31-16nm",
.data = &cdns_ufs_m31_16nm_pltfm_hba_vops,
},
{ },
};
MODULE_DEVICE_TABLE(of, cdns_ufs_of_match);
/**
* cdns_ufs_pltfrm_probe - probe routine of the driver
* @pdev: pointer to platform device handle
*
* Return: zero for success and non-zero for failure.
*/
static int cdns_ufs_pltfrm_probe(struct platform_device *pdev)
{
int err;
const struct of_device_id *of_id;
const struct ufs_hba_variant_ops *vops;
struct device *dev = &pdev->dev;
of_id = of_match_node(cdns_ufs_of_match, dev->of_node);
vops = of_id->data;
/* Perform generic probe */
err = ufshcd_pltfrm_init(pdev, vops);
if (err)
dev_err(dev, "ufshcd_pltfrm_init() failed %d\n", err);
return err;
}
/**
* cdns_ufs_pltfrm_remove - removes the ufs driver
* @pdev: pointer to platform device handle
*
* Return: 0 (success).
*/
static int cdns_ufs_pltfrm_remove(struct platform_device *pdev)
{
struct ufs_hba *hba = platform_get_drvdata(pdev);
ufshcd_remove(hba);
return 0;
}
static const struct dev_pm_ops cdns_ufs_dev_pm_ops = {
SET_SYSTEM_SLEEP_PM_OPS(ufshcd_system_suspend, ufshcd_system_resume)
SET_RUNTIME_PM_OPS(ufshcd_runtime_suspend, ufshcd_runtime_resume, NULL)
.prepare = ufshcd_suspend_prepare,
.complete = ufshcd_resume_complete,
};
static struct platform_driver cdns_ufs_pltfrm_driver = {
.probe = cdns_ufs_pltfrm_probe,
.remove = cdns_ufs_pltfrm_remove,
.driver = {
.name = "cdns-ufshcd",
.pm = &cdns_ufs_dev_pm_ops,
.of_match_table = cdns_ufs_of_match,
},
};
module_platform_driver(cdns_ufs_pltfrm_driver);
MODULE_AUTHOR("Jan Kotas <[email protected]>");
MODULE_DESCRIPTION("Cadence UFS host controller platform driver");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/ufs/host/cdns-pltfrm.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Synopsys G210 Test Chip driver
*
* Copyright (C) 2015-2016 Synopsys, Inc. (www.synopsys.com)
*
* Authors: Joao Pinto <[email protected]>
*/
#include <linux/module.h>
#include <ufs/ufshcd.h>
#include <ufs/unipro.h>
#include "ufshcd-dwc.h"
#include "ufshci-dwc.h"
#include "tc-dwc-g210.h"
/**
* tc_dwc_g210_setup_40bit_rmmi() - configure 40-bit RMMI.
* @hba: Pointer to drivers structure
*
* Return: 0 on success or non-zero value on failure.
*/
static int tc_dwc_g210_setup_40bit_rmmi(struct ufs_hba *hba)
{
static const struct ufshcd_dme_attr_val setup_attrs[] = {
{ UIC_ARG_MIB(TX_GLOBALHIBERNATE), 0x00, DME_LOCAL },
{ UIC_ARG_MIB(REFCLKMODE), 0x01, DME_LOCAL },
{ UIC_ARG_MIB(CDIRECTCTRL6), 0x80, DME_LOCAL },
{ UIC_ARG_MIB(CBDIVFACTOR), 0x08, DME_LOCAL },
{ UIC_ARG_MIB(CBDCOCTRL5), 0x64, DME_LOCAL },
{ UIC_ARG_MIB(CBPRGTUNING), 0x09, DME_LOCAL },
{ UIC_ARG_MIB(RTOBSERVESELECT), 0x00, DME_LOCAL },
{ UIC_ARG_MIB_SEL(TX_REFCLKFREQ, SELIND_LN0_TX), 0x01,
DME_LOCAL },
{ UIC_ARG_MIB_SEL(TX_CFGCLKFREQVAL, SELIND_LN0_TX), 0x19,
DME_LOCAL },
{ UIC_ARG_MIB_SEL(CFGEXTRATTR, SELIND_LN0_TX), 0x14,
DME_LOCAL },
{ UIC_ARG_MIB_SEL(DITHERCTRL2, SELIND_LN0_TX), 0xd6,
DME_LOCAL },
{ UIC_ARG_MIB_SEL(RX_REFCLKFREQ, SELIND_LN0_RX), 0x01,
DME_LOCAL },
{ UIC_ARG_MIB_SEL(RX_CFGCLKFREQVAL, SELIND_LN0_RX), 0x19,
DME_LOCAL },
{ UIC_ARG_MIB_SEL(CFGWIDEINLN, SELIND_LN0_RX), 4,
DME_LOCAL },
{ UIC_ARG_MIB_SEL(CFGRXCDR8, SELIND_LN0_RX), 0x80,
DME_LOCAL },
{ UIC_ARG_MIB(DIRECTCTRL10), 0x04, DME_LOCAL },
{ UIC_ARG_MIB(DIRECTCTRL19), 0x02, DME_LOCAL },
{ UIC_ARG_MIB_SEL(CFGRXCDR8, SELIND_LN0_RX), 0x80,
DME_LOCAL },
{ UIC_ARG_MIB_SEL(ENARXDIRECTCFG4, SELIND_LN0_RX), 0x03,
DME_LOCAL },
{ UIC_ARG_MIB_SEL(CFGRXOVR8, SELIND_LN0_RX), 0x16,
DME_LOCAL },
{ UIC_ARG_MIB_SEL(RXDIRECTCTRL2, SELIND_LN0_RX), 0x42,
DME_LOCAL },
{ UIC_ARG_MIB_SEL(ENARXDIRECTCFG3, SELIND_LN0_RX), 0xa4,
DME_LOCAL },
{ UIC_ARG_MIB_SEL(RXCALCTRL, SELIND_LN0_RX), 0x01,
DME_LOCAL },
{ UIC_ARG_MIB_SEL(ENARXDIRECTCFG2, SELIND_LN0_RX), 0x01,
DME_LOCAL },
{ UIC_ARG_MIB_SEL(CFGRXOVR4, SELIND_LN0_RX), 0x28,
DME_LOCAL },
{ UIC_ARG_MIB_SEL(RXSQCTRL, SELIND_LN0_RX), 0x1E,
DME_LOCAL },
{ UIC_ARG_MIB_SEL(CFGRXOVR6, SELIND_LN0_RX), 0x2f,
DME_LOCAL },
{ UIC_ARG_MIB_SEL(CFGRXOVR6, SELIND_LN0_RX), 0x2f,
DME_LOCAL },
{ UIC_ARG_MIB(CBPRGPLL2), 0x00, DME_LOCAL },
};
return ufshcd_dwc_dme_set_attrs(hba, setup_attrs,
ARRAY_SIZE(setup_attrs));
}
/**
* tc_dwc_g210_setup_20bit_rmmi_lane0() - configure 20-bit RMMI Lane 0.
* @hba: Pointer to drivers structure
*
* Return: 0 on success or non-zero value on failure.
*/
static int tc_dwc_g210_setup_20bit_rmmi_lane0(struct ufs_hba *hba)
{
static const struct ufshcd_dme_attr_val setup_attrs[] = {
{ UIC_ARG_MIB_SEL(TX_REFCLKFREQ, SELIND_LN0_TX), 0x01,
DME_LOCAL },
{ UIC_ARG_MIB_SEL(TX_CFGCLKFREQVAL, SELIND_LN0_TX), 0x19,
DME_LOCAL },
{ UIC_ARG_MIB_SEL(RX_CFGCLKFREQVAL, SELIND_LN0_RX), 0x19,
DME_LOCAL },
{ UIC_ARG_MIB_SEL(CFGEXTRATTR, SELIND_LN0_TX), 0x12,
DME_LOCAL },
{ UIC_ARG_MIB_SEL(DITHERCTRL2, SELIND_LN0_TX), 0xd6,
DME_LOCAL },
{ UIC_ARG_MIB_SEL(RX_REFCLKFREQ, SELIND_LN0_RX), 0x01,
DME_LOCAL },
{ UIC_ARG_MIB_SEL(CFGWIDEINLN, SELIND_LN0_RX), 2,
DME_LOCAL },
{ UIC_ARG_MIB_SEL(CFGRXCDR8, SELIND_LN0_RX), 0x80,
DME_LOCAL },
{ UIC_ARG_MIB(DIRECTCTRL10), 0x04, DME_LOCAL },
{ UIC_ARG_MIB(DIRECTCTRL19), 0x02, DME_LOCAL },
{ UIC_ARG_MIB_SEL(ENARXDIRECTCFG4, SELIND_LN0_RX), 0x03,
DME_LOCAL },
{ UIC_ARG_MIB_SEL(CFGRXOVR8, SELIND_LN0_RX), 0x16,
DME_LOCAL },
{ UIC_ARG_MIB_SEL(RXDIRECTCTRL2, SELIND_LN0_RX), 0x42,
DME_LOCAL },
{ UIC_ARG_MIB_SEL(ENARXDIRECTCFG3, SELIND_LN0_RX), 0xa4,
DME_LOCAL },
{ UIC_ARG_MIB_SEL(RXCALCTRL, SELIND_LN0_RX), 0x01,
DME_LOCAL },
{ UIC_ARG_MIB_SEL(ENARXDIRECTCFG2, SELIND_LN0_RX), 0x01,
DME_LOCAL },
{ UIC_ARG_MIB_SEL(CFGRXOVR4, SELIND_LN0_RX), 0x28,
DME_LOCAL },
{ UIC_ARG_MIB_SEL(RXSQCTRL, SELIND_LN0_RX), 0x1E,
DME_LOCAL },
{ UIC_ARG_MIB_SEL(CFGRXOVR6, SELIND_LN0_RX), 0x2f,
DME_LOCAL },
{ UIC_ARG_MIB(CBPRGPLL2), 0x00, DME_LOCAL },
};
return ufshcd_dwc_dme_set_attrs(hba, setup_attrs,
ARRAY_SIZE(setup_attrs));
}
/**
* tc_dwc_g210_setup_20bit_rmmi_lane1() - configure 20-bit RMMI Lane 1.
* @hba: Pointer to drivers structure
*
* Return: 0 on success or non-zero value on failure.
*/
static int tc_dwc_g210_setup_20bit_rmmi_lane1(struct ufs_hba *hba)
{
int connected_rx_lanes = 0;
int connected_tx_lanes = 0;
int ret = 0;
static const struct ufshcd_dme_attr_val setup_tx_attrs[] = {
{ UIC_ARG_MIB_SEL(TX_REFCLKFREQ, SELIND_LN1_TX), 0x0d,
DME_LOCAL },
{ UIC_ARG_MIB_SEL(TX_CFGCLKFREQVAL, SELIND_LN1_TX), 0x19,
DME_LOCAL },
{ UIC_ARG_MIB_SEL(CFGEXTRATTR, SELIND_LN1_TX), 0x12,
DME_LOCAL },
{ UIC_ARG_MIB_SEL(DITHERCTRL2, SELIND_LN0_TX), 0xd6,
DME_LOCAL },
};
static const struct ufshcd_dme_attr_val setup_rx_attrs[] = {
{ UIC_ARG_MIB_SEL(RX_REFCLKFREQ, SELIND_LN1_RX), 0x01,
DME_LOCAL },
{ UIC_ARG_MIB_SEL(RX_CFGCLKFREQVAL, SELIND_LN1_RX), 0x19,
DME_LOCAL },
{ UIC_ARG_MIB_SEL(CFGWIDEINLN, SELIND_LN1_RX), 2,
DME_LOCAL },
{ UIC_ARG_MIB_SEL(CFGRXCDR8, SELIND_LN1_RX), 0x80,
DME_LOCAL },
{ UIC_ARG_MIB_SEL(ENARXDIRECTCFG4, SELIND_LN1_RX), 0x03,
DME_LOCAL },
{ UIC_ARG_MIB_SEL(CFGRXOVR8, SELIND_LN1_RX), 0x16,
DME_LOCAL },
{ UIC_ARG_MIB_SEL(RXDIRECTCTRL2, SELIND_LN1_RX), 0x42,
DME_LOCAL },
{ UIC_ARG_MIB_SEL(ENARXDIRECTCFG3, SELIND_LN1_RX), 0xa4,
DME_LOCAL },
{ UIC_ARG_MIB_SEL(RXCALCTRL, SELIND_LN1_RX), 0x01,
DME_LOCAL },
{ UIC_ARG_MIB_SEL(ENARXDIRECTCFG2, SELIND_LN1_RX), 0x01,
DME_LOCAL },
{ UIC_ARG_MIB_SEL(CFGRXOVR4, SELIND_LN1_RX), 0x28,
DME_LOCAL },
{ UIC_ARG_MIB_SEL(RXSQCTRL, SELIND_LN1_RX), 0x1E,
DME_LOCAL },
{ UIC_ARG_MIB_SEL(CFGRXOVR6, SELIND_LN1_RX), 0x2f,
DME_LOCAL },
};
/* Get the available lane count */
ufshcd_dme_get(hba, UIC_ARG_MIB(PA_AVAILRXDATALANES),
&connected_rx_lanes);
ufshcd_dme_get(hba, UIC_ARG_MIB(PA_AVAILTXDATALANES),
&connected_tx_lanes);
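/* The lane 1 attributes only apply when a second data lane is connected. */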
if (connected_tx_lanes == 2) {
ret = ufshcd_dwc_dme_set_attrs(hba, setup_tx_attrs,
ARRAY_SIZE(setup_tx_attrs));
if (ret)
goto out;
}
if (connected_rx_lanes == 2) {
ret = ufshcd_dwc_dme_set_attrs(hba, setup_rx_attrs,
ARRAY_SIZE(setup_rx_attrs));
}
out:
return ret;
}
/**
* tc_dwc_g210_setup_20bit_rmmi() - configure 20-bit RMMI.
* @hba: Pointer to drivers structure
*
* Return: 0 on success or non-zero value on failure.
*/
static int tc_dwc_g210_setup_20bit_rmmi(struct ufs_hba *hba)
{
int ret = 0;
static const struct ufshcd_dme_attr_val setup_attrs[] = {
{ UIC_ARG_MIB(TX_GLOBALHIBERNATE), 0x00, DME_LOCAL },
{ UIC_ARG_MIB(REFCLKMODE), 0x01, DME_LOCAL },
{ UIC_ARG_MIB(CDIRECTCTRL6), 0xc0, DME_LOCAL },
{ UIC_ARG_MIB(CBDIVFACTOR), 0x44, DME_LOCAL },
{ UIC_ARG_MIB(CBDCOCTRL5), 0x64, DME_LOCAL },
{ UIC_ARG_MIB(CBPRGTUNING), 0x09, DME_LOCAL },
{ UIC_ARG_MIB(RTOBSERVESELECT), 0x00, DME_LOCAL },
};
ret = ufshcd_dwc_dme_set_attrs(hba, setup_attrs,
ARRAY_SIZE(setup_attrs));
if (ret)
goto out;
/* Lane 0 configuration */
ret = tc_dwc_g210_setup_20bit_rmmi_lane0(hba);
if (ret)
goto out;
/* Lane 1 configuration */
ret = tc_dwc_g210_setup_20bit_rmmi_lane1(hba);
if (ret)
goto out;
out:
return ret;
}
/**
* tc_dwc_g210_config_40_bit() - configure 40-bit TC specific attributes.
* @hba: Pointer to drivers structure
*
* Return: 0 on success non-zero value on failure.
*/
int tc_dwc_g210_config_40_bit(struct ufs_hba *hba)
{
int ret = 0;
dev_info(hba->dev, "Configuring Test Chip 40-bit RMMI\n");
ret = tc_dwc_g210_setup_40bit_rmmi(hba);
if (ret) {
dev_err(hba->dev, "Configuration failed\n");
goto out;
}
/* To write Shadow register bank to effective configuration block */
ret = ufshcd_dme_set(hba, UIC_ARG_MIB(VS_MPHYCFGUPDT), 0x01);
if (ret)
goto out;
/* To configure Debug OMC */
ret = ufshcd_dme_set(hba, UIC_ARG_MIB(VS_DEBUGOMC), 0x01);
out:
return ret;
}
EXPORT_SYMBOL(tc_dwc_g210_config_40_bit);
/**
* tc_dwc_g210_config_20_bit() - configure 20-bit TC specific attributes.
* @hba: Pointer to drivers structure
*
* Return: 0 on success non-zero value on failure.
*/
int tc_dwc_g210_config_20_bit(struct ufs_hba *hba)
{
int ret = 0;
dev_info(hba->dev, "Configuring Test Chip 20-bit RMMI\n");
ret = tc_dwc_g210_setup_20bit_rmmi(hba);
if (ret) {
dev_err(hba->dev, "Configuration failed\n");
goto out;
}
/* To write Shadow register bank to effective configuration block */
ret = ufshcd_dme_set(hba, UIC_ARG_MIB(VS_MPHYCFGUPDT), 0x01);
if (ret)
goto out;
/* To configure Debug OMC */
ret = ufshcd_dme_set(hba, UIC_ARG_MIB(VS_DEBUGOMC), 0x01);
out:
return ret;
}
EXPORT_SYMBOL(tc_dwc_g210_config_20_bit);
MODULE_AUTHOR("Joao Pinto <[email protected]>");
MODULE_DESCRIPTION("Synopsys G210 Test Chip driver");
MODULE_LICENSE("Dual BSD/GPL");
| linux-master | drivers/ufs/host/tc-dwc-g210.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* UFS Host Controller driver for Exynos specific extensions
*
* Copyright (C) 2014-2015 Samsung Electronics Co., Ltd.
* Author: Seungwon Jeon <[email protected]>
* Author: Alim Akhtar <[email protected]>
*
*/
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/mfd/syscon.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <ufs/ufshcd.h>
#include "ufshcd-pltfrm.h"
#include <ufs/ufshci.h>
#include <ufs/unipro.h>
#include "ufs-exynos.h"
/*
* Exynos's Vendor specific registers for UFSHCI
*/
#define HCI_TXPRDT_ENTRY_SIZE 0x00
#define PRDT_PREFETCH_EN BIT(31)
#define PRDT_SET_SIZE(x) ((x) & 0x1F)
#define HCI_RXPRDT_ENTRY_SIZE 0x04
#define HCI_1US_TO_CNT_VAL 0x0C
#define CNT_VAL_1US_MASK 0x3FF
#define HCI_UTRL_NEXUS_TYPE 0x40
#define HCI_UTMRL_NEXUS_TYPE 0x44
#define HCI_SW_RST 0x50
#define UFS_LINK_SW_RST BIT(0)
#define UFS_UNIPRO_SW_RST BIT(1)
#define UFS_SW_RST_MASK (UFS_UNIPRO_SW_RST | UFS_LINK_SW_RST)
#define HCI_DATA_REORDER 0x60
#define HCI_UNIPRO_APB_CLK_CTRL 0x68
#define UNIPRO_APB_CLK(v, x) (((v) & ~0xF) | ((x) & 0xF))
#define HCI_AXIDMA_RWDATA_BURST_LEN 0x6C
#define HCI_GPIO_OUT 0x70
#define HCI_ERR_EN_PA_LAYER 0x78
#define HCI_ERR_EN_DL_LAYER 0x7C
#define HCI_ERR_EN_N_LAYER 0x80
#define HCI_ERR_EN_T_LAYER 0x84
#define HCI_ERR_EN_DME_LAYER 0x88
#define HCI_CLKSTOP_CTRL 0xB0
#define REFCLKOUT_STOP BIT(4)
#define MPHY_APBCLK_STOP BIT(3)
#define REFCLK_STOP BIT(2)
#define UNIPRO_MCLK_STOP BIT(1)
#define UNIPRO_PCLK_STOP BIT(0)
#define CLK_STOP_MASK (REFCLKOUT_STOP | REFCLK_STOP |\
UNIPRO_MCLK_STOP | MPHY_APBCLK_STOP|\
UNIPRO_PCLK_STOP)
#define HCI_MISC 0xB4
#define REFCLK_CTRL_EN BIT(7)
#define UNIPRO_PCLK_CTRL_EN BIT(6)
#define UNIPRO_MCLK_CTRL_EN BIT(5)
#define HCI_CORECLK_CTRL_EN BIT(4)
#define CLK_CTRL_EN_MASK (REFCLK_CTRL_EN |\
UNIPRO_PCLK_CTRL_EN |\
UNIPRO_MCLK_CTRL_EN)
/* Device fatal error */
#define DFES_ERR_EN BIT(31)
#define DFES_DEF_L2_ERRS (UIC_DATA_LINK_LAYER_ERROR_RX_BUF_OF |\
UIC_DATA_LINK_LAYER_ERROR_PA_INIT)
#define DFES_DEF_L3_ERRS (UIC_NETWORK_UNSUPPORTED_HEADER_TYPE |\
UIC_NETWORK_BAD_DEVICEID_ENC |\
UIC_NETWORK_LHDR_TRAP_PACKET_DROPPING)
#define DFES_DEF_L4_ERRS (UIC_TRANSPORT_UNSUPPORTED_HEADER_TYPE |\
UIC_TRANSPORT_UNKNOWN_CPORTID |\
UIC_TRANSPORT_NO_CONNECTION_RX |\
UIC_TRANSPORT_BAD_TC)
/* FSYS UFS Shareability */
#define UFS_WR_SHARABLE BIT(2)
#define UFS_RD_SHARABLE BIT(1)
#define UFS_SHARABLE (UFS_WR_SHARABLE | UFS_RD_SHARABLE)
#define UFS_SHAREABILITY_OFFSET 0x710
/* Multi-host registers */
#define MHCTRL 0xC4
#define MHCTRL_EN_VH_MASK (0xE)
#define MHCTRL_EN_VH(vh) ((vh) << 1)
#define PH2VH_MBOX 0xD8
#define MH_MSG_MASK (0xFF)
#define MH_MSG(id, msg) (((id) << 8) | ((msg) & 0xFF))
#define MH_MSG_PH_READY 0x1
#define MH_MSG_VH_READY 0x2
#define ALLOW_INQUIRY BIT(25)
#define ALLOW_MODE_SELECT BIT(24)
#define ALLOW_MODE_SENSE BIT(23)
#define ALLOW_PRE_FETCH GENMASK(22, 21)
#define ALLOW_READ_CMD_ALL GENMASK(20, 18) /* read_6/10/16 */
#define ALLOW_READ_BUFFER BIT(17)
#define ALLOW_READ_CAPACITY GENMASK(16, 15)
#define ALLOW_REPORT_LUNS BIT(14)
#define ALLOW_REQUEST_SENSE BIT(13)
#define ALLOW_SYNCHRONIZE_CACHE GENMASK(8, 7)
#define ALLOW_TEST_UNIT_READY BIT(6)
#define ALLOW_UNMAP BIT(5)
#define ALLOW_VERIFY BIT(4)
#define ALLOW_WRITE_CMD_ALL GENMASK(3, 1) /* write_6/10/16 */
#define ALLOW_TRANS_VH_DEFAULT (ALLOW_INQUIRY | ALLOW_MODE_SELECT | \
ALLOW_MODE_SENSE | ALLOW_PRE_FETCH | \
ALLOW_READ_CMD_ALL | ALLOW_READ_BUFFER | \
ALLOW_READ_CAPACITY | ALLOW_REPORT_LUNS | \
ALLOW_REQUEST_SENSE | ALLOW_SYNCHRONIZE_CACHE | \
ALLOW_TEST_UNIT_READY | ALLOW_UNMAP | \
ALLOW_VERIFY | ALLOW_WRITE_CMD_ALL)
#define HCI_MH_ALLOWABLE_TRAN_OF_VH 0x30C
#define HCI_MH_IID_IN_TASK_TAG 0x308
#define PH_READY_TIMEOUT_MS (5 * MSEC_PER_SEC)
enum {
UNIPRO_L1_5 = 0,/* PHY Adapter */
UNIPRO_L2, /* Data Link */
UNIPRO_L3, /* Network */
UNIPRO_L4, /* Transport */
UNIPRO_DME, /* DME */
};
/*
* UNIPRO registers
*/
#define UNIPRO_DME_POWERMODE_REQ_REMOTEL2TIMER0 0x78B8
#define UNIPRO_DME_POWERMODE_REQ_REMOTEL2TIMER1 0x78BC
#define UNIPRO_DME_POWERMODE_REQ_REMOTEL2TIMER2 0x78C0
/*
* UFS Protector registers
*/
#define UFSPRSECURITY 0x010
#define NSSMU BIT(14)
#define UFSPSBEGIN0 0x200
#define UFSPSEND0 0x204
#define UFSPSLUN0 0x208
#define UFSPSCTRL0 0x20C
#define CNTR_DIV_VAL 40
static void exynos_ufs_auto_ctrl_hcc(struct exynos_ufs *ufs, bool en);
static void exynos_ufs_ctrl_clkstop(struct exynos_ufs *ufs, bool en);
static inline void exynos_ufs_enable_auto_ctrl_hcc(struct exynos_ufs *ufs)
{
exynos_ufs_auto_ctrl_hcc(ufs, true);
}
static inline void exynos_ufs_disable_auto_ctrl_hcc(struct exynos_ufs *ufs)
{
exynos_ufs_auto_ctrl_hcc(ufs, false);
}
static inline void exynos_ufs_disable_auto_ctrl_hcc_save(
struct exynos_ufs *ufs, u32 *val)
{
*val = hci_readl(ufs, HCI_MISC);
exynos_ufs_auto_ctrl_hcc(ufs, false);
}
static inline void exynos_ufs_auto_ctrl_hcc_restore(
struct exynos_ufs *ufs, u32 *val)
{
hci_writel(ufs, *val, HCI_MISC);
}
static inline void exynos_ufs_gate_clks(struct exynos_ufs *ufs)
{
exynos_ufs_ctrl_clkstop(ufs, true);
}
static inline void exynos_ufs_ungate_clks(struct exynos_ufs *ufs)
{
exynos_ufs_ctrl_clkstop(ufs, false);
}
static int exynos7_ufs_drv_init(struct device *dev, struct exynos_ufs *ufs)
{
return 0;
}
static int exynosauto_ufs_drv_init(struct device *dev, struct exynos_ufs *ufs)
{
struct exynos_ufs_uic_attr *attr = ufs->drv_data->uic_attr;
/* IO Coherency setting */
if (ufs->sysreg) {
return regmap_update_bits(ufs->sysreg,
ufs->shareability_reg_offset,
UFS_SHARABLE, UFS_SHARABLE);
}
attr->tx_dif_p_nsec = 3200000;
return 0;
}
static int exynosauto_ufs_post_hce_enable(struct exynos_ufs *ufs)
{
struct ufs_hba *hba = ufs->hba;
/* Enable Virtual Host #1 */
ufshcd_rmwl(hba, MHCTRL_EN_VH_MASK, MHCTRL_EN_VH(1), MHCTRL);
/* Default VH Transfer permissions */
hci_writel(ufs, ALLOW_TRANS_VH_DEFAULT, HCI_MH_ALLOWABLE_TRAN_OF_VH);
/* IID information is replaced in TASKTAG[7:5] instead of IID in UCD */
hci_writel(ufs, 0x1, HCI_MH_IID_IN_TASK_TAG);
return 0;
}
static int exynosauto_ufs_pre_link(struct exynos_ufs *ufs)
{
struct ufs_hba *hba = ufs->hba;
int i;
u32 tx_line_reset_period, rx_line_reset_period;
rx_line_reset_period = (RX_LINE_RESET_TIME * ufs->mclk_rate) / NSEC_PER_MSEC;
tx_line_reset_period = (TX_LINE_RESET_TIME * ufs->mclk_rate) / NSEC_PER_MSEC;
ufshcd_dme_set(hba, UIC_ARG_MIB(0x200), 0x40);
for_each_ufs_rx_lane(ufs, i) {
ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(VND_RX_CLK_PRD, i),
DIV_ROUND_UP(NSEC_PER_SEC, ufs->mclk_rate));
ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(VND_RX_CLK_PRD_EN, i), 0x0);
ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(VND_RX_LINERESET_VALUE2, i),
(rx_line_reset_period >> 16) & 0xFF);
ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(VND_RX_LINERESET_VALUE1, i),
(rx_line_reset_period >> 8) & 0xFF);
ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(VND_RX_LINERESET_VALUE0, i),
(rx_line_reset_period) & 0xFF);
ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x2f, i), 0x79);
ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x84, i), 0x1);
ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x25, i), 0xf6);
}
for_each_ufs_tx_lane(ufs, i) {
ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(VND_TX_CLK_PRD, i),
DIV_ROUND_UP(NSEC_PER_SEC, ufs->mclk_rate));
/* Prevent VND_TX_LINERESET_PVALUE from affecting VND_TX_CLK_PRD */
ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(VND_TX_CLK_PRD_EN, i),
0x02);
ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(VND_TX_LINERESET_PVALUE2, i),
(tx_line_reset_period >> 16) & 0xFF);
ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(VND_TX_LINERESET_PVALUE1, i),
(tx_line_reset_period >> 8) & 0xFF);
ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(VND_TX_LINERESET_PVALUE0, i),
(tx_line_reset_period) & 0xFF);
/* TX PWM Gear Capability / PWM_G1_ONLY */
ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x04, i), 0x1);
}
ufshcd_dme_set(hba, UIC_ARG_MIB(0x200), 0x0);
ufshcd_dme_set(hba, UIC_ARG_MIB(PA_LOCAL_TX_LCC_ENABLE), 0x0);
ufshcd_dme_set(hba, UIC_ARG_MIB(0xa011), 0x8000);
return 0;
}
static int exynosauto_ufs_pre_pwr_change(struct exynos_ufs *ufs,
struct ufs_pa_layer_attr *pwr)
{
struct ufs_hba *hba = ufs->hba;
/* These values are delivered to the remote DME via PACP_PWR_req */
ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA0), 12000);
ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA1), 32000);
ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA2), 16000);
return 0;
}
static int exynosauto_ufs_post_pwr_change(struct exynos_ufs *ufs,
struct ufs_pa_layer_attr *pwr)
{
struct ufs_hba *hba = ufs->hba;
u32 enabled_vh;
enabled_vh = ufshcd_readl(hba, MHCTRL) & MHCTRL_EN_VH_MASK;
/* Send physical host ready message to virtual hosts */
ufshcd_writel(hba, MH_MSG(enabled_vh, MH_MSG_PH_READY), PH2VH_MBOX);
return 0;
}
static int exynos7_ufs_pre_link(struct exynos_ufs *ufs)
{
struct ufs_hba *hba = ufs->hba;
u32 val = ufs->drv_data->uic_attr->pa_dbg_option_suite;
int i;
exynos_ufs_enable_ov_tm(hba);
for_each_ufs_tx_lane(ufs, i)
ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x297, i), 0x17);
for_each_ufs_rx_lane(ufs, i) {
ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x362, i), 0xff);
ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x363, i), 0x00);
}
exynos_ufs_disable_ov_tm(hba);
for_each_ufs_tx_lane(ufs, i)
ufshcd_dme_set(hba,
UIC_ARG_MIB_SEL(TX_HIBERN8_CONTROL, i), 0x0);
ufshcd_dme_set(hba, UIC_ARG_MIB(PA_DBG_TXPHY_CFGUPDT), 0x1);
udelay(1);
ufshcd_dme_set(hba, UIC_ARG_MIB(PA_DBG_OPTION_SUITE), val | (1 << 12));
ufshcd_dme_set(hba, UIC_ARG_MIB(PA_DBG_SKIP_RESET_PHY), 0x1);
ufshcd_dme_set(hba, UIC_ARG_MIB(PA_DBG_SKIP_LINE_RESET), 0x1);
ufshcd_dme_set(hba, UIC_ARG_MIB(PA_DBG_LINE_RESET_REQ), 0x1);
udelay(1600);
ufshcd_dme_set(hba, UIC_ARG_MIB(PA_DBG_OPTION_SUITE), val);
return 0;
}
static int exynos7_ufs_post_link(struct exynos_ufs *ufs)
{
struct ufs_hba *hba = ufs->hba;
int i;
exynos_ufs_enable_ov_tm(hba);
for_each_ufs_tx_lane(ufs, i) {
ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x28b, i), 0x83);
ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x29a, i), 0x07);
ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x277, i),
TX_LINERESET_N(exynos_ufs_calc_time_cntr(ufs, 200000)));
}
exynos_ufs_disable_ov_tm(hba);
exynos_ufs_enable_dbg_mode(hba);
ufshcd_dme_set(hba, UIC_ARG_MIB(PA_SAVECONFIGTIME), 0xbb8);
exynos_ufs_disable_dbg_mode(hba);
return 0;
}
static int exynos7_ufs_pre_pwr_change(struct exynos_ufs *ufs,
struct ufs_pa_layer_attr *pwr)
{
unipro_writel(ufs, 0x22, UNIPRO_DBG_FORCE_DME_CTRL_STATE);
return 0;
}
static int exynos7_ufs_post_pwr_change(struct exynos_ufs *ufs,
struct ufs_pa_layer_attr *pwr)
{
struct ufs_hba *hba = ufs->hba;
int lanes = max_t(u32, pwr->lane_rx, pwr->lane_tx);
ufshcd_dme_set(hba, UIC_ARG_MIB(PA_DBG_RXPHY_CFGUPDT), 0x1);
if (lanes == 1) {
exynos_ufs_enable_dbg_mode(hba);
ufshcd_dme_set(hba, UIC_ARG_MIB(PA_CONNECTEDTXDATALANES), 0x1);
exynos_ufs_disable_dbg_mode(hba);
}
return 0;
}
/*
* exynos_ufs_auto_ctrl_hcc - HCI core clock control by h/w
* Control should be disabled in the below cases
* - Before host controller S/W reset
* - Access to UFS protector's register
*/
static void exynos_ufs_auto_ctrl_hcc(struct exynos_ufs *ufs, bool en)
{
u32 misc = hci_readl(ufs, HCI_MISC);
if (en)
hci_writel(ufs, misc | HCI_CORECLK_CTRL_EN, HCI_MISC);
else
hci_writel(ufs, misc & ~HCI_CORECLK_CTRL_EN, HCI_MISC);
}
static void exynos_ufs_ctrl_clkstop(struct exynos_ufs *ufs, bool en)
{
u32 ctrl = hci_readl(ufs, HCI_CLKSTOP_CTRL);
u32 misc = hci_readl(ufs, HCI_MISC);
if (en) {
hci_writel(ufs, misc | CLK_CTRL_EN_MASK, HCI_MISC);
hci_writel(ufs, ctrl | CLK_STOP_MASK, HCI_CLKSTOP_CTRL);
} else {
hci_writel(ufs, ctrl & ~CLK_STOP_MASK, HCI_CLKSTOP_CTRL);
hci_writel(ufs, misc & ~CLK_CTRL_EN_MASK, HCI_MISC);
}
}
static int exynos_ufs_get_clk_info(struct exynos_ufs *ufs)
{
struct ufs_hba *hba = ufs->hba;
struct list_head *head = &hba->clk_list_head;
struct ufs_clk_info *clki;
unsigned long pclk_rate;
u32 f_min, f_max;
u8 div = 0;
int ret = 0;
if (list_empty(head))
goto out;
list_for_each_entry(clki, head, list) {
if (!IS_ERR(clki->clk)) {
if (!strcmp(clki->name, "core_clk"))
ufs->clk_hci_core = clki->clk;
else if (!strcmp(clki->name, "sclk_unipro_main"))
ufs->clk_unipro_main = clki->clk;
}
}
if (!ufs->clk_hci_core || !ufs->clk_unipro_main) {
dev_err(hba->dev, "failed to get clk info\n");
ret = -EINVAL;
goto out;
}
ufs->mclk_rate = clk_get_rate(ufs->clk_unipro_main);
pclk_rate = clk_get_rate(ufs->clk_hci_core);
f_min = ufs->pclk_avail_min;
f_max = ufs->pclk_avail_max;
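/* Derive the APB clock divider that brings PCLK into the supported range. */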
if (ufs->opts & EXYNOS_UFS_OPT_HAS_APB_CLK_CTRL) {
do {
pclk_rate /= (div + 1);
if (pclk_rate <= f_max)
break;
div++;
} while (pclk_rate >= f_min);
}
if (unlikely(pclk_rate < f_min || pclk_rate > f_max)) {
dev_err(hba->dev, "not available pclk range %lu\n", pclk_rate);
ret = -EINVAL;
goto out;
}
ufs->pclk_rate = pclk_rate;
ufs->pclk_div = div;
out:
return ret;
}
static void exynos_ufs_set_unipro_pclk_div(struct exynos_ufs *ufs)
{
if (ufs->opts & EXYNOS_UFS_OPT_HAS_APB_CLK_CTRL) {
u32 val;
val = hci_readl(ufs, HCI_UNIPRO_APB_CLK_CTRL);
hci_writel(ufs, UNIPRO_APB_CLK(val, ufs->pclk_div),
HCI_UNIPRO_APB_CLK_CTRL);
}
}
static void exynos_ufs_set_pwm_clk_div(struct exynos_ufs *ufs)
{
struct ufs_hba *hba = ufs->hba;
struct exynos_ufs_uic_attr *attr = ufs->drv_data->uic_attr;
ufshcd_dme_set(hba,
UIC_ARG_MIB(CMN_PWM_CLK_CTRL), attr->cmn_pwm_clk_ctrl);
}
static void exynos_ufs_calc_pwm_clk_div(struct exynos_ufs *ufs)
{
struct ufs_hba *hba = ufs->hba;
struct exynos_ufs_uic_attr *attr = ufs->drv_data->uic_attr;
const unsigned int div = 30, mult = 20;
const unsigned long pwm_min = 3 * 1000 * 1000;
const unsigned long pwm_max = 9 * 1000 * 1000;
const int divs[] = {32, 16, 8, 4};
unsigned long clk = 0, _clk, clk_period;
int i = 0, clk_idx = -1;
clk_period = UNIPRO_PCLK_PERIOD(ufs);
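/* Pick the divider that yields the highest PWM clock inside the 3-9 MHz window. */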
for (i = 0; i < ARRAY_SIZE(divs); i++) {
_clk = NSEC_PER_SEC * mult / (clk_period * divs[i] * div);
if (_clk >= pwm_min && _clk <= pwm_max) {
if (_clk > clk) {
clk_idx = i;
clk = _clk;
}
}
}
if (clk_idx == -1) {
ufshcd_dme_get(hba, UIC_ARG_MIB(CMN_PWM_CLK_CTRL), &clk_idx);
dev_err(hba->dev,
"failed to decide pwm clock divider, will not change\n");
}
attr->cmn_pwm_clk_ctrl = clk_idx & PWM_CLK_CTRL_MASK;
}
long exynos_ufs_calc_time_cntr(struct exynos_ufs *ufs, long period)
{
const int precise = 10;
long pclk_rate = ufs->pclk_rate;
long clk_period, fraction;
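/*
 * Convert @period (ns) into PCLK cycles, using x10 fixed point to
 * account for the fractional part of the clock period.
 */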
clk_period = UNIPRO_PCLK_PERIOD(ufs);
fraction = ((NSEC_PER_SEC % pclk_rate) * precise) / pclk_rate;
return (period * precise) / ((clk_period * precise) + fraction);
}
static void exynos_ufs_specify_phy_time_attr(struct exynos_ufs *ufs)
{
struct exynos_ufs_uic_attr *attr = ufs->drv_data->uic_attr;
struct ufs_phy_time_cfg *t_cfg = &ufs->t_cfg;
t_cfg->tx_linereset_p =
exynos_ufs_calc_time_cntr(ufs, attr->tx_dif_p_nsec);
t_cfg->tx_linereset_n =
exynos_ufs_calc_time_cntr(ufs, attr->tx_dif_n_nsec);
t_cfg->tx_high_z_cnt =
exynos_ufs_calc_time_cntr(ufs, attr->tx_high_z_cnt_nsec);
t_cfg->tx_base_n_val =
exynos_ufs_calc_time_cntr(ufs, attr->tx_base_unit_nsec);
t_cfg->tx_gran_n_val =
exynos_ufs_calc_time_cntr(ufs, attr->tx_gran_unit_nsec);
t_cfg->tx_sleep_cnt =
exynos_ufs_calc_time_cntr(ufs, attr->tx_sleep_cnt);
t_cfg->rx_linereset =
exynos_ufs_calc_time_cntr(ufs, attr->rx_dif_p_nsec);
t_cfg->rx_hibern8_wait =
exynos_ufs_calc_time_cntr(ufs, attr->rx_hibern8_wait_nsec);
t_cfg->rx_base_n_val =
exynos_ufs_calc_time_cntr(ufs, attr->rx_base_unit_nsec);
t_cfg->rx_gran_n_val =
exynos_ufs_calc_time_cntr(ufs, attr->rx_gran_unit_nsec);
t_cfg->rx_sleep_cnt =
exynos_ufs_calc_time_cntr(ufs, attr->rx_sleep_cnt);
t_cfg->rx_stall_cnt =
exynos_ufs_calc_time_cntr(ufs, attr->rx_stall_cnt);
}
static void exynos_ufs_config_phy_time_attr(struct exynos_ufs *ufs)
{
struct ufs_hba *hba = ufs->hba;
struct ufs_phy_time_cfg *t_cfg = &ufs->t_cfg;
int i;
exynos_ufs_set_pwm_clk_div(ufs);
exynos_ufs_enable_ov_tm(hba);
for_each_ufs_rx_lane(ufs, i) {
ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(RX_FILLER_ENABLE, i),
ufs->drv_data->uic_attr->rx_filler_enable);
ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(RX_LINERESET_VAL, i),
RX_LINERESET(t_cfg->rx_linereset));
ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(RX_BASE_NVAL_07_00, i),
RX_BASE_NVAL_L(t_cfg->rx_base_n_val));
ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(RX_BASE_NVAL_15_08, i),
RX_BASE_NVAL_H(t_cfg->rx_base_n_val));
ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(RX_GRAN_NVAL_07_00, i),
RX_GRAN_NVAL_L(t_cfg->rx_gran_n_val));
ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(RX_GRAN_NVAL_10_08, i),
RX_GRAN_NVAL_H(t_cfg->rx_gran_n_val));
ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(RX_OV_SLEEP_CNT_TIMER, i),
RX_OV_SLEEP_CNT(t_cfg->rx_sleep_cnt));
ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(RX_OV_STALL_CNT_TIMER, i),
RX_OV_STALL_CNT(t_cfg->rx_stall_cnt));
}
for_each_ufs_tx_lane(ufs, i) {
ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(TX_LINERESET_P_VAL, i),
TX_LINERESET_P(t_cfg->tx_linereset_p));
ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(TX_HIGH_Z_CNT_07_00, i),
TX_HIGH_Z_CNT_L(t_cfg->tx_high_z_cnt));
ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(TX_HIGH_Z_CNT_11_08, i),
TX_HIGH_Z_CNT_H(t_cfg->tx_high_z_cnt));
ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(TX_BASE_NVAL_07_00, i),
TX_BASE_NVAL_L(t_cfg->tx_base_n_val));
ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(TX_BASE_NVAL_15_08, i),
TX_BASE_NVAL_H(t_cfg->tx_base_n_val));
ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(TX_GRAN_NVAL_07_00, i),
TX_GRAN_NVAL_L(t_cfg->tx_gran_n_val));
ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(TX_GRAN_NVAL_10_08, i),
TX_GRAN_NVAL_H(t_cfg->tx_gran_n_val));
ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(TX_OV_SLEEP_CNT_TIMER, i),
TX_OV_H8_ENTER_EN |
TX_OV_SLEEP_CNT(t_cfg->tx_sleep_cnt));
ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(TX_MIN_ACTIVATETIME, i),
ufs->drv_data->uic_attr->tx_min_activatetime);
}
exynos_ufs_disable_ov_tm(hba);
}
static void exynos_ufs_config_phy_cap_attr(struct exynos_ufs *ufs)
{
struct ufs_hba *hba = ufs->hba;
struct exynos_ufs_uic_attr *attr = ufs->drv_data->uic_attr;
int i;
exynos_ufs_enable_ov_tm(hba);
for_each_ufs_rx_lane(ufs, i) {
ufshcd_dme_set(hba,
UIC_ARG_MIB_SEL(RX_HS_G1_SYNC_LENGTH_CAP, i),
attr->rx_hs_g1_sync_len_cap);
ufshcd_dme_set(hba,
UIC_ARG_MIB_SEL(RX_HS_G2_SYNC_LENGTH_CAP, i),
attr->rx_hs_g2_sync_len_cap);
ufshcd_dme_set(hba,
UIC_ARG_MIB_SEL(RX_HS_G3_SYNC_LENGTH_CAP, i),
attr->rx_hs_g3_sync_len_cap);
ufshcd_dme_set(hba,
UIC_ARG_MIB_SEL(RX_HS_G1_PREP_LENGTH_CAP, i),
attr->rx_hs_g1_prep_sync_len_cap);
ufshcd_dme_set(hba,
UIC_ARG_MIB_SEL(RX_HS_G2_PREP_LENGTH_CAP, i),
attr->rx_hs_g2_prep_sync_len_cap);
ufshcd_dme_set(hba,
UIC_ARG_MIB_SEL(RX_HS_G3_PREP_LENGTH_CAP, i),
attr->rx_hs_g3_prep_sync_len_cap);
}
if (attr->rx_adv_fine_gran_sup_en == 0) {
for_each_ufs_rx_lane(ufs, i) {
ufshcd_dme_set(hba,
UIC_ARG_MIB_SEL(RX_ADV_GRANULARITY_CAP, i), 0);
if (attr->rx_min_actv_time_cap)
ufshcd_dme_set(hba,
UIC_ARG_MIB_SEL(
RX_MIN_ACTIVATETIME_CAPABILITY, i),
attr->rx_min_actv_time_cap);
if (attr->rx_hibern8_time_cap)
ufshcd_dme_set(hba,
UIC_ARG_MIB_SEL(RX_HIBERN8TIME_CAP, i),
attr->rx_hibern8_time_cap);
}
} else if (attr->rx_adv_fine_gran_sup_en == 1) {
for_each_ufs_rx_lane(ufs, i) {
if (attr->rx_adv_fine_gran_step)
ufshcd_dme_set(hba,
UIC_ARG_MIB_SEL(RX_ADV_GRANULARITY_CAP,
i), RX_ADV_FINE_GRAN_STEP(
attr->rx_adv_fine_gran_step));
if (attr->rx_adv_min_actv_time_cap)
ufshcd_dme_set(hba,
UIC_ARG_MIB_SEL(
RX_ADV_MIN_ACTIVATETIME_CAP, i),
attr->rx_adv_min_actv_time_cap);
if (attr->rx_adv_hibern8_time_cap)
ufshcd_dme_set(hba,
UIC_ARG_MIB_SEL(RX_ADV_HIBERN8TIME_CAP,
i),
attr->rx_adv_hibern8_time_cap);
}
}
exynos_ufs_disable_ov_tm(hba);
}
static void exynos_ufs_establish_connt(struct exynos_ufs *ufs)
{
struct ufs_hba *hba = ufs->hba;
enum {
DEV_ID = 0x00,
PEER_DEV_ID = 0x01,
PEER_CPORT_ID = 0x00,
TRAFFIC_CLASS = 0x00,
};
/* allow cport attributes to be set */
ufshcd_dme_set(hba, UIC_ARG_MIB(T_CONNECTIONSTATE), CPORT_IDLE);
/* local unipro attributes */
ufshcd_dme_set(hba, UIC_ARG_MIB(N_DEVICEID), DEV_ID);
ufshcd_dme_set(hba, UIC_ARG_MIB(N_DEVICEID_VALID), true);
ufshcd_dme_set(hba, UIC_ARG_MIB(T_PEERDEVICEID), PEER_DEV_ID);
ufshcd_dme_set(hba, UIC_ARG_MIB(T_PEERCPORTID), PEER_CPORT_ID);
ufshcd_dme_set(hba, UIC_ARG_MIB(T_CPORTFLAGS), CPORT_DEF_FLAGS);
ufshcd_dme_set(hba, UIC_ARG_MIB(T_TRAFFICCLASS), TRAFFIC_CLASS);
ufshcd_dme_set(hba, UIC_ARG_MIB(T_CONNECTIONSTATE), CPORT_CONNECTED);
}
static void exynos_ufs_config_smu(struct exynos_ufs *ufs)
{
u32 reg, val;
exynos_ufs_disable_auto_ctrl_hcc_save(ufs, &val);
/* make encryption disabled by default */
reg = ufsp_readl(ufs, UFSPRSECURITY);
ufsp_writel(ufs, reg | NSSMU, UFSPRSECURITY);
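/* Protector region 0: cover the whole address range (0x0 - 0xffffffff). */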
ufsp_writel(ufs, 0x0, UFSPSBEGIN0);
ufsp_writel(ufs, 0xffffffff, UFSPSEND0);
ufsp_writel(ufs, 0xff, UFSPSLUN0);
ufsp_writel(ufs, 0xf1, UFSPSCTRL0);
exynos_ufs_auto_ctrl_hcc_restore(ufs, &val);
}
static void exynos_ufs_config_sync_pattern_mask(struct exynos_ufs *ufs,
struct ufs_pa_layer_attr *pwr)
{
struct ufs_hba *hba = ufs->hba;
u8 g = max_t(u32, pwr->gear_rx, pwr->gear_tx);
u32 mask, sync_len;
enum {
SYNC_LEN_G1 = 80 * 1000, /* 80us */
SYNC_LEN_G2 = 40 * 1000, /* 40us */
SYNC_LEN_G3 = 20 * 1000, /* 20us */
};
int i;
if (g == 1)
sync_len = SYNC_LEN_G1;
else if (g == 2)
sync_len = SYNC_LEN_G2;
else if (g == 3)
sync_len = SYNC_LEN_G3;
else
return;
mask = exynos_ufs_calc_time_cntr(ufs, sync_len);
mask = (mask >> 8) & 0xff;
exynos_ufs_enable_ov_tm(hba);
for_each_ufs_rx_lane(ufs, i)
ufshcd_dme_set(hba,
UIC_ARG_MIB_SEL(RX_SYNC_MASK_LENGTH, i), mask);
exynos_ufs_disable_ov_tm(hba);
}
static int exynos_ufs_pre_pwr_mode(struct ufs_hba *hba,
struct ufs_pa_layer_attr *dev_max_params,
struct ufs_pa_layer_attr *dev_req_params)
{
struct exynos_ufs *ufs = ufshcd_get_variant(hba);
struct phy *generic_phy = ufs->phy;
struct ufs_dev_params ufs_exynos_cap;
int ret;
if (!dev_req_params) {
pr_err("%s: incoming dev_req_params is NULL\n", __func__);
ret = -EINVAL;
goto out;
}
ufshcd_init_pwr_dev_param(&ufs_exynos_cap);
ret = ufshcd_get_pwr_dev_param(&ufs_exynos_cap,
dev_max_params, dev_req_params);
if (ret) {
pr_err("%s: failed to determine capabilities\n", __func__);
goto out;
}
if (ufs->drv_data->pre_pwr_change)
ufs->drv_data->pre_pwr_change(ufs, dev_req_params);
if (ufshcd_is_hs_mode(dev_req_params)) {
exynos_ufs_config_sync_pattern_mask(ufs, dev_req_params);
switch (dev_req_params->hs_rate) {
case PA_HS_MODE_A:
case PA_HS_MODE_B:
phy_calibrate(generic_phy);
break;
}
}
/* setting for three timeout values for traffic class #0 */
ufshcd_dme_set(hba, UIC_ARG_MIB(DL_FC0PROTTIMEOUTVAL), 8064);
ufshcd_dme_set(hba, UIC_ARG_MIB(DL_TC0REPLAYTIMEOUTVAL), 28224);
ufshcd_dme_set(hba, UIC_ARG_MIB(DL_AFC0REQTIMEOUTVAL), 20160);
return 0;
out:
return ret;
}
#define PWR_MODE_STR_LEN 64
static int exynos_ufs_post_pwr_mode(struct ufs_hba *hba,
struct ufs_pa_layer_attr *pwr_req)
{
struct exynos_ufs *ufs = ufshcd_get_variant(hba);
struct phy *generic_phy = ufs->phy;
int gear = max_t(u32, pwr_req->gear_rx, pwr_req->gear_tx);
int lanes = max_t(u32, pwr_req->lane_rx, pwr_req->lane_tx);
char pwr_str[PWR_MODE_STR_LEN] = "";
/* let default be PWM Gear 1, Lane 1 */
if (!gear)
gear = 1;
if (!lanes)
lanes = 1;
if (ufs->drv_data->post_pwr_change)
ufs->drv_data->post_pwr_change(ufs, pwr_req);
if (ufshcd_is_hs_mode(pwr_req)) {
switch (pwr_req->hs_rate) {
case PA_HS_MODE_A:
case PA_HS_MODE_B:
phy_calibrate(generic_phy);
break;
}
snprintf(pwr_str, PWR_MODE_STR_LEN, "%s series_%s G_%d L_%d",
"FAST", pwr_req->hs_rate == PA_HS_MODE_A ? "A" : "B",
gear, lanes);
} else {
snprintf(pwr_str, PWR_MODE_STR_LEN, "%s G_%d L_%d",
"SLOW", gear, lanes);
}
dev_info(hba->dev, "Power mode changed to : %s\n", pwr_str);
return 0;
}
static void exynos_ufs_specify_nexus_t_xfer_req(struct ufs_hba *hba,
int tag, bool is_scsi_cmd)
{
struct exynos_ufs *ufs = ufshcd_get_variant(hba);
u32 type;
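/*
 * Each bit of HCI_UTRL_NEXUS_TYPE corresponds to a doorbell tag:
 * set for SCSI commands, cleared for device management requests.
 */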
type = hci_readl(ufs, HCI_UTRL_NEXUS_TYPE);
if (is_scsi_cmd)
hci_writel(ufs, type | (1 << tag), HCI_UTRL_NEXUS_TYPE);
else
hci_writel(ufs, type & ~(1 << tag), HCI_UTRL_NEXUS_TYPE);
}
static void exynos_ufs_specify_nexus_t_tm_req(struct ufs_hba *hba,
int tag, u8 func)
{
struct exynos_ufs *ufs = ufshcd_get_variant(hba);
u32 type;
type = hci_readl(ufs, HCI_UTMRL_NEXUS_TYPE);
switch (func) {
case UFS_ABORT_TASK:
case UFS_QUERY_TASK:
hci_writel(ufs, type | (1 << tag), HCI_UTMRL_NEXUS_TYPE);
break;
case UFS_ABORT_TASK_SET:
case UFS_CLEAR_TASK_SET:
case UFS_LOGICAL_RESET:
case UFS_QUERY_TASK_SET:
hci_writel(ufs, type & ~(1 << tag), HCI_UTMRL_NEXUS_TYPE);
break;
}
}
static int exynos_ufs_phy_init(struct exynos_ufs *ufs)
{
struct ufs_hba *hba = ufs->hba;
struct phy *generic_phy = ufs->phy;
int ret = 0;
if (ufs->avail_ln_rx == 0 || ufs->avail_ln_tx == 0) {
ufshcd_dme_get(hba, UIC_ARG_MIB(PA_AVAILRXDATALANES),
&ufs->avail_ln_rx);
ufshcd_dme_get(hba, UIC_ARG_MIB(PA_AVAILTXDATALANES),
&ufs->avail_ln_tx);
WARN(ufs->avail_ln_rx != ufs->avail_ln_tx,
"available data lane is not equal(rx:%d, tx:%d)\n",
ufs->avail_ln_rx, ufs->avail_ln_tx);
}
phy_set_bus_width(generic_phy, ufs->avail_ln_rx);
ret = phy_init(generic_phy);
if (ret) {
dev_err(hba->dev, "%s: phy init failed, ret = %d\n",
__func__, ret);
return ret;
}
ret = phy_power_on(generic_phy);
if (ret)
goto out_exit_phy;
return 0;
out_exit_phy:
phy_exit(generic_phy);
return ret;
}
static void exynos_ufs_config_unipro(struct exynos_ufs *ufs)
{
struct ufs_hba *hba = ufs->hba;
ufshcd_dme_set(hba, UIC_ARG_MIB(PA_DBG_CLK_PERIOD),
DIV_ROUND_UP(NSEC_PER_SEC, ufs->mclk_rate));
ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXTRAILINGCLOCKS),
ufs->drv_data->uic_attr->tx_trailingclks);
ufshcd_dme_set(hba, UIC_ARG_MIB(PA_DBG_OPTION_SUITE),
ufs->drv_data->uic_attr->pa_dbg_option_suite);
}
static void exynos_ufs_config_intr(struct exynos_ufs *ufs, u32 errs, u8 index)
{
switch (index) {
case UNIPRO_L1_5:
hci_writel(ufs, DFES_ERR_EN | errs, HCI_ERR_EN_PA_LAYER);
break;
case UNIPRO_L2:
hci_writel(ufs, DFES_ERR_EN | errs, HCI_ERR_EN_DL_LAYER);
break;
case UNIPRO_L3:
hci_writel(ufs, DFES_ERR_EN | errs, HCI_ERR_EN_N_LAYER);
break;
case UNIPRO_L4:
hci_writel(ufs, DFES_ERR_EN | errs, HCI_ERR_EN_T_LAYER);
break;
case UNIPRO_DME:
hci_writel(ufs, DFES_ERR_EN | errs, HCI_ERR_EN_DME_LAYER);
break;
}
}
static int exynos_ufs_setup_clocks(struct ufs_hba *hba, bool on,
enum ufs_notify_change_status status)
{
struct exynos_ufs *ufs = ufshcd_get_variant(hba);
if (!ufs)
return 0;
if (on && status == PRE_CHANGE) {
if (ufs->opts & EXYNOS_UFS_OPT_BROKEN_AUTO_CLK_CTRL)
exynos_ufs_disable_auto_ctrl_hcc(ufs);
exynos_ufs_ungate_clks(ufs);
} else if (!on && status == POST_CHANGE) {
exynos_ufs_gate_clks(ufs);
if (ufs->opts & EXYNOS_UFS_OPT_BROKEN_AUTO_CLK_CTRL)
exynos_ufs_enable_auto_ctrl_hcc(ufs);
}
return 0;
}
static int exynos_ufs_pre_link(struct ufs_hba *hba)
{
struct exynos_ufs *ufs = ufshcd_get_variant(hba);
/* hci */
exynos_ufs_config_intr(ufs, DFES_DEF_L2_ERRS, UNIPRO_L2);
exynos_ufs_config_intr(ufs, DFES_DEF_L3_ERRS, UNIPRO_L3);
exynos_ufs_config_intr(ufs, DFES_DEF_L4_ERRS, UNIPRO_L4);
exynos_ufs_set_unipro_pclk_div(ufs);
/* unipro */
exynos_ufs_config_unipro(ufs);
/* m-phy */
exynos_ufs_phy_init(ufs);
if (!(ufs->opts & EXYNOS_UFS_OPT_SKIP_CONFIG_PHY_ATTR)) {
exynos_ufs_config_phy_time_attr(ufs);
exynos_ufs_config_phy_cap_attr(ufs);
}
exynos_ufs_setup_clocks(hba, true, PRE_CHANGE);
if (ufs->drv_data->pre_link)
ufs->drv_data->pre_link(ufs);
return 0;
}
static void exynos_ufs_fit_aggr_timeout(struct exynos_ufs *ufs)
{
u32 val;
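/* HCI_1US_TO_CNT_VAL holds the PCLK cycle count corresponding to 1 us. */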
val = exynos_ufs_calc_time_cntr(ufs, IATOVAL_NSEC / CNTR_DIV_VAL);
hci_writel(ufs, val & CNT_VAL_1US_MASK, HCI_1US_TO_CNT_VAL);
}
static int exynos_ufs_post_link(struct ufs_hba *hba)
{
struct exynos_ufs *ufs = ufshcd_get_variant(hba);
struct phy *generic_phy = ufs->phy;
struct exynos_ufs_uic_attr *attr = ufs->drv_data->uic_attr;
exynos_ufs_establish_connt(ufs);
exynos_ufs_fit_aggr_timeout(ufs);
hci_writel(ufs, 0xa, HCI_DATA_REORDER);
hci_writel(ufs, PRDT_SET_SIZE(12), HCI_TXPRDT_ENTRY_SIZE);
hci_writel(ufs, PRDT_SET_SIZE(12), HCI_RXPRDT_ENTRY_SIZE);
hci_writel(ufs, (1 << hba->nutrs) - 1, HCI_UTRL_NEXUS_TYPE);
hci_writel(ufs, (1 << hba->nutmrs) - 1, HCI_UTMRL_NEXUS_TYPE);
hci_writel(ufs, 0xf, HCI_AXIDMA_RWDATA_BURST_LEN);
if (ufs->opts & EXYNOS_UFS_OPT_SKIP_CONNECTION_ESTAB)
ufshcd_dme_set(hba,
UIC_ARG_MIB(T_DBG_SKIP_INIT_HIBERN8_EXIT), true);
if (attr->pa_granularity) {
exynos_ufs_enable_dbg_mode(hba);
ufshcd_dme_set(hba, UIC_ARG_MIB(PA_GRANULARITY),
attr->pa_granularity);
exynos_ufs_disable_dbg_mode(hba);
if (attr->pa_tactivate)
ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TACTIVATE),
attr->pa_tactivate);
if (attr->pa_hibern8time &&
!(ufs->opts & EXYNOS_UFS_OPT_USE_SW_HIBERN8_TIMER))
ufshcd_dme_set(hba, UIC_ARG_MIB(PA_HIBERN8TIME),
attr->pa_hibern8time);
}
if (ufs->opts & EXYNOS_UFS_OPT_USE_SW_HIBERN8_TIMER) {
if (!attr->pa_granularity)
ufshcd_dme_get(hba, UIC_ARG_MIB(PA_GRANULARITY),
&attr->pa_granularity);
if (!attr->pa_hibern8time)
ufshcd_dme_get(hba, UIC_ARG_MIB(PA_HIBERN8TIME),
&attr->pa_hibern8time);
/*
 * Don't make the link wait for PA_Hibern8Time on hibern8 exit;
 * the driver enforces that wait itself in exynos_ufs_pre_hibern8().
 */
ufshcd_dme_set(hba, UIC_ARG_MIB(PA_HIBERN8TIME), 0);
if (attr->pa_granularity < 1 || attr->pa_granularity > 6) {
/* Valid range for granularity: 1 ~ 6 */
dev_warn(hba->dev,
"%s: pa_granularity %d is invalid, assuming backwards compatibility\n",
__func__,
attr->pa_granularity);
attr->pa_granularity = 6;
}
}
phy_calibrate(generic_phy);
if (ufs->drv_data->post_link)
ufs->drv_data->post_link(ufs);
return 0;
}
static int exynos_ufs_parse_dt(struct device *dev, struct exynos_ufs *ufs)
{
struct device_node *np = dev->of_node;
struct exynos_ufs_uic_attr *attr;
int ret = 0;
ufs->drv_data = device_get_match_data(dev);
if (ufs->drv_data && ufs->drv_data->uic_attr) {
attr = ufs->drv_data->uic_attr;
} else {
dev_err(dev, "failed to get uic attributes\n");
ret = -EINVAL;
goto out;
}
ufs->sysreg = syscon_regmap_lookup_by_phandle(np, "samsung,sysreg");
if (IS_ERR(ufs->sysreg))
ufs->sysreg = NULL;
else {
if (of_property_read_u32_index(np, "samsung,sysreg", 1,
&ufs->shareability_reg_offset)) {
dev_warn(dev, "can't get an offset from sysreg. Set to default value\n");
ufs->shareability_reg_offset = UFS_SHAREABILITY_OFFSET;
}
}
ufs->pclk_avail_min = PCLK_AVAIL_MIN;
ufs->pclk_avail_max = PCLK_AVAIL_MAX;
attr->rx_adv_fine_gran_sup_en = RX_ADV_FINE_GRAN_SUP_EN;
attr->rx_adv_fine_gran_step = RX_ADV_FINE_GRAN_STEP_VAL;
attr->rx_adv_min_actv_time_cap = RX_ADV_MIN_ACTV_TIME_CAP;
attr->pa_granularity = PA_GRANULARITY_VAL;
attr->pa_tactivate = PA_TACTIVATE_VAL;
attr->pa_hibern8time = PA_HIBERN8TIME_VAL;
out:
return ret;
}
static inline void exynos_ufs_priv_init(struct ufs_hba *hba,
struct exynos_ufs *ufs)
{
ufs->hba = hba;
ufs->opts = ufs->drv_data->opts;
ufs->rx_sel_idx = PA_MAXDATALANES;
if (ufs->opts & EXYNOS_UFS_OPT_BROKEN_RX_SEL_IDX)
ufs->rx_sel_idx = 0;
hba->priv = (void *)ufs;
hba->quirks = ufs->drv_data->quirks;
}
static int exynos_ufs_init(struct ufs_hba *hba)
{
struct device *dev = hba->dev;
struct platform_device *pdev = to_platform_device(dev);
struct exynos_ufs *ufs;
int ret;
ufs = devm_kzalloc(dev, sizeof(*ufs), GFP_KERNEL);
if (!ufs)
return -ENOMEM;
/* exynos-specific hci */
ufs->reg_hci = devm_platform_ioremap_resource_byname(pdev, "vs_hci");
if (IS_ERR(ufs->reg_hci)) {
dev_err(dev, "cannot ioremap for hci vendor register\n");
return PTR_ERR(ufs->reg_hci);
}
/* unipro */
ufs->reg_unipro = devm_platform_ioremap_resource_byname(pdev, "unipro");
if (IS_ERR(ufs->reg_unipro)) {
dev_err(dev, "cannot ioremap for unipro register\n");
return PTR_ERR(ufs->reg_unipro);
}
/* ufs protector */
ufs->reg_ufsp = devm_platform_ioremap_resource_byname(pdev, "ufsp");
if (IS_ERR(ufs->reg_ufsp)) {
dev_err(dev, "cannot ioremap for ufs protector register\n");
return PTR_ERR(ufs->reg_ufsp);
}
ret = exynos_ufs_parse_dt(dev, ufs);
if (ret) {
dev_err(dev, "failed to get dt info.\n");
goto out;
}
ufs->phy = devm_phy_get(dev, "ufs-phy");
if (IS_ERR(ufs->phy)) {
ret = PTR_ERR(ufs->phy);
dev_err(dev, "failed to get ufs-phy\n");
goto out;
}
exynos_ufs_priv_init(hba, ufs);
if (ufs->drv_data->drv_init) {
ret = ufs->drv_data->drv_init(dev, ufs);
if (ret) {
dev_err(dev, "failed to init drv-data\n");
goto out;
}
}
ret = exynos_ufs_get_clk_info(ufs);
if (ret)
goto out;
exynos_ufs_specify_phy_time_attr(ufs);
exynos_ufs_config_smu(ufs);
return 0;
out:
hba->priv = NULL;
return ret;
}
static int exynos_ufs_host_reset(struct ufs_hba *hba)
{
struct exynos_ufs *ufs = ufshcd_get_variant(hba);
unsigned long timeout = jiffies + msecs_to_jiffies(1);
u32 val;
int ret = 0;
exynos_ufs_disable_auto_ctrl_hcc_save(ufs, &val);
hci_writel(ufs, UFS_SW_RST_MASK, HCI_SW_RST);
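/* Wait for the controller to clear the reset bits. */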
do {
if (!(hci_readl(ufs, HCI_SW_RST) & UFS_SW_RST_MASK))
goto out;
} while (time_before(jiffies, timeout));
dev_err(hba->dev, "timeout host sw-reset\n");
ret = -ETIMEDOUT;
out:
exynos_ufs_auto_ctrl_hcc_restore(ufs, &val);
return ret;
}
static void exynos_ufs_dev_hw_reset(struct ufs_hba *hba)
{
struct exynos_ufs *ufs = ufshcd_get_variant(hba);
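/* Pulse the device reset line (GPIO_OUT bit 0) low for at least 5 us. */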
hci_writel(ufs, 0 << 0, HCI_GPIO_OUT);
udelay(5);
hci_writel(ufs, 1 << 0, HCI_GPIO_OUT);
}
static void exynos_ufs_pre_hibern8(struct ufs_hba *hba, u8 enter)
{
struct exynos_ufs *ufs = ufshcd_get_variant(hba);
struct exynos_ufs_uic_attr *attr = ufs->drv_data->uic_attr;
if (!enter) {
if (ufs->opts & EXYNOS_UFS_OPT_BROKEN_AUTO_CLK_CTRL)
exynos_ufs_disable_auto_ctrl_hcc(ufs);
exynos_ufs_ungate_clks(ufs);
if (ufs->opts & EXYNOS_UFS_OPT_USE_SW_HIBERN8_TIMER) {
static const unsigned int granularity_tbl[] = {
1, 4, 8, 16, 32, 100
};
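/*
 * Enforce PA_Hibern8Time in software: wait until at least
 * pa_hibern8time * granularity (in us) has elapsed since hibern8
 * entry before proceeding with the exit.
 */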
int h8_time = attr->pa_hibern8time *
granularity_tbl[attr->pa_granularity - 1];
unsigned long us;
s64 delta;
do {
delta = h8_time - ktime_us_delta(ktime_get(),
ufs->entry_hibern8_t);
if (delta <= 0)
break;
us = min_t(s64, delta, USEC_PER_MSEC);
if (us >= 10)
usleep_range(us, us + 10);
} while (1);
}
}
}
static void exynos_ufs_post_hibern8(struct ufs_hba *hba, u8 enter)
{
struct exynos_ufs *ufs = ufshcd_get_variant(hba);
if (!enter) {
u32 cur_mode = 0;
u32 pwrmode;
if (ufshcd_is_hs_mode(&ufs->dev_req_params))
pwrmode = FAST_MODE;
else
pwrmode = SLOW_MODE;
ufshcd_dme_get(hba, UIC_ARG_MIB(PA_PWRMODE), &cur_mode);
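/*
 * If the link did not come back in the expected power mode,
 * reprogram the maximum supported power mode.
 */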
if (cur_mode != (pwrmode << 4 | pwrmode)) {
dev_warn(hba->dev, "%s: power mode change\n", __func__);
hba->pwr_info.pwr_rx = (cur_mode >> 4) & 0xf;
hba->pwr_info.pwr_tx = cur_mode & 0xf;
ufshcd_config_pwr_mode(hba, &hba->max_pwr_info.info);
}
if (!(ufs->opts & EXYNOS_UFS_OPT_SKIP_CONNECTION_ESTAB))
exynos_ufs_establish_connt(ufs);
} else {
ufs->entry_hibern8_t = ktime_get();
exynos_ufs_gate_clks(ufs);
if (ufs->opts & EXYNOS_UFS_OPT_BROKEN_AUTO_CLK_CTRL)
exynos_ufs_enable_auto_ctrl_hcc(ufs);
}
}
static int exynos_ufs_hce_enable_notify(struct ufs_hba *hba,
enum ufs_notify_change_status status)
{
struct exynos_ufs *ufs = ufshcd_get_variant(hba);
int ret = 0;
switch (status) {
case PRE_CHANGE:
/*
* The maximum segment size must be set after scsi_host_alloc()
* has been called and before LUN scanning starts
* (ufshcd_async_scan()). Note: this callback may also be called
* from other functions than ufshcd_init().
*/
hba->host->max_segment_size = SZ_4K;
if (ufs->drv_data->pre_hce_enable) {
ret = ufs->drv_data->pre_hce_enable(ufs);
if (ret)
return ret;
}
ret = exynos_ufs_host_reset(hba);
if (ret)
return ret;
exynos_ufs_dev_hw_reset(hba);
break;
case POST_CHANGE:
exynos_ufs_calc_pwm_clk_div(ufs);
if (!(ufs->opts & EXYNOS_UFS_OPT_BROKEN_AUTO_CLK_CTRL))
exynos_ufs_enable_auto_ctrl_hcc(ufs);
if (ufs->drv_data->post_hce_enable)
ret = ufs->drv_data->post_hce_enable(ufs);
break;
}
return ret;
}
static int exynos_ufs_link_startup_notify(struct ufs_hba *hba,
enum ufs_notify_change_status status)
{
int ret = 0;
switch (status) {
case PRE_CHANGE:
ret = exynos_ufs_pre_link(hba);
break;
case POST_CHANGE:
ret = exynos_ufs_post_link(hba);
break;
}
return ret;
}
static int exynos_ufs_pwr_change_notify(struct ufs_hba *hba,
enum ufs_notify_change_status status,
struct ufs_pa_layer_attr *dev_max_params,
struct ufs_pa_layer_attr *dev_req_params)
{
int ret = 0;
switch (status) {
case PRE_CHANGE:
ret = exynos_ufs_pre_pwr_mode(hba, dev_max_params,
dev_req_params);
break;
case POST_CHANGE:
ret = exynos_ufs_post_pwr_mode(hba, dev_req_params);
break;
}
return ret;
}
static void exynos_ufs_hibern8_notify(struct ufs_hba *hba,
enum uic_cmd_dme enter,
enum ufs_notify_change_status notify)
{
switch ((u8)notify) {
case PRE_CHANGE:
exynos_ufs_pre_hibern8(hba, enter);
break;
case POST_CHANGE:
exynos_ufs_post_hibern8(hba, enter);
break;
}
}
static int exynos_ufs_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op,
enum ufs_notify_change_status status)
{
struct exynos_ufs *ufs = ufshcd_get_variant(hba);
if (status == PRE_CHANGE)
return 0;
if (!ufshcd_is_link_active(hba))
phy_power_off(ufs->phy);
return 0;
}
static int exynos_ufs_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op)
{
struct exynos_ufs *ufs = ufshcd_get_variant(hba);
if (!ufshcd_is_link_active(hba))
phy_power_on(ufs->phy);
exynos_ufs_config_smu(ufs);
return 0;
}
static int exynosauto_ufs_vh_link_startup_notify(struct ufs_hba *hba,
enum ufs_notify_change_status status)
{
if (status == POST_CHANGE) {
ufshcd_set_link_active(hba);
ufshcd_set_ufs_dev_active(hba);
}
return 0;
}
static int exynosauto_ufs_vh_wait_ph_ready(struct ufs_hba *hba)
{
u32 mbox;
ktime_t start, stop;
start = ktime_get();
stop = ktime_add(start, ms_to_ktime(PH_READY_TIMEOUT_MS));
do {
mbox = ufshcd_readl(hba, PH2VH_MBOX);
/* TODO: Mailbox message protocols between the PH and VHs are
* not implemented yet. This will be supported later
*/
if ((mbox & MH_MSG_MASK) == MH_MSG_PH_READY)
return 0;
usleep_range(40, 50);
} while (ktime_before(ktime_get(), stop));
return -ETIME;
}
static int exynosauto_ufs_vh_init(struct ufs_hba *hba)
{
struct device *dev = hba->dev;
struct platform_device *pdev = to_platform_device(dev);
struct exynos_ufs *ufs;
int ret;
ufs = devm_kzalloc(dev, sizeof(*ufs), GFP_KERNEL);
if (!ufs)
return -ENOMEM;
/* exynos-specific hci */
ufs->reg_hci = devm_platform_ioremap_resource_byname(pdev, "vs_hci");
if (IS_ERR(ufs->reg_hci)) {
dev_err(dev, "cannot ioremap for hci vendor register\n");
return PTR_ERR(ufs->reg_hci);
}
ret = exynosauto_ufs_vh_wait_ph_ready(hba);
if (ret)
return ret;
ufs->drv_data = device_get_match_data(dev);
if (!ufs->drv_data)
return -ENODEV;
exynos_ufs_priv_init(hba, ufs);
return 0;
}
static int fsd_ufs_pre_link(struct exynos_ufs *ufs)
{
int i;
struct ufs_hba *hba = ufs->hba;
ufshcd_dme_set(hba, UIC_ARG_MIB(PA_DBG_CLK_PERIOD),
DIV_ROUND_UP(NSEC_PER_SEC, ufs->mclk_rate));
ufshcd_dme_set(hba, UIC_ARG_MIB(0x201), 0x12);
ufshcd_dme_set(hba, UIC_ARG_MIB(0x200), 0x40);
for_each_ufs_tx_lane(ufs, i) {
ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0xAA, i),
DIV_ROUND_UP(NSEC_PER_SEC, ufs->mclk_rate));
ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x8F, i), 0x3F);
}
for_each_ufs_rx_lane(ufs, i) {
ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x12, i),
DIV_ROUND_UP(NSEC_PER_SEC, ufs->mclk_rate));
ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x5C, i), 0x38);
ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x0F, i), 0x0);
ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x65, i), 0x1);
ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x69, i), 0x1);
ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x21, i), 0x0);
ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x22, i), 0x0);
}
ufshcd_dme_set(hba, UIC_ARG_MIB(0x200), 0x0);
ufshcd_dme_set(hba, UIC_ARG_MIB(PA_DBG_AUTOMODE_THLD), 0x4E20);
ufshcd_dme_set(hba, UIC_ARG_MIB(PA_DBG_OPTION_SUITE), 0x2e820183);
ufshcd_dme_set(hba, UIC_ARG_MIB(PA_LOCAL_TX_LCC_ENABLE), 0x0);
exynos_ufs_establish_connt(ufs);
return 0;
}
static int fsd_ufs_post_link(struct exynos_ufs *ufs)
{
int i;
struct ufs_hba *hba = ufs->hba;
u32 hw_cap_min_tactivate;
u32 peer_rx_min_actv_time_cap;
u32 max_rx_hibern8_time_cap;
ufshcd_dme_get(hba, UIC_ARG_MIB_SEL(0x8F, 4),
&hw_cap_min_tactivate); /* HW Capability of MIN_TACTIVATE */
ufshcd_dme_get(hba, UIC_ARG_MIB(PA_TACTIVATE),
&peer_rx_min_actv_time_cap); /* PA_TActivate */
ufshcd_dme_get(hba, UIC_ARG_MIB(PA_HIBERN8TIME),
&max_rx_hibern8_time_cap); /* PA_Hibern8Time */
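/* Add one unit of margin to the peer's PA_TActivate and to PA_Hibern8Time. */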
if (peer_rx_min_actv_time_cap >= hw_cap_min_tactivate)
ufshcd_dme_peer_set(hba, UIC_ARG_MIB(PA_TACTIVATE),
peer_rx_min_actv_time_cap + 1);
ufshcd_dme_set(hba, UIC_ARG_MIB(PA_HIBERN8TIME), max_rx_hibern8_time_cap + 1);
ufshcd_dme_set(hba, UIC_ARG_MIB(PA_DBG_MODE), 0x01);
ufshcd_dme_set(hba, UIC_ARG_MIB(PA_SAVECONFIGTIME), 0xFA);
ufshcd_dme_set(hba, UIC_ARG_MIB(PA_DBG_MODE), 0x00);
ufshcd_dme_set(hba, UIC_ARG_MIB(0x200), 0x40);
for_each_ufs_rx_lane(ufs, i) {
ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x35, i), 0x05);
ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x73, i), 0x01);
ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x41, i), 0x02);
ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x42, i), 0xAC);
}
ufshcd_dme_set(hba, UIC_ARG_MIB(0x200), 0x0);
return 0;
}
static int fsd_ufs_pre_pwr_change(struct exynos_ufs *ufs,
struct ufs_pa_layer_attr *pwr)
{
struct ufs_hba *hba = ufs->hba;
ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXTERMINATION), 0x1);
ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXTERMINATION), 0x1);
ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA0), 12000);
ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA1), 32000);
ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA2), 16000);
unipro_writel(ufs, 12000, UNIPRO_DME_POWERMODE_REQ_REMOTEL2TIMER0);
unipro_writel(ufs, 32000, UNIPRO_DME_POWERMODE_REQ_REMOTEL2TIMER1);
unipro_writel(ufs, 16000, UNIPRO_DME_POWERMODE_REQ_REMOTEL2TIMER2);
return 0;
}
static const struct ufs_hba_variant_ops ufs_hba_exynos_ops = {
.name = "exynos_ufs",
.init = exynos_ufs_init,
.hce_enable_notify = exynos_ufs_hce_enable_notify,
.link_startup_notify = exynos_ufs_link_startup_notify,
.pwr_change_notify = exynos_ufs_pwr_change_notify,
.setup_clocks = exynos_ufs_setup_clocks,
.setup_xfer_req = exynos_ufs_specify_nexus_t_xfer_req,
.setup_task_mgmt = exynos_ufs_specify_nexus_t_tm_req,
.hibern8_notify = exynos_ufs_hibern8_notify,
.suspend = exynos_ufs_suspend,
.resume = exynos_ufs_resume,
};
static struct ufs_hba_variant_ops ufs_hba_exynosauto_vh_ops = {
.name = "exynosauto_ufs_vh",
.init = exynosauto_ufs_vh_init,
.link_startup_notify = exynosauto_ufs_vh_link_startup_notify,
};
static int exynos_ufs_probe(struct platform_device *pdev)
{
int err;
struct device *dev = &pdev->dev;
const struct ufs_hba_variant_ops *vops = &ufs_hba_exynos_ops;
const struct exynos_ufs_drv_data *drv_data =
device_get_match_data(dev);
if (drv_data && drv_data->vops)
vops = drv_data->vops;
err = ufshcd_pltfrm_init(pdev, vops);
if (err)
dev_err(dev, "ufshcd_pltfrm_init() failed %d\n", err);
return err;
}
static int exynos_ufs_remove(struct platform_device *pdev)
{
struct ufs_hba *hba = platform_get_drvdata(pdev);
struct exynos_ufs *ufs = ufshcd_get_variant(hba);
pm_runtime_get_sync(&(pdev)->dev);
ufshcd_remove(hba);
phy_power_off(ufs->phy);
phy_exit(ufs->phy);
return 0;
}
static struct exynos_ufs_uic_attr exynos7_uic_attr = {
.tx_trailingclks = 0x10,
.tx_dif_p_nsec = 3000000, /* unit: ns */
.tx_dif_n_nsec = 1000000, /* unit: ns */
.tx_high_z_cnt_nsec = 20000, /* unit: ns */
.tx_base_unit_nsec = 100000, /* unit: ns */
.tx_gran_unit_nsec = 4000, /* unit: ns */
.tx_sleep_cnt = 1000, /* unit: ns */
.tx_min_activatetime = 0xa,
.rx_filler_enable = 0x2,
.rx_dif_p_nsec = 1000000, /* unit: ns */
.rx_hibern8_wait_nsec = 4000000, /* unit: ns */
.rx_base_unit_nsec = 100000, /* unit: ns */
.rx_gran_unit_nsec = 4000, /* unit: ns */
.rx_sleep_cnt = 1280, /* unit: ns */
.rx_stall_cnt = 320, /* unit: ns */
.rx_hs_g1_sync_len_cap = SYNC_LEN_COARSE(0xf),
.rx_hs_g2_sync_len_cap = SYNC_LEN_COARSE(0xf),
.rx_hs_g3_sync_len_cap = SYNC_LEN_COARSE(0xf),
.rx_hs_g1_prep_sync_len_cap = PREP_LEN(0xf),
.rx_hs_g2_prep_sync_len_cap = PREP_LEN(0xf),
.rx_hs_g3_prep_sync_len_cap = PREP_LEN(0xf),
.pa_dbg_option_suite = 0x30103,
};
static const struct exynos_ufs_drv_data exynosauto_ufs_drvs = {
.uic_attr = &exynos7_uic_attr,
.quirks = UFSHCD_QUIRK_PRDT_BYTE_GRAN |
UFSHCI_QUIRK_SKIP_RESET_INTR_AGGR |
UFSHCD_QUIRK_BROKEN_OCS_FATAL_ERROR |
UFSHCD_QUIRK_SKIP_DEF_UNIPRO_TIMEOUT_SETTING,
.opts = EXYNOS_UFS_OPT_BROKEN_AUTO_CLK_CTRL |
EXYNOS_UFS_OPT_SKIP_CONFIG_PHY_ATTR |
EXYNOS_UFS_OPT_BROKEN_RX_SEL_IDX,
.drv_init = exynosauto_ufs_drv_init,
.post_hce_enable = exynosauto_ufs_post_hce_enable,
.pre_link = exynosauto_ufs_pre_link,
.pre_pwr_change = exynosauto_ufs_pre_pwr_change,
.post_pwr_change = exynosauto_ufs_post_pwr_change,
};
static const struct exynos_ufs_drv_data exynosauto_ufs_vh_drvs = {
.vops = &ufs_hba_exynosauto_vh_ops,
.quirks = UFSHCD_QUIRK_PRDT_BYTE_GRAN |
UFSHCI_QUIRK_SKIP_RESET_INTR_AGGR |
UFSHCD_QUIRK_BROKEN_OCS_FATAL_ERROR |
UFSHCI_QUIRK_BROKEN_HCE |
UFSHCD_QUIRK_BROKEN_UIC_CMD |
UFSHCD_QUIRK_SKIP_PH_CONFIGURATION |
UFSHCD_QUIRK_SKIP_DEF_UNIPRO_TIMEOUT_SETTING,
.opts = EXYNOS_UFS_OPT_BROKEN_RX_SEL_IDX,
};
static const struct exynos_ufs_drv_data exynos_ufs_drvs = {
.uic_attr = &exynos7_uic_attr,
.quirks = UFSHCD_QUIRK_PRDT_BYTE_GRAN |
UFSHCI_QUIRK_BROKEN_REQ_LIST_CLR |
UFSHCI_QUIRK_BROKEN_HCE |
UFSHCI_QUIRK_SKIP_RESET_INTR_AGGR |
UFSHCD_QUIRK_BROKEN_OCS_FATAL_ERROR |
UFSHCI_QUIRK_SKIP_MANUAL_WB_FLUSH_CTRL |
UFSHCD_QUIRK_SKIP_DEF_UNIPRO_TIMEOUT_SETTING |
UFSHCD_QUIRK_4KB_DMA_ALIGNMENT,
.opts = EXYNOS_UFS_OPT_HAS_APB_CLK_CTRL |
EXYNOS_UFS_OPT_BROKEN_AUTO_CLK_CTRL |
EXYNOS_UFS_OPT_BROKEN_RX_SEL_IDX |
EXYNOS_UFS_OPT_SKIP_CONNECTION_ESTAB |
EXYNOS_UFS_OPT_USE_SW_HIBERN8_TIMER,
.drv_init = exynos7_ufs_drv_init,
.pre_link = exynos7_ufs_pre_link,
.post_link = exynos7_ufs_post_link,
.pre_pwr_change = exynos7_ufs_pre_pwr_change,
.post_pwr_change = exynos7_ufs_post_pwr_change,
};
static struct exynos_ufs_uic_attr fsd_uic_attr = {
.tx_trailingclks = 0x10,
.tx_dif_p_nsec = 3000000, /* unit: ns */
.tx_dif_n_nsec = 1000000, /* unit: ns */
.tx_high_z_cnt_nsec = 20000, /* unit: ns */
.tx_base_unit_nsec = 100000, /* unit: ns */
.tx_gran_unit_nsec = 4000, /* unit: ns */
.tx_sleep_cnt = 1000, /* unit: ns */
.tx_min_activatetime = 0xa,
.rx_filler_enable = 0x2,
.rx_dif_p_nsec = 1000000, /* unit: ns */
.rx_hibern8_wait_nsec = 4000000, /* unit: ns */
.rx_base_unit_nsec = 100000, /* unit: ns */
.rx_gran_unit_nsec = 4000, /* unit: ns */
.rx_sleep_cnt = 1280, /* unit: ns */
.rx_stall_cnt = 320, /* unit: ns */
.rx_hs_g1_sync_len_cap = SYNC_LEN_COARSE(0xf),
.rx_hs_g2_sync_len_cap = SYNC_LEN_COARSE(0xf),
.rx_hs_g3_sync_len_cap = SYNC_LEN_COARSE(0xf),
.rx_hs_g1_prep_sync_len_cap = PREP_LEN(0xf),
.rx_hs_g2_prep_sync_len_cap = PREP_LEN(0xf),
.rx_hs_g3_prep_sync_len_cap = PREP_LEN(0xf),
.pa_dbg_option_suite = 0x2E820183,
};
static const struct exynos_ufs_drv_data fsd_ufs_drvs = {
.uic_attr = &fsd_uic_attr,
.quirks = UFSHCD_QUIRK_PRDT_BYTE_GRAN |
UFSHCI_QUIRK_BROKEN_REQ_LIST_CLR |
UFSHCD_QUIRK_BROKEN_OCS_FATAL_ERROR |
UFSHCD_QUIRK_SKIP_DEF_UNIPRO_TIMEOUT_SETTING |
UFSHCI_QUIRK_SKIP_RESET_INTR_AGGR,
.opts = EXYNOS_UFS_OPT_HAS_APB_CLK_CTRL |
EXYNOS_UFS_OPT_BROKEN_AUTO_CLK_CTRL |
EXYNOS_UFS_OPT_SKIP_CONFIG_PHY_ATTR |
EXYNOS_UFS_OPT_BROKEN_RX_SEL_IDX,
.pre_link = fsd_ufs_pre_link,
.post_link = fsd_ufs_post_link,
.pre_pwr_change = fsd_ufs_pre_pwr_change,
};
static const struct of_device_id exynos_ufs_of_match[] = {
{ .compatible = "samsung,exynos7-ufs",
.data = &exynos_ufs_drvs },
{ .compatible = "samsung,exynosautov9-ufs",
.data = &exynosauto_ufs_drvs },
{ .compatible = "samsung,exynosautov9-ufs-vh",
.data = &exynosauto_ufs_vh_drvs },
{ .compatible = "tesla,fsd-ufs",
.data = &fsd_ufs_drvs },
{},
};
static const struct dev_pm_ops exynos_ufs_pm_ops = {
SET_SYSTEM_SLEEP_PM_OPS(ufshcd_system_suspend, ufshcd_system_resume)
SET_RUNTIME_PM_OPS(ufshcd_runtime_suspend, ufshcd_runtime_resume, NULL)
.prepare = ufshcd_suspend_prepare,
.complete = ufshcd_resume_complete,
};
static struct platform_driver exynos_ufs_pltform = {
.probe = exynos_ufs_probe,
.remove = exynos_ufs_remove,
.driver = {
.name = "exynos-ufshc",
.pm = &exynos_ufs_pm_ops,
.of_match_table = exynos_ufs_of_match,
},
};
module_platform_driver(exynos_ufs_pltform);
MODULE_AUTHOR("Alim Akhtar <[email protected]>");
MODULE_AUTHOR("Seungwon Jeon <[email protected]>");
MODULE_DESCRIPTION("Exynos UFS HCI Driver");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/ufs/host/ufs-exynos.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Universal Flash Storage Host controller PCI glue driver
*
* Copyright (C) 2011-2013 Samsung India Software Operations
*
* Authors:
* Santosh Yaraganavi <[email protected]>
* Vinayak Holikatti <[email protected]>
*/
#include <ufs/ufshcd.h>
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/pm_runtime.h>
#include <linux/pm_qos.h>
#include <linux/debugfs.h>
#include <linux/uuid.h>
#include <linux/acpi.h>
#include <linux/gpio/consumer.h>
struct ufs_host {
void (*late_init)(struct ufs_hba *hba);
};
enum intel_ufs_dsm_func_id {
INTEL_DSM_FNS = 0,
INTEL_DSM_RESET = 1,
};
struct intel_host {
struct ufs_host ufs_host;
u32 dsm_fns;
u32 active_ltr;
u32 idle_ltr;
struct dentry *debugfs_root;
struct gpio_desc *reset_gpio;
};
static const guid_t intel_dsm_guid =
GUID_INIT(0x1A4832A0, 0x7D03, 0x43CA,
0xB0, 0x20, 0xF6, 0xDC, 0xD1, 0x2A, 0x19, 0x50);
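/*
 * dsm_fns is a bitmap of supported _DSM functions, as reported by
 * function 0 (INTEL_DSM_FNS) in intel_dsm_init() below; a function is
 * only invoked if its bit is set.
 */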
static bool __intel_dsm_supported(struct intel_host *host,
enum intel_ufs_dsm_func_id fn)
{
return fn < 32 && fn >= 0 && (host->dsm_fns & (1u << fn));
}
#define INTEL_DSM_SUPPORTED(host, name) \
__intel_dsm_supported(host, INTEL_DSM_##name)
static int __intel_dsm(struct intel_host *intel_host, struct device *dev,
unsigned int fn, u32 *result)
{
union acpi_object *obj;
int err = 0;
size_t len;
obj = acpi_evaluate_dsm(ACPI_HANDLE(dev), &intel_dsm_guid, 0, fn, NULL);
if (!obj)
return -EOPNOTSUPP;
if (obj->type != ACPI_TYPE_BUFFER || obj->buffer.length < 1) {
err = -EINVAL;
goto out;
}
len = min_t(size_t, obj->buffer.length, 4);
*result = 0;
memcpy(result, obj->buffer.pointer, len);
out:
ACPI_FREE(obj);
return err;
}
static int intel_dsm(struct intel_host *intel_host, struct device *dev,
unsigned int fn, u32 *result)
{
if (!__intel_dsm_supported(intel_host, fn))
return -EOPNOTSUPP;
return __intel_dsm(intel_host, dev, fn, result);
}
static void intel_dsm_init(struct intel_host *intel_host, struct device *dev)
{
int err;
err = __intel_dsm(intel_host, dev, INTEL_DSM_FNS, &intel_host->dsm_fns);
dev_dbg(dev, "DSM fns %#x, error %d\n", intel_host->dsm_fns, err);
}
static int ufs_intel_hce_enable_notify(struct ufs_hba *hba,
enum ufs_notify_change_status status)
{
/* Cannot enable ICE until after HC enable */
if (status == POST_CHANGE && hba->caps & UFSHCD_CAP_CRYPTO) {
u32 hce = ufshcd_readl(hba, REG_CONTROLLER_ENABLE);
hce |= CRYPTO_GENERAL_ENABLE;
ufshcd_writel(hba, hce, REG_CONTROLLER_ENABLE);
}
return 0;
}
static int ufs_intel_disable_lcc(struct ufs_hba *hba)
{
u32 attr = UIC_ARG_MIB(PA_LOCAL_TX_LCC_ENABLE);
u32 lcc_enable = 0;
ufshcd_dme_get(hba, attr, &lcc_enable);
if (lcc_enable)
ufshcd_disable_host_tx_lcc(hba);
return 0;
}
static int ufs_intel_link_startup_notify(struct ufs_hba *hba,
enum ufs_notify_change_status status)
{
int err = 0;
switch (status) {
case PRE_CHANGE:
err = ufs_intel_disable_lcc(hba);
break;
case POST_CHANGE:
break;
default:
break;
}
return err;
}
static int ufs_intel_set_lanes(struct ufs_hba *hba, u32 lanes)
{
struct ufs_pa_layer_attr pwr_info = hba->pwr_info;
int ret;
pwr_info.lane_rx = lanes;
pwr_info.lane_tx = lanes;
ret = ufshcd_config_pwr_mode(hba, &pwr_info);
if (ret)
dev_err(hba->dev, "%s: Setting %u lanes, err = %d\n",
__func__, lanes, ret);
return ret;
}
static int ufs_intel_lkf_pwr_change_notify(struct ufs_hba *hba,
enum ufs_notify_change_status status,
struct ufs_pa_layer_attr *dev_max_params,
struct ufs_pa_layer_attr *dev_req_params)
{
int err = 0;
switch (status) {
case PRE_CHANGE:
if (ufshcd_is_hs_mode(dev_max_params) &&
(hba->pwr_info.lane_rx != 2 || hba->pwr_info.lane_tx != 2))
ufs_intel_set_lanes(hba, 2);
memcpy(dev_req_params, dev_max_params, sizeof(*dev_req_params));
break;
case POST_CHANGE:
if (ufshcd_is_hs_mode(dev_req_params)) {
u32 peer_granularity;
usleep_range(1000, 1250);
err = ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_GRANULARITY),
&peer_granularity);
}
break;
default:
break;
}
return err;
}
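/*
 * If the host and the device report the same PA_GRANULARITY, bump the
 * peer's PA_TACTIVATE to the local PA_TACTIVATE plus 2 granularity units.
 */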
static int ufs_intel_lkf_apply_dev_quirks(struct ufs_hba *hba)
{
u32 granularity, peer_granularity;
u32 pa_tactivate, peer_pa_tactivate;
int ret;
ret = ufshcd_dme_get(hba, UIC_ARG_MIB(PA_GRANULARITY), &granularity);
if (ret)
goto out;
ret = ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_GRANULARITY), &peer_granularity);
if (ret)
goto out;
ret = ufshcd_dme_get(hba, UIC_ARG_MIB(PA_TACTIVATE), &pa_tactivate);
if (ret)
goto out;
ret = ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_TACTIVATE), &peer_pa_tactivate);
if (ret)
goto out;
if (granularity == peer_granularity) {
u32 new_peer_pa_tactivate = pa_tactivate + 2;
ret = ufshcd_dme_peer_set(hba, UIC_ARG_MIB(PA_TACTIVATE), new_peer_pa_tactivate);
}
out:
return ret;
}
#define INTEL_ACTIVELTR 0x804
#define INTEL_IDLELTR 0x808
#define INTEL_LTR_REQ BIT(15)
#define INTEL_LTR_SCALE_MASK GENMASK(11, 10)
#define INTEL_LTR_SCALE_1US (2 << 10)
#define INTEL_LTR_SCALE_32US (3 << 10)
#define INTEL_LTR_VALUE_MASK GENMASK(9, 0)
static void intel_cache_ltr(struct ufs_hba *hba)
{
struct intel_host *host = ufshcd_get_variant(hba);
host->active_ltr = readl(hba->mmio_base + INTEL_ACTIVELTR);
host->idle_ltr = readl(hba->mmio_base + INTEL_IDLELTR);
}
static void intel_ltr_set(struct device *dev, s32 val)
{
struct ufs_hba *hba = dev_get_drvdata(dev);
struct intel_host *host = ufshcd_get_variant(hba);
u32 ltr;
pm_runtime_get_sync(dev);
/*
	 * Program the latency tolerance (LTR) according to what has been
	 * asked by the PM QoS layer, or disable it if we were passed a
	 * negative value or PM_QOS_LATENCY_ANY.
*/
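	/*
	 * Illustrative example (value assumed, not from any platform data):
	 * val = 3000 exceeds INTEL_LTR_VALUE_MASK (1023), so it is scaled
	 * down by 5 bits to 93 and encoded with INTEL_LTR_SCALE_32US,
	 * i.e. a reported tolerance of 93 * 32 us, roughly 3 ms.
	 */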
ltr = readl(hba->mmio_base + INTEL_ACTIVELTR);
if (val == PM_QOS_LATENCY_ANY || val < 0) {
ltr &= ~INTEL_LTR_REQ;
} else {
ltr |= INTEL_LTR_REQ;
ltr &= ~INTEL_LTR_SCALE_MASK;
ltr &= ~INTEL_LTR_VALUE_MASK;
if (val > INTEL_LTR_VALUE_MASK) {
val >>= 5;
if (val > INTEL_LTR_VALUE_MASK)
val = INTEL_LTR_VALUE_MASK;
ltr |= INTEL_LTR_SCALE_32US | val;
} else {
ltr |= INTEL_LTR_SCALE_1US | val;
}
}
if (ltr == host->active_ltr)
goto out;
writel(ltr, hba->mmio_base + INTEL_ACTIVELTR);
writel(ltr, hba->mmio_base + INTEL_IDLELTR);
/* Cache the values into intel_host structure */
intel_cache_ltr(hba);
out:
pm_runtime_put(dev);
}
static void intel_ltr_expose(struct device *dev)
{
dev->power.set_latency_tolerance = intel_ltr_set;
dev_pm_qos_expose_latency_tolerance(dev);
}
static void intel_ltr_hide(struct device *dev)
{
dev_pm_qos_hide_latency_tolerance(dev);
dev->power.set_latency_tolerance = NULL;
}
static void intel_add_debugfs(struct ufs_hba *hba)
{
struct dentry *dir = debugfs_create_dir(dev_name(hba->dev), NULL);
struct intel_host *host = ufshcd_get_variant(hba);
intel_cache_ltr(hba);
host->debugfs_root = dir;
debugfs_create_x32("active_ltr", 0444, dir, &host->active_ltr);
debugfs_create_x32("idle_ltr", 0444, dir, &host->idle_ltr);
}
static void intel_remove_debugfs(struct ufs_hba *hba)
{
struct intel_host *host = ufshcd_get_variant(hba);
debugfs_remove_recursive(host->debugfs_root);
}
static int ufs_intel_device_reset(struct ufs_hba *hba)
{
struct intel_host *host = ufshcd_get_variant(hba);
if (INTEL_DSM_SUPPORTED(host, RESET)) {
u32 result = 0;
int err;
err = intel_dsm(host, hba->dev, INTEL_DSM_RESET, &result);
if (!err && !result)
err = -EIO;
if (err)
dev_err(hba->dev, "%s: DSM error %d result %u\n",
__func__, err, result);
return err;
}
if (!host->reset_gpio)
return -EOPNOTSUPP;
gpiod_set_value_cansleep(host->reset_gpio, 1);
usleep_range(10, 15);
gpiod_set_value_cansleep(host->reset_gpio, 0);
usleep_range(10, 15);
return 0;
}
static struct gpio_desc *ufs_intel_get_reset_gpio(struct device *dev)
{
/* GPIO in _DSD has active low setting */
return devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_LOW);
}
static int ufs_intel_common_init(struct ufs_hba *hba)
{
struct intel_host *host;
hba->caps |= UFSHCD_CAP_RPM_AUTOSUSPEND;
host = devm_kzalloc(hba->dev, sizeof(*host), GFP_KERNEL);
if (!host)
return -ENOMEM;
ufshcd_set_variant(hba, host);
intel_dsm_init(host, hba->dev);
if (INTEL_DSM_SUPPORTED(host, RESET)) {
if (hba->vops->device_reset)
hba->caps |= UFSHCD_CAP_DEEPSLEEP;
} else {
if (hba->vops->device_reset)
host->reset_gpio = ufs_intel_get_reset_gpio(hba->dev);
if (IS_ERR(host->reset_gpio)) {
dev_err(hba->dev, "%s: failed to get reset GPIO, error %ld\n",
__func__, PTR_ERR(host->reset_gpio));
host->reset_gpio = NULL;
}
if (host->reset_gpio) {
gpiod_set_value_cansleep(host->reset_gpio, 0);
hba->caps |= UFSHCD_CAP_DEEPSLEEP;
}
}
intel_ltr_expose(hba->dev);
intel_add_debugfs(hba);
return 0;
}
static void ufs_intel_common_exit(struct ufs_hba *hba)
{
intel_remove_debugfs(hba);
intel_ltr_hide(hba->dev);
}
static int ufs_intel_resume(struct ufs_hba *hba, enum ufs_pm_op op)
{
if (ufshcd_is_link_hibern8(hba)) {
int ret = ufshcd_uic_hibern8_exit(hba);
if (!ret) {
ufshcd_set_link_active(hba);
} else {
dev_err(hba->dev, "%s: hibern8 exit failed %d\n",
__func__, ret);
/*
* Force reset and restore. Any other actions can lead
* to an unrecoverable state.
*/
ufshcd_set_link_off(hba);
}
}
return 0;
}
static int ufs_intel_ehl_init(struct ufs_hba *hba)
{
hba->quirks |= UFSHCD_QUIRK_BROKEN_AUTO_HIBERN8;
return ufs_intel_common_init(hba);
}
static void ufs_intel_lkf_late_init(struct ufs_hba *hba)
{
/* LKF always needs a full reset, so set PM accordingly */
if (hba->caps & UFSHCD_CAP_DEEPSLEEP) {
hba->spm_lvl = UFS_PM_LVL_6;
hba->rpm_lvl = UFS_PM_LVL_6;
} else {
hba->spm_lvl = UFS_PM_LVL_5;
hba->rpm_lvl = UFS_PM_LVL_5;
}
}
static int ufs_intel_lkf_init(struct ufs_hba *hba)
{
struct ufs_host *ufs_host;
int err;
hba->nop_out_timeout = 200;
hba->quirks |= UFSHCD_QUIRK_BROKEN_AUTO_HIBERN8;
hba->caps |= UFSHCD_CAP_CRYPTO;
err = ufs_intel_common_init(hba);
ufs_host = ufshcd_get_variant(hba);
ufs_host->late_init = ufs_intel_lkf_late_init;
return err;
}
static int ufs_intel_adl_init(struct ufs_hba *hba)
{
hba->nop_out_timeout = 200;
hba->quirks |= UFSHCD_QUIRK_BROKEN_AUTO_HIBERN8;
hba->caps |= UFSHCD_CAP_WB_EN;
return ufs_intel_common_init(hba);
}
static int ufs_intel_mtl_init(struct ufs_hba *hba)
{
hba->caps |= UFSHCD_CAP_CRYPTO | UFSHCD_CAP_WB_EN;
return ufs_intel_common_init(hba);
}
static struct ufs_hba_variant_ops ufs_intel_cnl_hba_vops = {
.name = "intel-pci",
.init = ufs_intel_common_init,
.exit = ufs_intel_common_exit,
.link_startup_notify = ufs_intel_link_startup_notify,
.resume = ufs_intel_resume,
};
static struct ufs_hba_variant_ops ufs_intel_ehl_hba_vops = {
.name = "intel-pci",
.init = ufs_intel_ehl_init,
.exit = ufs_intel_common_exit,
.link_startup_notify = ufs_intel_link_startup_notify,
.resume = ufs_intel_resume,
};
static struct ufs_hba_variant_ops ufs_intel_lkf_hba_vops = {
.name = "intel-pci",
.init = ufs_intel_lkf_init,
.exit = ufs_intel_common_exit,
.hce_enable_notify = ufs_intel_hce_enable_notify,
.link_startup_notify = ufs_intel_link_startup_notify,
.pwr_change_notify = ufs_intel_lkf_pwr_change_notify,
.apply_dev_quirks = ufs_intel_lkf_apply_dev_quirks,
.resume = ufs_intel_resume,
.device_reset = ufs_intel_device_reset,
};
static struct ufs_hba_variant_ops ufs_intel_adl_hba_vops = {
.name = "intel-pci",
.init = ufs_intel_adl_init,
.exit = ufs_intel_common_exit,
.link_startup_notify = ufs_intel_link_startup_notify,
.resume = ufs_intel_resume,
.device_reset = ufs_intel_device_reset,
};
static struct ufs_hba_variant_ops ufs_intel_mtl_hba_vops = {
.name = "intel-pci",
.init = ufs_intel_mtl_init,
.exit = ufs_intel_common_exit,
.hce_enable_notify = ufs_intel_hce_enable_notify,
.link_startup_notify = ufs_intel_link_startup_notify,
.resume = ufs_intel_resume,
.device_reset = ufs_intel_device_reset,
};
#ifdef CONFIG_PM_SLEEP
static int ufshcd_pci_restore(struct device *dev)
{
struct ufs_hba *hba = dev_get_drvdata(dev);
/* Force a full reset and restore */
ufshcd_set_link_off(hba);
return ufshcd_system_resume(dev);
}
#endif
/**
 * ufshcd_pci_remove - de-allocate the PCI/SCSI host and the host memory
 * space data structure memory
* @pdev: pointer to PCI handle
*/
static void ufshcd_pci_remove(struct pci_dev *pdev)
{
struct ufs_hba *hba = pci_get_drvdata(pdev);
pm_runtime_forbid(&pdev->dev);
pm_runtime_get_noresume(&pdev->dev);
ufshcd_remove(hba);
ufshcd_dealloc_host(hba);
}
/**
* ufshcd_pci_probe - probe routine of the driver
* @pdev: pointer to PCI device handle
* @id: PCI device id
*
* Return: 0 on success, non-zero value on failure.
*/
static int
ufshcd_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
struct ufs_host *ufs_host;
struct ufs_hba *hba;
void __iomem *mmio_base;
int err;
err = pcim_enable_device(pdev);
if (err) {
dev_err(&pdev->dev, "pcim_enable_device failed\n");
return err;
}
pci_set_master(pdev);
err = pcim_iomap_regions(pdev, 1 << 0, UFSHCD);
if (err < 0) {
dev_err(&pdev->dev, "request and iomap failed\n");
return err;
}
mmio_base = pcim_iomap_table(pdev)[0];
err = ufshcd_alloc_host(&pdev->dev, &hba);
if (err) {
dev_err(&pdev->dev, "Allocation failed\n");
return err;
}
hba->vops = (struct ufs_hba_variant_ops *)id->driver_data;
err = ufshcd_init(hba, mmio_base, pdev->irq);
if (err) {
dev_err(&pdev->dev, "Initialization failed\n");
ufshcd_dealloc_host(hba);
return err;
}
ufs_host = ufshcd_get_variant(hba);
if (ufs_host && ufs_host->late_init)
ufs_host->late_init(hba);
pm_runtime_put_noidle(&pdev->dev);
pm_runtime_allow(&pdev->dev);
return 0;
}
static const struct dev_pm_ops ufshcd_pci_pm_ops = {
SET_RUNTIME_PM_OPS(ufshcd_runtime_suspend, ufshcd_runtime_resume, NULL)
#ifdef CONFIG_PM_SLEEP
.suspend = ufshcd_system_suspend,
.resume = ufshcd_system_resume,
.freeze = ufshcd_system_suspend,
.thaw = ufshcd_system_resume,
.poweroff = ufshcd_system_suspend,
.restore = ufshcd_pci_restore,
.prepare = ufshcd_suspend_prepare,
.complete = ufshcd_resume_complete,
#endif
};
static const struct pci_device_id ufshcd_pci_tbl[] = {
{ PCI_VENDOR_ID_REDHAT, 0x0013, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
{ PCI_VENDOR_ID_SAMSUNG, 0xC00C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
{ PCI_VDEVICE(INTEL, 0x9DFA), (kernel_ulong_t)&ufs_intel_cnl_hba_vops },
{ PCI_VDEVICE(INTEL, 0x4B41), (kernel_ulong_t)&ufs_intel_ehl_hba_vops },
{ PCI_VDEVICE(INTEL, 0x4B43), (kernel_ulong_t)&ufs_intel_ehl_hba_vops },
{ PCI_VDEVICE(INTEL, 0x98FA), (kernel_ulong_t)&ufs_intel_lkf_hba_vops },
{ PCI_VDEVICE(INTEL, 0x51FF), (kernel_ulong_t)&ufs_intel_adl_hba_vops },
{ PCI_VDEVICE(INTEL, 0x54FF), (kernel_ulong_t)&ufs_intel_adl_hba_vops },
{ PCI_VDEVICE(INTEL, 0x7E47), (kernel_ulong_t)&ufs_intel_mtl_hba_vops },
{ PCI_VDEVICE(INTEL, 0xA847), (kernel_ulong_t)&ufs_intel_mtl_hba_vops },
{ PCI_VDEVICE(INTEL, 0x7747), (kernel_ulong_t)&ufs_intel_mtl_hba_vops },
{ } /* terminate list */
};
MODULE_DEVICE_TABLE(pci, ufshcd_pci_tbl);
static struct pci_driver ufshcd_pci_driver = {
.name = UFSHCD,
.id_table = ufshcd_pci_tbl,
.probe = ufshcd_pci_probe,
.remove = ufshcd_pci_remove,
.driver = {
.pm = &ufshcd_pci_pm_ops
},
};
module_pci_driver(ufshcd_pci_driver);
MODULE_AUTHOR("Santosh Yaragnavi <[email protected]>");
MODULE_AUTHOR("Vinayak Holikatti <[email protected]>");
MODULE_DESCRIPTION("UFS host controller PCI glue driver");
MODULE_LICENSE("GPL");
| linux-master | drivers/ufs/host/ufshcd-pci.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright (C) 2005-2007 Kristian Hoegsberg <[email protected]>
*/
#include <linux/bug.h>
#include <linux/completion.h>
#include <linux/crc-itu-t.h>
#include <linux/device.h>
#include <linux/errno.h>
#include <linux/firewire.h>
#include <linux/firewire-constants.h>
#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/kref.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>
#include <linux/atomic.h>
#include <asm/byteorder.h>
#include "core.h"
#define define_fw_printk_level(func, kern_level) \
void func(const struct fw_card *card, const char *fmt, ...) \
{ \
struct va_format vaf; \
va_list args; \
\
va_start(args, fmt); \
vaf.fmt = fmt; \
vaf.va = &args; \
printk(kern_level KBUILD_MODNAME " %s: %pV", \
dev_name(card->device), &vaf); \
va_end(args); \
}
define_fw_printk_level(fw_err, KERN_ERR);
define_fw_printk_level(fw_notice, KERN_NOTICE);
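/*
 * Compute the CRC-ITU-T over a config ROM block: the block length (in
 * quadlets) is taken from bits 16..23 of the block header, the CRC is
 * OR'ed into the low 16 bits of the header, and the covered length is
 * returned.
 */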
int fw_compute_block_crc(__be32 *block)
{
int length;
u16 crc;
length = (be32_to_cpu(block[0]) >> 16) & 0xff;
crc = crc_itu_t(0, (u8 *)&block[1], length * 4);
*block |= cpu_to_be32(crc);
return length;
}
static DEFINE_MUTEX(card_mutex);
static LIST_HEAD(card_list);
static LIST_HEAD(descriptor_list);
static int descriptor_count;
static __be32 tmp_config_rom[256];
/* ROM header, bus info block, root dir header, capabilities = 7 quadlets */
static size_t config_rom_length = 1 + 4 + 1 + 1;
#define BIB_CRC(v) ((v) << 0)
#define BIB_CRC_LENGTH(v) ((v) << 16)
#define BIB_INFO_LENGTH(v) ((v) << 24)
#define BIB_BUS_NAME 0x31333934 /* "1394" */
#define BIB_LINK_SPEED(v) ((v) << 0)
#define BIB_GENERATION(v) ((v) << 4)
#define BIB_MAX_ROM(v) ((v) << 8)
#define BIB_MAX_RECEIVE(v) ((v) << 12)
#define BIB_CYC_CLK_ACC(v) ((v) << 16)
#define BIB_PMC ((1) << 27)
#define BIB_BMC ((1) << 28)
#define BIB_ISC ((1) << 29)
#define BIB_CMC ((1) << 30)
#define BIB_IRMC ((1) << 31)
#define NODE_CAPABILITIES 0x0c0083c0 /* per IEEE 1394 clause 8.3.2.6.5.2 */
/*
* IEEE-1394 specifies a default SPLIT_TIMEOUT value of 800 cycles (100 ms),
* but we have to make it longer because there are many devices whose firmware
* is just too slow for that.
*/
#define DEFAULT_SPLIT_TIMEOUT (2 * 8000)
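/*
 * 2 * 8000 cycles corresponds to 2 seconds at the nominal 8000 isochronous
 * cycles per second; fw_card_initialize() splits this into the hi (seconds)
 * and lo (fractional) halves of the SPLIT_TIMEOUT CSR.
 */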
#define CANON_OUI 0x000085
static void generate_config_rom(struct fw_card *card, __be32 *config_rom)
{
struct fw_descriptor *desc;
int i, j, k, length;
/*
	 * Initialize the contents of the config rom buffer. On the OHCI
	 * controller, block reads to the config rom access host
	 * memory, but quadlet reads access the hardware bus info block
	 * registers. That's just crack, but it means we should make
	 * sure the contents of the bus info block in host memory match
	 * the version stored in the OHCI registers.
*/
config_rom[0] = cpu_to_be32(
BIB_CRC_LENGTH(4) | BIB_INFO_LENGTH(4) | BIB_CRC(0));
config_rom[1] = cpu_to_be32(BIB_BUS_NAME);
config_rom[2] = cpu_to_be32(
BIB_LINK_SPEED(card->link_speed) |
BIB_GENERATION(card->config_rom_generation++ % 14 + 2) |
BIB_MAX_ROM(2) |
BIB_MAX_RECEIVE(card->max_receive) |
BIB_BMC | BIB_ISC | BIB_CMC | BIB_IRMC);
config_rom[3] = cpu_to_be32(card->guid >> 32);
config_rom[4] = cpu_to_be32(card->guid);
/* Generate root directory. */
config_rom[6] = cpu_to_be32(NODE_CAPABILITIES);
i = 7;
j = 7 + descriptor_count;
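	/*
	 * 'i' walks the root directory entries starting at quadlet 7, while
	 * 'j' points at where the referenced descriptor blocks will start,
	 * i.e. just past the root directory, so each leaf entry below can
	 * encode its target as the offset (j - i).
	 */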
/* Generate root directory entries for descriptors. */
list_for_each_entry (desc, &descriptor_list, link) {
if (desc->immediate > 0)
config_rom[i++] = cpu_to_be32(desc->immediate);
config_rom[i] = cpu_to_be32(desc->key | (j - i));
i++;
j += desc->length;
}
/* Update root directory length. */
config_rom[5] = cpu_to_be32((i - 5 - 1) << 16);
/* End of root directory, now copy in descriptors. */
list_for_each_entry (desc, &descriptor_list, link) {
for (k = 0; k < desc->length; k++)
config_rom[i + k] = cpu_to_be32(desc->data[k]);
i += desc->length;
}
/* Calculate CRCs for all blocks in the config rom. This
* assumes that CRC length and info length are identical for
* the bus info block, which is always the case for this
* implementation. */
for (i = 0; i < j; i += length + 1)
length = fw_compute_block_crc(config_rom + i);
WARN_ON(j != config_rom_length);
}
static void update_config_roms(void)
{
struct fw_card *card;
list_for_each_entry (card, &card_list, link) {
generate_config_rom(card, tmp_config_rom);
card->driver->set_config_rom(card, tmp_config_rom,
config_rom_length);
}
}
static size_t required_space(struct fw_descriptor *desc)
{
/* descriptor + entry into root dir + optional immediate entry */
return desc->length + 1 + (desc->immediate > 0 ? 1 : 0);
}
int fw_core_add_descriptor(struct fw_descriptor *desc)
{
size_t i;
int ret;
/*
	 * Check that the descriptor is valid; the lengths of all blocks
	 * in the descriptor have to add up to exactly the descriptor's
	 * total length.
*/
i = 0;
while (i < desc->length)
i += (desc->data[i] >> 16) + 1;
if (i != desc->length)
return -EINVAL;
mutex_lock(&card_mutex);
if (config_rom_length + required_space(desc) > 256) {
ret = -EBUSY;
} else {
list_add_tail(&desc->link, &descriptor_list);
config_rom_length += required_space(desc);
descriptor_count++;
if (desc->immediate > 0)
descriptor_count++;
update_config_roms();
ret = 0;
}
mutex_unlock(&card_mutex);
return ret;
}
EXPORT_SYMBOL(fw_core_add_descriptor);
void fw_core_remove_descriptor(struct fw_descriptor *desc)
{
mutex_lock(&card_mutex);
list_del(&desc->link);
config_rom_length -= required_space(desc);
descriptor_count--;
if (desc->immediate > 0)
descriptor_count--;
update_config_roms();
mutex_unlock(&card_mutex);
}
EXPORT_SYMBOL(fw_core_remove_descriptor);
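/*
 * Trigger a bus reset through the local PHY: the short (arbitrated) reset
 * bit is in PHY register 5, the long reset bit in PHY register 1.
 */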
static int reset_bus(struct fw_card *card, bool short_reset)
{
int reg = short_reset ? 5 : 1;
int bit = short_reset ? PHY_BUS_SHORT_RESET : PHY_BUS_RESET;
return card->driver->update_phy_reg(card, reg, 0, bit);
}
void fw_schedule_bus_reset(struct fw_card *card, bool delayed, bool short_reset)
{
/* We don't try hard to sort out requests of long vs. short resets. */
card->br_short = short_reset;
/* Use an arbitrary short delay to combine multiple reset requests. */
fw_card_get(card);
if (!queue_delayed_work(fw_workqueue, &card->br_work,
delayed ? DIV_ROUND_UP(HZ, 100) : 0))
fw_card_put(card);
}
EXPORT_SYMBOL(fw_schedule_bus_reset);
static void br_work(struct work_struct *work)
{
struct fw_card *card = container_of(work, struct fw_card, br_work.work);
/* Delay for 2s after last reset per IEEE 1394 clause 8.2.1. */
if (card->reset_jiffies != 0 &&
time_before64(get_jiffies_64(), card->reset_jiffies + 2 * HZ)) {
if (!queue_delayed_work(fw_workqueue, &card->br_work, 2 * HZ))
fw_card_put(card);
return;
}
fw_send_phy_config(card, FW_PHY_CONFIG_NO_NODE_ID, card->generation,
FW_PHY_CONFIG_CURRENT_GAP_COUNT);
reset_bus(card, card->br_short);
fw_card_put(card);
}
static void allocate_broadcast_channel(struct fw_card *card, int generation)
{
int channel, bandwidth = 0;
if (!card->broadcast_channel_allocated) {
fw_iso_resource_manage(card, generation, 1ULL << 31,
&channel, &bandwidth, true);
if (channel != 31) {
fw_notice(card, "failed to allocate broadcast channel\n");
return;
}
card->broadcast_channel_allocated = true;
}
device_for_each_child(card->device, (void *)(long)generation,
fw_device_set_broadcast_channel);
}
static const char gap_count_table[] = {
63, 5, 7, 8, 10, 13, 16, 18, 21, 24, 26, 29, 32, 35, 37, 40
};
void fw_schedule_bm_work(struct fw_card *card, unsigned long delay)
{
fw_card_get(card);
if (!schedule_delayed_work(&card->bm_work, delay))
fw_card_put(card);
}
static void bm_work(struct work_struct *work)
{
struct fw_card *card = container_of(work, struct fw_card, bm_work.work);
struct fw_device *root_device, *irm_device;
struct fw_node *root_node;
int root_id, new_root_id, irm_id, bm_id, local_id;
int gap_count, generation, grace, rcode;
bool do_reset = false;
bool root_device_is_running;
bool root_device_is_cmc;
bool irm_is_1394_1995_only;
bool keep_this_irm;
__be32 transaction_data[2];
spin_lock_irq(&card->lock);
if (card->local_node == NULL) {
spin_unlock_irq(&card->lock);
goto out_put_card;
}
generation = card->generation;
root_node = card->root_node;
fw_node_get(root_node);
root_device = root_node->data;
root_device_is_running = root_device &&
atomic_read(&root_device->state) == FW_DEVICE_RUNNING;
root_device_is_cmc = root_device && root_device->cmc;
irm_device = card->irm_node->data;
irm_is_1394_1995_only = irm_device && irm_device->config_rom &&
(irm_device->config_rom[2] & 0x000000f0) == 0;
/* Canon MV5i works unreliably if it is not root node. */
keep_this_irm = irm_device && irm_device->config_rom &&
irm_device->config_rom[3] >> 8 == CANON_OUI;
root_id = root_node->node_id;
irm_id = card->irm_node->node_id;
local_id = card->local_node->node_id;
grace = time_after64(get_jiffies_64(),
card->reset_jiffies + DIV_ROUND_UP(HZ, 8));
if ((is_next_generation(generation, card->bm_generation) &&
!card->bm_abdicate) ||
(card->bm_generation != generation && grace)) {
/*
* This first step is to figure out who is IRM and
* then try to become bus manager. If the IRM is not
* well defined (e.g. does not have an active link
		 * layer or does not respond to our lock request), we
* will have to do a little vigilante bus management.
* In that case, we do a goto into the gap count logic
* so that when we do the reset, we still optimize the
* gap count. That could well save a reset in the
* next generation.
*/
if (!card->irm_node->link_on) {
new_root_id = local_id;
fw_notice(card, "%s, making local node (%02x) root\n",
"IRM has link off", new_root_id);
goto pick_me;
}
if (irm_is_1394_1995_only && !keep_this_irm) {
new_root_id = local_id;
fw_notice(card, "%s, making local node (%02x) root\n",
"IRM is not 1394a compliant", new_root_id);
goto pick_me;
}
transaction_data[0] = cpu_to_be32(0x3f);
transaction_data[1] = cpu_to_be32(local_id);
spin_unlock_irq(&card->lock);
rcode = fw_run_transaction(card, TCODE_LOCK_COMPARE_SWAP,
irm_id, generation, SCODE_100,
CSR_REGISTER_BASE + CSR_BUS_MANAGER_ID,
transaction_data, 8);
if (rcode == RCODE_GENERATION)
/* Another bus reset, BM work has been rescheduled. */
goto out;
bm_id = be32_to_cpu(transaction_data[0]);
spin_lock_irq(&card->lock);
if (rcode == RCODE_COMPLETE && generation == card->generation)
card->bm_node_id =
bm_id == 0x3f ? local_id : 0xffc0 | bm_id;
spin_unlock_irq(&card->lock);
if (rcode == RCODE_COMPLETE && bm_id != 0x3f) {
/* Somebody else is BM. Only act as IRM. */
if (local_id == irm_id)
allocate_broadcast_channel(card, generation);
goto out;
}
if (rcode == RCODE_SEND_ERROR) {
/*
* We have been unable to send the lock request due to
* some local problem. Let's try again later and hope
* that the problem has gone away by then.
*/
fw_schedule_bm_work(card, DIV_ROUND_UP(HZ, 8));
goto out;
}
spin_lock_irq(&card->lock);
if (rcode != RCODE_COMPLETE && !keep_this_irm) {
/*
* The lock request failed, maybe the IRM
* isn't really IRM capable after all. Let's
* do a bus reset and pick the local node as
* root, and thus, IRM.
*/
new_root_id = local_id;
fw_notice(card, "BM lock failed (%s), making local node (%02x) root\n",
fw_rcode_string(rcode), new_root_id);
goto pick_me;
}
} else if (card->bm_generation != generation) {
/*
* We weren't BM in the last generation, and the last
		 * bus reset was less than 125ms ago. Reschedule this job.
*/
spin_unlock_irq(&card->lock);
fw_schedule_bm_work(card, DIV_ROUND_UP(HZ, 8));
goto out;
}
/*
* We're bus manager for this generation, so next step is to
* make sure we have an active cycle master and do gap count
* optimization.
*/
card->bm_generation = generation;
if (root_device == NULL) {
/*
* Either link_on is false, or we failed to read the
* config rom. In either case, pick another root.
*/
new_root_id = local_id;
} else if (!root_device_is_running) {
/*
* If we haven't probed this device yet, bail out now
* and let's try again once that's done.
*/
spin_unlock_irq(&card->lock);
goto out;
} else if (root_device_is_cmc) {
/*
* We will send out a force root packet for this
* node as part of the gap count optimization.
*/
new_root_id = root_id;
} else {
/*
* Current root has an active link layer and we
* successfully read the config rom, but it's not
* cycle master capable.
*/
new_root_id = local_id;
}
pick_me:
/*
* Pick a gap count from 1394a table E-1. The table doesn't cover
* the typically much larger 1394b beta repeater delays though.
*/
if (!card->beta_repeaters_present &&
root_node->max_hops < ARRAY_SIZE(gap_count_table))
gap_count = gap_count_table[root_node->max_hops];
else
gap_count = 63;
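	/*
	 * For example, a root node reporting max_hops = 5 yields
	 * gap_count_table[5] = 13; topologies deeper than the table or
	 * containing beta repeaters fall back to the maximum of 63.
	 */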
/*
* Finally, figure out if we should do a reset or not. If we have
* done less than 5 resets with the same physical topology and we
* have either a new root or a new gap count setting, let's do it.
*/
if (card->bm_retries++ < 5 &&
(card->gap_count != gap_count || new_root_id != root_id))
do_reset = true;
spin_unlock_irq(&card->lock);
if (do_reset) {
fw_notice(card, "phy config: new root=%x, gap_count=%d\n",
new_root_id, gap_count);
fw_send_phy_config(card, new_root_id, generation, gap_count);
reset_bus(card, true);
/* Will allocate broadcast channel after the reset. */
goto out;
}
if (root_device_is_cmc) {
/*
* Make sure that the cycle master sends cycle start packets.
*/
transaction_data[0] = cpu_to_be32(CSR_STATE_BIT_CMSTR);
rcode = fw_run_transaction(card, TCODE_WRITE_QUADLET_REQUEST,
root_id, generation, SCODE_100,
CSR_REGISTER_BASE + CSR_STATE_SET,
transaction_data, 4);
if (rcode == RCODE_GENERATION)
goto out;
}
if (local_id == irm_id)
allocate_broadcast_channel(card, generation);
out:
fw_node_put(root_node);
out_put_card:
fw_card_put(card);
}
void fw_card_initialize(struct fw_card *card,
const struct fw_card_driver *driver,
struct device *device)
{
static atomic_t index = ATOMIC_INIT(-1);
card->index = atomic_inc_return(&index);
card->driver = driver;
card->device = device;
card->current_tlabel = 0;
card->tlabel_mask = 0;
card->split_timeout_hi = DEFAULT_SPLIT_TIMEOUT / 8000;
card->split_timeout_lo = (DEFAULT_SPLIT_TIMEOUT % 8000) << 19;
card->split_timeout_cycles = DEFAULT_SPLIT_TIMEOUT;
card->split_timeout_jiffies =
DIV_ROUND_UP(DEFAULT_SPLIT_TIMEOUT * HZ, 8000);
card->color = 0;
card->broadcast_channel = BROADCAST_CHANNEL_INITIAL;
kref_init(&card->kref);
init_completion(&card->done);
INIT_LIST_HEAD(&card->transaction_list);
INIT_LIST_HEAD(&card->phy_receiver_list);
spin_lock_init(&card->lock);
card->local_node = NULL;
INIT_DELAYED_WORK(&card->br_work, br_work);
INIT_DELAYED_WORK(&card->bm_work, bm_work);
}
EXPORT_SYMBOL(fw_card_initialize);
int fw_card_add(struct fw_card *card,
u32 max_receive, u32 link_speed, u64 guid)
{
int ret;
card->max_receive = max_receive;
card->link_speed = link_speed;
card->guid = guid;
mutex_lock(&card_mutex);
generate_config_rom(card, tmp_config_rom);
ret = card->driver->enable(card, tmp_config_rom, config_rom_length);
if (ret == 0)
list_add_tail(&card->link, &card_list);
mutex_unlock(&card_mutex);
return ret;
}
EXPORT_SYMBOL(fw_card_add);
/*
* The next few functions implement a dummy driver that is used once a card
* driver shuts down an fw_card. This allows the driver to cleanly unload,
* as all IO to the card will be handled (and failed) by the dummy driver
* instead of calling into the module. Only functions for iso context
* shutdown still need to be provided by the card driver.
*
* .read/write_csr() should never be called anymore after the dummy driver
* was bound since they are only used within request handler context.
* .set_config_rom() is never called since the card is taken out of card_list
* before switching to the dummy driver.
*/
static int dummy_read_phy_reg(struct fw_card *card, int address)
{
return -ENODEV;
}
static int dummy_update_phy_reg(struct fw_card *card, int address,
int clear_bits, int set_bits)
{
return -ENODEV;
}
static void dummy_send_request(struct fw_card *card, struct fw_packet *packet)
{
packet->callback(packet, card, RCODE_CANCELLED);
}
static void dummy_send_response(struct fw_card *card, struct fw_packet *packet)
{
packet->callback(packet, card, RCODE_CANCELLED);
}
static int dummy_cancel_packet(struct fw_card *card, struct fw_packet *packet)
{
return -ENOENT;
}
static int dummy_enable_phys_dma(struct fw_card *card,
int node_id, int generation)
{
return -ENODEV;
}
static struct fw_iso_context *dummy_allocate_iso_context(struct fw_card *card,
int type, int channel, size_t header_size)
{
return ERR_PTR(-ENODEV);
}
static u32 dummy_read_csr(struct fw_card *card, int csr_offset)
{
return 0;
}
static void dummy_write_csr(struct fw_card *card, int csr_offset, u32 value)
{
}
static int dummy_start_iso(struct fw_iso_context *ctx,
s32 cycle, u32 sync, u32 tags)
{
return -ENODEV;
}
static int dummy_set_iso_channels(struct fw_iso_context *ctx, u64 *channels)
{
return -ENODEV;
}
static int dummy_queue_iso(struct fw_iso_context *ctx, struct fw_iso_packet *p,
struct fw_iso_buffer *buffer, unsigned long payload)
{
return -ENODEV;
}
static void dummy_flush_queue_iso(struct fw_iso_context *ctx)
{
}
static int dummy_flush_iso_completions(struct fw_iso_context *ctx)
{
return -ENODEV;
}
static const struct fw_card_driver dummy_driver_template = {
.read_phy_reg = dummy_read_phy_reg,
.update_phy_reg = dummy_update_phy_reg,
.send_request = dummy_send_request,
.send_response = dummy_send_response,
.cancel_packet = dummy_cancel_packet,
.enable_phys_dma = dummy_enable_phys_dma,
.read_csr = dummy_read_csr,
.write_csr = dummy_write_csr,
.allocate_iso_context = dummy_allocate_iso_context,
.start_iso = dummy_start_iso,
.set_iso_channels = dummy_set_iso_channels,
.queue_iso = dummy_queue_iso,
.flush_queue_iso = dummy_flush_queue_iso,
.flush_iso_completions = dummy_flush_iso_completions,
};
void fw_card_release(struct kref *kref)
{
struct fw_card *card = container_of(kref, struct fw_card, kref);
complete(&card->done);
}
EXPORT_SYMBOL_GPL(fw_card_release);
void fw_core_remove_card(struct fw_card *card)
{
struct fw_card_driver dummy_driver = dummy_driver_template;
unsigned long flags;
card->driver->update_phy_reg(card, 4,
PHY_LINK_ACTIVE | PHY_CONTENDER, 0);
fw_schedule_bus_reset(card, false, true);
mutex_lock(&card_mutex);
list_del_init(&card->link);
mutex_unlock(&card_mutex);
/* Switch off most of the card driver interface. */
dummy_driver.free_iso_context = card->driver->free_iso_context;
dummy_driver.stop_iso = card->driver->stop_iso;
card->driver = &dummy_driver;
spin_lock_irqsave(&card->lock, flags);
fw_destroy_nodes(card);
spin_unlock_irqrestore(&card->lock, flags);
/* Wait for all users, especially device workqueue jobs, to finish. */
fw_card_put(card);
wait_for_completion(&card->done);
WARN_ON(!list_empty(&card->transaction_list));
}
EXPORT_SYMBOL(fw_core_remove_card);
/**
 * fw_card_read_cycle_time - read from Isochronous Cycle Timer Register of 1394 OHCI in MMIO region
 * for the controller card.
* @card: The instance of card for 1394 OHCI controller.
* @cycle_time: The mutual reference to value of cycle time for the read operation.
*
* Read value from Isochronous Cycle Timer Register of 1394 OHCI in MMIO region for the given
* controller card. This function accesses the region without any lock primitives or IRQ mask.
* When returning successfully, the content of @value argument has value aligned to host endianness,
* formetted by CYCLE_TIME CSR Register of IEEE 1394 std.
*
* Context: Any context.
* Return:
* * 0 - Read successfully.
* * -ENODEV - The controller is unavailable due to being removed or unbound.
*/
int fw_card_read_cycle_time(struct fw_card *card, u32 *cycle_time)
{
if (card->driver->read_csr == dummy_read_csr)
return -ENODEV;
	// It's possible to switch to the dummy driver between the check above and the read below.
	// This is a best-effort attempt to return -ENODEV.
*cycle_time = card->driver->read_csr(card, CSR_CYCLE_TIME);
return 0;
}
EXPORT_SYMBOL_GPL(fw_card_read_cycle_time);
| linux-master | drivers/firewire/core-card.c |
// SPDX-License-Identifier: GPL-2.0-only
//
// uapi_test.c - An application of Kunit to check layout of structures exposed to user space for
// FireWire subsystem.
//
// Copyright (c) 2023 Takashi Sakamoto
#include <kunit/test.h>
#include <linux/firewire-cdev.h>
// Known issue added at v2.6.27 kernel.
static void structure_layout_event_response(struct kunit *test)
{
#if defined(CONFIG_X86_32)
// 4 bytes alignment for aggregate type including 8 bytes storage types.
KUNIT_EXPECT_EQ(test, 20, sizeof(struct fw_cdev_event_response));
#else
// 8 bytes alignment for aggregate type including 8 bytes storage types.
KUNIT_EXPECT_EQ(test, 24, sizeof(struct fw_cdev_event_response));
#endif
KUNIT_EXPECT_EQ(test, 0, offsetof(struct fw_cdev_event_response, closure));
KUNIT_EXPECT_EQ(test, 8, offsetof(struct fw_cdev_event_response, type));
KUNIT_EXPECT_EQ(test, 12, offsetof(struct fw_cdev_event_response, rcode));
KUNIT_EXPECT_EQ(test, 16, offsetof(struct fw_cdev_event_response, length));
KUNIT_EXPECT_EQ(test, 20, offsetof(struct fw_cdev_event_response, data));
}
// Added at v6.5.
static void structure_layout_event_request3(struct kunit *test)
{
KUNIT_EXPECT_EQ(test, 56, sizeof(struct fw_cdev_event_request3));
KUNIT_EXPECT_EQ(test, 0, offsetof(struct fw_cdev_event_request3, closure));
KUNIT_EXPECT_EQ(test, 8, offsetof(struct fw_cdev_event_request3, type));
KUNIT_EXPECT_EQ(test, 12, offsetof(struct fw_cdev_event_request3, tcode));
KUNIT_EXPECT_EQ(test, 16, offsetof(struct fw_cdev_event_request3, offset));
KUNIT_EXPECT_EQ(test, 24, offsetof(struct fw_cdev_event_request3, source_node_id));
KUNIT_EXPECT_EQ(test, 28, offsetof(struct fw_cdev_event_request3, destination_node_id));
KUNIT_EXPECT_EQ(test, 32, offsetof(struct fw_cdev_event_request3, card));
KUNIT_EXPECT_EQ(test, 36, offsetof(struct fw_cdev_event_request3, generation));
KUNIT_EXPECT_EQ(test, 40, offsetof(struct fw_cdev_event_request3, handle));
KUNIT_EXPECT_EQ(test, 44, offsetof(struct fw_cdev_event_request3, length));
KUNIT_EXPECT_EQ(test, 48, offsetof(struct fw_cdev_event_request3, tstamp));
KUNIT_EXPECT_EQ(test, 56, offsetof(struct fw_cdev_event_request3, data));
}
// Added at v6.5.
static void structure_layout_event_response2(struct kunit *test)
{
KUNIT_EXPECT_EQ(test, 32, sizeof(struct fw_cdev_event_response2));
KUNIT_EXPECT_EQ(test, 0, offsetof(struct fw_cdev_event_response2, closure));
KUNIT_EXPECT_EQ(test, 8, offsetof(struct fw_cdev_event_response2, type));
KUNIT_EXPECT_EQ(test, 12, offsetof(struct fw_cdev_event_response2, rcode));
KUNIT_EXPECT_EQ(test, 16, offsetof(struct fw_cdev_event_response2, length));
KUNIT_EXPECT_EQ(test, 20, offsetof(struct fw_cdev_event_response2, request_tstamp));
KUNIT_EXPECT_EQ(test, 24, offsetof(struct fw_cdev_event_response2, response_tstamp));
KUNIT_EXPECT_EQ(test, 32, offsetof(struct fw_cdev_event_response2, data));
}
// Added at v6.5.
static void structure_layout_event_phy_packet2(struct kunit *test)
{
KUNIT_EXPECT_EQ(test, 24, sizeof(struct fw_cdev_event_phy_packet2));
KUNIT_EXPECT_EQ(test, 0, offsetof(struct fw_cdev_event_phy_packet2, closure));
KUNIT_EXPECT_EQ(test, 8, offsetof(struct fw_cdev_event_phy_packet2, type));
KUNIT_EXPECT_EQ(test, 12, offsetof(struct fw_cdev_event_phy_packet2, rcode));
KUNIT_EXPECT_EQ(test, 16, offsetof(struct fw_cdev_event_phy_packet2, length));
KUNIT_EXPECT_EQ(test, 20, offsetof(struct fw_cdev_event_phy_packet2, tstamp));
KUNIT_EXPECT_EQ(test, 24, offsetof(struct fw_cdev_event_phy_packet2, data));
}
static struct kunit_case structure_layout_test_cases[] = {
KUNIT_CASE(structure_layout_event_response),
KUNIT_CASE(structure_layout_event_request3),
KUNIT_CASE(structure_layout_event_response2),
KUNIT_CASE(structure_layout_event_phy_packet2),
{}
};
static struct kunit_suite structure_layout_test_suite = {
.name = "firewire-uapi-structure-layout",
.test_cases = structure_layout_test_cases,
};
kunit_test_suite(structure_layout_test_suite);
MODULE_LICENSE("GPL");
| linux-master | drivers/firewire/uapi-test.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Incremental bus scan, based on bus topology
*
* Copyright (C) 2004-2006 Kristian Hoegsberg <[email protected]>
*/
#include <linux/bug.h>
#include <linux/errno.h>
#include <linux/firewire.h>
#include <linux/firewire-constants.h>
#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/atomic.h>
#include <asm/byteorder.h>
#include "core.h"
#define SELF_ID_PHY_ID(q) (((q) >> 24) & 0x3f)
#define SELF_ID_EXTENDED(q) (((q) >> 23) & 0x01)
#define SELF_ID_LINK_ON(q) (((q) >> 22) & 0x01)
#define SELF_ID_GAP_COUNT(q) (((q) >> 16) & 0x3f)
#define SELF_ID_PHY_SPEED(q) (((q) >> 14) & 0x03)
#define SELF_ID_CONTENDER(q) (((q) >> 11) & 0x01)
#define SELF_ID_PHY_INITIATOR(q) (((q) >> 1) & 0x01)
#define SELF_ID_MORE_PACKETS(q) (((q) >> 0) & 0x01)
#define SELF_ID_EXT_SEQUENCE(q) (((q) >> 20) & 0x07)
#define SELFID_PORT_CHILD 0x3
#define SELFID_PORT_PARENT 0x2
#define SELFID_PORT_NCONN 0x1
#define SELFID_PORT_NONE 0x0
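/*
 * Parse one (possibly extended) self-ID packet sequence, counting the
 * total number of ports and the number of child ports. Returns a pointer
 * to the next self-ID quadlet, or NULL if the extended packets are
 * inconsistent.
 */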
static u32 *count_ports(u32 *sid, int *total_port_count, int *child_port_count)
{
u32 q;
int port_type, shift, seq;
*total_port_count = 0;
*child_port_count = 0;
shift = 6;
q = *sid;
seq = 0;
while (1) {
port_type = (q >> shift) & 0x03;
switch (port_type) {
case SELFID_PORT_CHILD:
(*child_port_count)++;
fallthrough;
case SELFID_PORT_PARENT:
case SELFID_PORT_NCONN:
(*total_port_count)++;
fallthrough;
case SELFID_PORT_NONE:
break;
}
shift -= 2;
if (shift == 0) {
if (!SELF_ID_MORE_PACKETS(q))
return sid + 1;
shift = 16;
sid++;
q = *sid;
/*
* Check that the extra packets actually are
* extended self ID packets and that the
* sequence numbers in the extended self ID
* packets increase as expected.
*/
if (!SELF_ID_EXTENDED(q) ||
seq != SELF_ID_EXT_SEQUENCE(q))
return NULL;
seq++;
}
}
}
static int get_port_type(u32 *sid, int port_index)
{
int index, shift;
index = (port_index + 5) / 8;
shift = 16 - ((port_index + 5) & 7) * 2;
return (sid[index] >> shift) & 0x03;
}
static struct fw_node *fw_node_create(u32 sid, int port_count, int color)
{
struct fw_node *node;
node = kzalloc(struct_size(node, ports, port_count), GFP_ATOMIC);
if (node == NULL)
return NULL;
node->color = color;
node->node_id = LOCAL_BUS | SELF_ID_PHY_ID(sid);
node->link_on = SELF_ID_LINK_ON(sid);
node->phy_speed = SELF_ID_PHY_SPEED(sid);
node->initiated_reset = SELF_ID_PHY_INITIATOR(sid);
node->port_count = port_count;
refcount_set(&node->ref_count, 1);
INIT_LIST_HEAD(&node->link);
return node;
}
/*
 * Compute the maximum hop count for this node and its children. The
* maximum hop count is the maximum number of connections between any
* two nodes in the subtree rooted at this node. We need this for
* setting the gap count. As we build the tree bottom up in
* build_tree() below, this is fairly easy to do: for each node we
 * maintain the max hop count and the max depth, i.e. the number of hops
* to the furthest leaf. Computing the max hop count breaks down into
* two cases: either the path goes through this node, in which case
* the hop count is the sum of the two biggest child depths plus 2.
* Or it could be the case that the max hop path is entirely
 * contained in a child tree, in which case the max hop count is just
* the max hop count of this child.
*/
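/*
 * Worked example (hypothetical subtree): with child subtrees of max_depth
 * 2 and 1 and a largest child max_hops of 3, the node gets
 * max_depth = 2 + 1 = 3 and max_hops = max(3, 2 + 1 + 2) = 5.
 */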
static void update_hop_count(struct fw_node *node)
{
int depths[2] = { -1, -1 };
int max_child_hops = 0;
int i;
for (i = 0; i < node->port_count; i++) {
if (node->ports[i] == NULL)
continue;
if (node->ports[i]->max_hops > max_child_hops)
max_child_hops = node->ports[i]->max_hops;
if (node->ports[i]->max_depth > depths[0]) {
depths[1] = depths[0];
depths[0] = node->ports[i]->max_depth;
} else if (node->ports[i]->max_depth > depths[1])
depths[1] = node->ports[i]->max_depth;
}
node->max_depth = depths[0] + 1;
node->max_hops = max(max_child_hops, depths[0] + depths[1] + 2);
}
static inline struct fw_node *fw_node(struct list_head *l)
{
return list_entry(l, struct fw_node, link);
}
/*
* This function builds the tree representation of the topology given
* by the self IDs from the latest bus reset. During the construction
* of the tree, the function checks that the self IDs are valid and
* internally consistent. On success this function returns the
* fw_node corresponding to the local card otherwise NULL.
*/
static struct fw_node *build_tree(struct fw_card *card,
u32 *sid, int self_id_count)
{
struct fw_node *node, *child, *local_node, *irm_node;
struct list_head stack, *h;
u32 *next_sid, *end, q;
int i, port_count, child_port_count, phy_id, parent_count, stack_depth;
int gap_count;
bool beta_repeaters_present;
local_node = NULL;
node = NULL;
INIT_LIST_HEAD(&stack);
stack_depth = 0;
end = sid + self_id_count;
phy_id = 0;
irm_node = NULL;
gap_count = SELF_ID_GAP_COUNT(*sid);
beta_repeaters_present = false;
while (sid < end) {
next_sid = count_ports(sid, &port_count, &child_port_count);
if (next_sid == NULL) {
fw_err(card, "inconsistent extended self IDs\n");
return NULL;
}
q = *sid;
if (phy_id != SELF_ID_PHY_ID(q)) {
fw_err(card, "PHY ID mismatch in self ID: %d != %d\n",
phy_id, SELF_ID_PHY_ID(q));
return NULL;
}
if (child_port_count > stack_depth) {
fw_err(card, "topology stack underflow\n");
return NULL;
}
/*
* Seek back from the top of our stack to find the
* start of the child nodes for this node.
*/
for (i = 0, h = &stack; i < child_port_count; i++)
h = h->prev;
/*
* When the stack is empty, this yields an invalid value,
* but that pointer will never be dereferenced.
*/
child = fw_node(h);
node = fw_node_create(q, port_count, card->color);
if (node == NULL) {
fw_err(card, "out of memory while building topology\n");
return NULL;
}
if (phy_id == (card->node_id & 0x3f))
local_node = node;
if (SELF_ID_CONTENDER(q))
irm_node = node;
parent_count = 0;
for (i = 0; i < port_count; i++) {
switch (get_port_type(sid, i)) {
case SELFID_PORT_PARENT:
/*
				 * Who's your daddy? We don't know the
* parent node at this time, so we
* temporarily abuse node->color for
* remembering the entry in the
* node->ports array where the parent
* node should be. Later, when we
* handle the parent node, we fix up
* the reference.
*/
parent_count++;
node->color = i;
break;
case SELFID_PORT_CHILD:
node->ports[i] = child;
/*
* Fix up parent reference for this
* child node.
*/
child->ports[child->color] = node;
child->color = card->color;
child = fw_node(child->link.next);
break;
}
}
/*
* Check that the node reports exactly one parent
* port, except for the root, which of course should
* have no parents.
*/
if ((next_sid == end && parent_count != 0) ||
(next_sid < end && parent_count != 1)) {
fw_err(card, "parent port inconsistency for node %d: "
"parent_count=%d\n", phy_id, parent_count);
return NULL;
}
/* Pop the child nodes off the stack and push the new node. */
__list_del(h->prev, &stack);
list_add_tail(&node->link, &stack);
stack_depth += 1 - child_port_count;
if (node->phy_speed == SCODE_BETA &&
parent_count + child_port_count > 1)
beta_repeaters_present = true;
/*
* If PHYs report different gap counts, set an invalid count
* which will force a gap count reconfiguration and a reset.
*/
if (SELF_ID_GAP_COUNT(q) != gap_count)
gap_count = 0;
update_hop_count(node);
sid = next_sid;
phy_id++;
}
card->root_node = node;
card->irm_node = irm_node;
card->gap_count = gap_count;
card->beta_repeaters_present = beta_repeaters_present;
return local_node;
}
typedef void (*fw_node_callback_t)(struct fw_card * card,
struct fw_node * node,
struct fw_node * parent);
static void for_each_fw_node(struct fw_card *card, struct fw_node *root,
fw_node_callback_t callback)
{
struct list_head list;
struct fw_node *node, *next, *child, *parent;
int i;
INIT_LIST_HEAD(&list);
fw_node_get(root);
list_add_tail(&root->link, &list);
parent = NULL;
list_for_each_entry(node, &list, link) {
node->color = card->color;
for (i = 0; i < node->port_count; i++) {
child = node->ports[i];
if (!child)
continue;
if (child->color == card->color)
parent = child;
else {
fw_node_get(child);
list_add_tail(&child->link, &list);
}
}
callback(card, node, parent);
}
list_for_each_entry_safe(node, next, &list, link)
fw_node_put(node);
}
static void report_lost_node(struct fw_card *card,
struct fw_node *node, struct fw_node *parent)
{
fw_node_event(card, node, FW_NODE_DESTROYED);
fw_node_put(node);
/* Topology has changed - reset bus manager retry counter */
card->bm_retries = 0;
}
static void report_found_node(struct fw_card *card,
struct fw_node *node, struct fw_node *parent)
{
int b_path = (node->phy_speed == SCODE_BETA);
if (parent != NULL) {
/* min() macro doesn't work here with gcc 3.4 */
node->max_speed = parent->max_speed < node->phy_speed ?
parent->max_speed : node->phy_speed;
node->b_path = parent->b_path && b_path;
} else {
node->max_speed = node->phy_speed;
node->b_path = b_path;
}
fw_node_event(card, node, FW_NODE_CREATED);
/* Topology has changed - reset bus manager retry counter */
card->bm_retries = 0;
}
/* Must be called with card->lock held */
void fw_destroy_nodes(struct fw_card *card)
{
card->color++;
if (card->local_node != NULL)
for_each_fw_node(card, card->local_node, report_lost_node);
card->local_node = NULL;
}
static void move_tree(struct fw_node *node0, struct fw_node *node1, int port)
{
struct fw_node *tree;
int i;
tree = node1->ports[port];
node0->ports[port] = tree;
for (i = 0; i < tree->port_count; i++) {
if (tree->ports[i] == node1) {
tree->ports[i] = node0;
break;
}
}
}
/*
* Compare the old topology tree for card with the new one specified by root.
* Queue the nodes and mark them as either found, lost or updated.
* Update the nodes in the card topology tree as we go.
*/
static void update_tree(struct fw_card *card, struct fw_node *root)
{
struct list_head list0, list1;
struct fw_node *node0, *node1, *next1;
int i, event;
INIT_LIST_HEAD(&list0);
list_add_tail(&card->local_node->link, &list0);
INIT_LIST_HEAD(&list1);
list_add_tail(&root->link, &list1);
node0 = fw_node(list0.next);
node1 = fw_node(list1.next);
while (&node0->link != &list0) {
WARN_ON(node0->port_count != node1->port_count);
if (node0->link_on && !node1->link_on)
event = FW_NODE_LINK_OFF;
else if (!node0->link_on && node1->link_on)
event = FW_NODE_LINK_ON;
else if (node1->initiated_reset && node1->link_on)
event = FW_NODE_INITIATED_RESET;
else
event = FW_NODE_UPDATED;
node0->node_id = node1->node_id;
node0->color = card->color;
node0->link_on = node1->link_on;
node0->initiated_reset = node1->initiated_reset;
node0->max_hops = node1->max_hops;
node1->color = card->color;
fw_node_event(card, node0, event);
if (card->root_node == node1)
card->root_node = node0;
if (card->irm_node == node1)
card->irm_node = node0;
for (i = 0; i < node0->port_count; i++) {
if (node0->ports[i] && node1->ports[i]) {
/*
* This port didn't change, queue the
* connected node for further
* investigation.
*/
if (node0->ports[i]->color == card->color)
continue;
list_add_tail(&node0->ports[i]->link, &list0);
list_add_tail(&node1->ports[i]->link, &list1);
} else if (node0->ports[i]) {
/*
* The nodes connected here were
* unplugged; unref the lost nodes and
				 * queue FW_NODE_DESTROYED callbacks for
* them.
*/
for_each_fw_node(card, node0->ports[i],
report_lost_node);
node0->ports[i] = NULL;
} else if (node1->ports[i]) {
/*
				 * One or more nodes were connected to
* this port. Move the new nodes into
* the tree and queue FW_NODE_CREATED
* callbacks for them.
*/
move_tree(node0, node1, i);
for_each_fw_node(card, node0->ports[i],
report_found_node);
}
}
node0 = fw_node(node0->link.next);
next1 = fw_node(node1->link.next);
fw_node_put(node1);
node1 = next1;
}
}
static void update_topology_map(struct fw_card *card,
u32 *self_ids, int self_id_count)
{
int node_count = (card->root_node->node_id & 0x3f) + 1;
__be32 *map = card->topology_map;
*map++ = cpu_to_be32((self_id_count + 2) << 16);
*map++ = cpu_to_be32(be32_to_cpu(card->topology_map[1]) + 1);
*map++ = cpu_to_be32((node_count << 16) | self_id_count);
while (self_id_count--)
*map++ = cpu_to_be32p(self_ids++);
fw_compute_block_crc(card->topology_map);
}
void fw_core_handle_bus_reset(struct fw_card *card, int node_id, int generation,
int self_id_count, u32 *self_ids, bool bm_abdicate)
{
struct fw_node *local_node;
unsigned long flags;
spin_lock_irqsave(&card->lock, flags);
/*
* If the selfID buffer is not the immediate successor of the
* previously processed one, we cannot reliably compare the
* old and new topologies.
*/
if (!is_next_generation(generation, card->generation) &&
card->local_node != NULL) {
fw_destroy_nodes(card);
card->bm_retries = 0;
}
card->broadcast_channel_allocated = card->broadcast_channel_auto_allocated;
card->node_id = node_id;
/*
* Update node_id before generation to prevent anybody from using
* a stale node_id together with a current generation.
*/
smp_wmb();
card->generation = generation;
card->reset_jiffies = get_jiffies_64();
card->bm_node_id = 0xffff;
card->bm_abdicate = bm_abdicate;
fw_schedule_bm_work(card, 0);
local_node = build_tree(card, self_ids, self_id_count);
update_topology_map(card, self_ids, self_id_count);
card->color++;
if (local_node == NULL) {
fw_err(card, "topology build failed\n");
/* FIXME: We need to issue a bus reset in this case. */
} else if (card->local_node == NULL) {
card->local_node = local_node;
for_each_fw_node(card, local_node, report_found_node);
} else {
update_tree(card, local_node);
}
spin_unlock_irqrestore(&card->lock, flags);
}
EXPORT_SYMBOL(fw_core_handle_bus_reset);
| linux-master | drivers/firewire/core-topology.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Isochronous I/O functionality:
* - Isochronous DMA context management
* - Isochronous bus resource management (channels, bandwidth), client side
*
* Copyright (C) 2006 Kristian Hoegsberg <[email protected]>
*/
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/firewire.h>
#include <linux/firewire-constants.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/vmalloc.h>
#include <linux/export.h>
#include <asm/byteorder.h>
#include "core.h"
/*
* Isochronous DMA context management
*/
int fw_iso_buffer_alloc(struct fw_iso_buffer *buffer, int page_count)
{
int i;
buffer->page_count = 0;
buffer->page_count_mapped = 0;
buffer->pages = kmalloc_array(page_count, sizeof(buffer->pages[0]),
GFP_KERNEL);
if (buffer->pages == NULL)
return -ENOMEM;
for (i = 0; i < page_count; i++) {
buffer->pages[i] = alloc_page(GFP_KERNEL | GFP_DMA32 | __GFP_ZERO);
if (buffer->pages[i] == NULL)
break;
}
buffer->page_count = i;
if (i < page_count) {
fw_iso_buffer_destroy(buffer, NULL);
return -ENOMEM;
}
return 0;
}
int fw_iso_buffer_map_dma(struct fw_iso_buffer *buffer, struct fw_card *card,
enum dma_data_direction direction)
{
dma_addr_t address;
int i;
buffer->direction = direction;
for (i = 0; i < buffer->page_count; i++) {
address = dma_map_page(card->device, buffer->pages[i],
0, PAGE_SIZE, direction);
if (dma_mapping_error(card->device, address))
break;
set_page_private(buffer->pages[i], address);
}
buffer->page_count_mapped = i;
if (i < buffer->page_count)
return -ENOMEM;
return 0;
}
int fw_iso_buffer_init(struct fw_iso_buffer *buffer, struct fw_card *card,
int page_count, enum dma_data_direction direction)
{
int ret;
ret = fw_iso_buffer_alloc(buffer, page_count);
if (ret < 0)
return ret;
ret = fw_iso_buffer_map_dma(buffer, card, direction);
if (ret < 0)
fw_iso_buffer_destroy(buffer, card);
return ret;
}
EXPORT_SYMBOL(fw_iso_buffer_init);
void fw_iso_buffer_destroy(struct fw_iso_buffer *buffer,
struct fw_card *card)
{
int i;
dma_addr_t address;
for (i = 0; i < buffer->page_count_mapped; i++) {
address = page_private(buffer->pages[i]);
dma_unmap_page(card->device, address,
PAGE_SIZE, buffer->direction);
}
for (i = 0; i < buffer->page_count; i++)
__free_page(buffer->pages[i]);
kfree(buffer->pages);
buffer->pages = NULL;
buffer->page_count = 0;
buffer->page_count_mapped = 0;
}
EXPORT_SYMBOL(fw_iso_buffer_destroy);
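/*
 * Illustrative sketch only (not part of this file): the usual pairing of
 * fw_iso_buffer_init() and fw_iso_buffer_destroy() as seen from an in-kernel
 * client.  The page count and DMA direction are arbitrary example values.
 */
static inline int example_iso_buffer_use(struct fw_card *card)
{
	struct fw_iso_buffer buffer;
	int ret;
	ret = fw_iso_buffer_init(&buffer, card, 16, DMA_FROM_DEVICE);
	if (ret < 0)
		return ret;	/* init already cleaned up after itself */
	/* ... queue packets that reference offsets into the 16-page buffer ... */
	fw_iso_buffer_destroy(&buffer, card);
	return 0;
}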
/* Convert DMA address to offset into virtually contiguous buffer. */
size_t fw_iso_buffer_lookup(struct fw_iso_buffer *buffer, dma_addr_t completed)
{
size_t i;
dma_addr_t address;
ssize_t offset;
for (i = 0; i < buffer->page_count; i++) {
address = page_private(buffer->pages[i]);
offset = (ssize_t)completed - (ssize_t)address;
if (offset > 0 && offset <= PAGE_SIZE)
return (i << PAGE_SHIFT) + offset;
}
return 0;
}
struct fw_iso_context *fw_iso_context_create(struct fw_card *card,
int type, int channel, int speed, size_t header_size,
fw_iso_callback_t callback, void *callback_data)
{
struct fw_iso_context *ctx;
ctx = card->driver->allocate_iso_context(card,
type, channel, header_size);
if (IS_ERR(ctx))
return ctx;
ctx->card = card;
ctx->type = type;
ctx->channel = channel;
ctx->speed = speed;
ctx->header_size = header_size;
ctx->callback.sc = callback;
ctx->callback_data = callback_data;
return ctx;
}
EXPORT_SYMBOL(fw_iso_context_create);
void fw_iso_context_destroy(struct fw_iso_context *ctx)
{
ctx->card->driver->free_iso_context(ctx);
}
EXPORT_SYMBOL(fw_iso_context_destroy);
int fw_iso_context_start(struct fw_iso_context *ctx,
int cycle, int sync, int tags)
{
return ctx->card->driver->start_iso(ctx, cycle, sync, tags);
}
EXPORT_SYMBOL(fw_iso_context_start);
int fw_iso_context_set_channels(struct fw_iso_context *ctx, u64 *channels)
{
return ctx->card->driver->set_iso_channels(ctx, channels);
}
int fw_iso_context_queue(struct fw_iso_context *ctx,
struct fw_iso_packet *packet,
struct fw_iso_buffer *buffer,
unsigned long payload)
{
return ctx->card->driver->queue_iso(ctx, packet, buffer, payload);
}
EXPORT_SYMBOL(fw_iso_context_queue);
void fw_iso_context_queue_flush(struct fw_iso_context *ctx)
{
ctx->card->driver->flush_queue_iso(ctx);
}
EXPORT_SYMBOL(fw_iso_context_queue_flush);
int fw_iso_context_flush_completions(struct fw_iso_context *ctx)
{
return ctx->card->driver->flush_iso_completions(ctx);
}
EXPORT_SYMBOL(fw_iso_context_flush_completions);
int fw_iso_context_stop(struct fw_iso_context *ctx)
{
return ctx->card->driver->stop_iso(ctx);
}
EXPORT_SYMBOL(fw_iso_context_stop);
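/*
 * Illustrative sketch only (not part of this file): the life cycle of an
 * isochronous context as driven through the thin wrappers above.  Context
 * type, channel, speed, header size and packet layout are arbitrary example
 * values; FW_ISO_CONTEXT_RECEIVE, FW_ISO_CONTEXT_MATCH_ALL_TAGS and the
 * fw_iso_callback_t signature are assumed from <linux/firewire.h> and
 * <linux/firewire-constants.h>.
 */
static void example_iso_callback(struct fw_iso_context *ctx, u32 cycle,
				 size_t header_length, void *header, void *data)
{
	/* runs when a packet queued with .interrupt = 1 completes */
}
static inline int example_iso_context_use(struct fw_card *card,
					  struct fw_iso_buffer *buffer)
{
	struct fw_iso_context *ctx;
	struct fw_iso_packet packet = { .payload_length = 1024, .interrupt = 1 };
	int ret;
	ctx = fw_iso_context_create(card, FW_ISO_CONTEXT_RECEIVE, 37,
				    SCODE_400, 4, example_iso_callback, NULL);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);
	ret = fw_iso_context_queue(ctx, &packet, buffer, 0);
	if (ret == 0) {
		fw_iso_context_queue_flush(ctx);
		ret = fw_iso_context_start(ctx, -1, 0,
					   FW_ISO_CONTEXT_MATCH_ALL_TAGS);
	}
	if (ret == 0)
		fw_iso_context_stop(ctx);
	fw_iso_context_destroy(ctx);
	return ret;
}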
/*
* Isochronous bus resource management (channels, bandwidth), client side
*/
static int manage_bandwidth(struct fw_card *card, int irm_id, int generation,
int bandwidth, bool allocate)
{
int try, new, old = allocate ? BANDWIDTH_AVAILABLE_INITIAL : 0;
__be32 data[2];
/*
* On a 1394a IRM with low contention, try < 1 is enough.
* On a 1394-1995 IRM, we need at least try < 2.
* Let's just do try < 5.
*/
for (try = 0; try < 5; try++) {
new = allocate ? old - bandwidth : old + bandwidth;
if (new < 0 || new > BANDWIDTH_AVAILABLE_INITIAL)
return -EBUSY;
data[0] = cpu_to_be32(old);
data[1] = cpu_to_be32(new);
switch (fw_run_transaction(card, TCODE_LOCK_COMPARE_SWAP,
irm_id, generation, SCODE_100,
CSR_REGISTER_BASE + CSR_BANDWIDTH_AVAILABLE,
data, 8)) {
case RCODE_GENERATION:
/* A generation change frees all bandwidth. */
return allocate ? -EAGAIN : bandwidth;
case RCODE_COMPLETE:
if (be32_to_cpup(data) == old)
return bandwidth;
old = be32_to_cpup(data);
/* Fall through. */
}
}
return -EIO;
}
static int manage_channel(struct fw_card *card, int irm_id, int generation,
u32 channels_mask, u64 offset, bool allocate)
{
__be32 bit, all, old;
__be32 data[2];
int channel, ret = -EIO, retry = 5;
old = all = allocate ? cpu_to_be32(~0) : 0;
for (channel = 0; channel < 32; channel++) {
if (!(channels_mask & 1 << channel))
continue;
ret = -EBUSY;
bit = cpu_to_be32(1 << (31 - channel));
if ((old & bit) != (all & bit))
continue;
data[0] = old;
data[1] = old ^ bit;
switch (fw_run_transaction(card, TCODE_LOCK_COMPARE_SWAP,
irm_id, generation, SCODE_100,
offset, data, 8)) {
case RCODE_GENERATION:
/* A generation change frees all channels. */
return allocate ? -EAGAIN : channel;
case RCODE_COMPLETE:
if (data[0] == old)
return channel;
old = data[0];
/* Is the IRM 1394a-2000 compliant? */
if ((data[0] & bit) == (data[1] & bit))
continue;
fallthrough; /* It's a 1394-1995 IRM, retry */
default:
if (retry) {
retry--;
channel--;
} else {
ret = -EIO;
}
}
}
return ret;
}
static void deallocate_channel(struct fw_card *card, int irm_id,
int generation, int channel)
{
u32 mask;
u64 offset;
mask = channel < 32 ? 1 << channel : 1 << (channel - 32);
offset = channel < 32 ? CSR_REGISTER_BASE + CSR_CHANNELS_AVAILABLE_HI :
CSR_REGISTER_BASE + CSR_CHANNELS_AVAILABLE_LO;
manage_channel(card, irm_id, generation, mask, offset, false);
}
/**
* fw_iso_resource_manage() - Allocate or deallocate a channel and/or bandwidth
* @card: card interface for this action
* @generation: bus generation
* @channels_mask: bitmask for channel allocation
* @channel: pointer for returning channel allocation result
* @bandwidth: pointer for returning bandwidth allocation result
* @allocate: whether to allocate (true) or deallocate (false)
*
* In parameters: card, generation, channels_mask, bandwidth, allocate
* Out parameters: channel, bandwidth
*
* This function blocks (sleeps) during communication with the IRM.
*
* Allocates or deallocates at most one channel out of channels_mask.
* channels_mask is a bitfield with MSB for channel 63 and LSB for channel 0.
* (Note, the IRM's CHANNELS_AVAILABLE is a big-endian bitfield with MSB for
* channel 0 and LSB for channel 63.)
* Allocates or deallocates as many bandwidth allocation units as specified.
*
* Returns channel < 0 if no channel was allocated or deallocated.
* Returns bandwidth = 0 if no bandwidth was allocated or deallocated.
*
* If generation is stale, deallocations succeed but allocations fail with
* channel = -EAGAIN.
*
* If channel allocation fails, no bandwidth will be allocated either.
* If bandwidth allocation fails, no channel will be allocated either.
* But deallocations of channel and bandwidth are tried independently
* of each other's success.
*/
void fw_iso_resource_manage(struct fw_card *card, int generation,
u64 channels_mask, int *channel, int *bandwidth,
bool allocate)
{
u32 channels_hi = channels_mask; /* channels 31...0 */
u32 channels_lo = channels_mask >> 32; /* channels 63...32 */
int irm_id, ret, c = -EINVAL;
spin_lock_irq(&card->lock);
irm_id = card->irm_node->node_id;
spin_unlock_irq(&card->lock);
if (channels_hi)
c = manage_channel(card, irm_id, generation, channels_hi,
CSR_REGISTER_BASE + CSR_CHANNELS_AVAILABLE_HI,
allocate);
if (channels_lo && c < 0) {
c = manage_channel(card, irm_id, generation, channels_lo,
CSR_REGISTER_BASE + CSR_CHANNELS_AVAILABLE_LO,
allocate);
if (c >= 0)
c += 32;
}
*channel = c;
if (allocate && channels_mask != 0 && c < 0)
*bandwidth = 0;
if (*bandwidth == 0)
return;
ret = manage_bandwidth(card, irm_id, generation, *bandwidth, allocate);
if (ret < 0)
*bandwidth = 0;
if (allocate && ret < 0) {
if (c >= 0)
deallocate_channel(card, irm_id, generation, c);
*channel = ret;
}
}
EXPORT_SYMBOL(fw_iso_resource_manage);
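/*
 * Illustrative sketch only (not part of this file): allocating one channel
 * out of a small candidate set plus some bandwidth, then releasing both.
 * The channel set and bandwidth figure are arbitrary example values; real
 * callers take the generation from a bus reset notification and retry on
 * -EAGAIN, as described in the kerneldoc above.
 */
static inline void example_iso_resource_use(struct fw_card *card, int generation)
{
	u64 channels = (1ULL << 35) | (1ULL << 36);	/* try channel 35 or 36 */
	int channel = -1;
	int bandwidth = 400;				/* allocation units */
	fw_iso_resource_manage(card, generation, channels,
			       &channel, &bandwidth, true);
	if (channel < 0 || bandwidth == 0) {
		/* per the rules above, nothing is held when either part fails */
		return;
	}
	/* ... stream on 'channel' using the allocated bandwidth ... */
	fw_iso_resource_manage(card, generation, 1ULL << channel,
			       &channel, &bandwidth, false);
}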
| linux-master | drivers/firewire/core-iso.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* SBP2 driver (SCSI over IEEE1394)
*
* Copyright (C) 2005-2007 Kristian Hoegsberg <[email protected]>
*/
/*
* The basic structure of this driver is based on the old storage driver,
* drivers/ieee1394/sbp2.c, originally written by
* James Goodwin <[email protected]>
* with later contributions and ongoing maintenance from
* Ben Collins <[email protected]>,
* Stefan Richter <[email protected]>
* and many others.
*/
#include <linux/blkdev.h>
#include <linux/bug.h>
#include <linux/completion.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/firewire.h>
#include <linux/firewire-constants.h>
#include <linux/init.h>
#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/kref.h>
#include <linux/list.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/stringify.h>
#include <linux/workqueue.h>
#include <asm/byteorder.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
/*
* So far only bridges from Oxford Semiconductor are known to support
* concurrent logins. Depending on firmware, four or two concurrent logins
* are possible on OXFW911 and newer Oxsemi bridges.
*
* Concurrent logins are useful together with cluster filesystems.
*/
static bool sbp2_param_exclusive_login = 1;
module_param_named(exclusive_login, sbp2_param_exclusive_login, bool, 0644);
MODULE_PARM_DESC(exclusive_login, "Exclusive login to sbp2 device "
"(default = Y, use N for concurrent initiators)");
/*
* Flags for firmware oddities
*
* - 128kB max transfer
* Limit transfer size. Necessary for some old bridges.
*
* - 36 byte inquiry
* When scsi_mod probes the device, let the inquiry command look like that
* from MS Windows.
*
* - skip mode page 8
* Suppress sending of mode_sense for mode page 8 if the device pretends to
* support the SCSI Primary Block commands instead of Reduced Block Commands.
*
* - fix capacity
* Tell sd_mod to correct the last sector number reported by read_capacity.
* Avoids access beyond actual disk limits on devices with an off-by-one bug.
* Don't use this with devices which don't have this bug.
*
* - delay inquiry
* Wait extra SBP2_INQUIRY_DELAY seconds after login before SCSI inquiry.
*
* - power condition
* Set the power condition field in the START STOP UNIT commands sent by
* sd_mod on suspend, resume, and shutdown (if manage_start_stop is on).
* Some disks need this to spin down or to resume properly.
*
* - override internal blacklist
* Instead of adding to the built-in blacklist, use only the workarounds
* specified in the module load parameter.
* Useful if a blacklist entry interfered with a non-broken device.
*/
#define SBP2_WORKAROUND_128K_MAX_TRANS 0x1
#define SBP2_WORKAROUND_INQUIRY_36 0x2
#define SBP2_WORKAROUND_MODE_SENSE_8 0x4
#define SBP2_WORKAROUND_FIX_CAPACITY 0x8
#define SBP2_WORKAROUND_DELAY_INQUIRY 0x10
#define SBP2_INQUIRY_DELAY 12
#define SBP2_WORKAROUND_POWER_CONDITION 0x20
#define SBP2_WORKAROUND_OVERRIDE 0x100
static int sbp2_param_workarounds;
module_param_named(workarounds, sbp2_param_workarounds, int, 0644);
MODULE_PARM_DESC(workarounds, "Work around device bugs (default = 0"
", 128kB max transfer = " __stringify(SBP2_WORKAROUND_128K_MAX_TRANS)
", 36 byte inquiry = " __stringify(SBP2_WORKAROUND_INQUIRY_36)
", skip mode page 8 = " __stringify(SBP2_WORKAROUND_MODE_SENSE_8)
", fix capacity = " __stringify(SBP2_WORKAROUND_FIX_CAPACITY)
", delay inquiry = " __stringify(SBP2_WORKAROUND_DELAY_INQUIRY)
", set power condition in start stop unit = "
__stringify(SBP2_WORKAROUND_POWER_CONDITION)
", override internal blacklist = " __stringify(SBP2_WORKAROUND_OVERRIDE)
", or a combination)");
/*
* We create one struct sbp2_logical_unit per SBP-2 Logical Unit Number Entry
* and one struct scsi_device per sbp2_logical_unit.
*/
struct sbp2_logical_unit {
struct sbp2_target *tgt;
struct list_head link;
struct fw_address_handler address_handler;
struct list_head orb_list;
u64 command_block_agent_address;
u16 lun;
int login_id;
/*
* The generation is updated once we've logged in or reconnected
* to the logical unit. Thus, I/O to the device will automatically
* fail and get retried if it happens in a window where the device
* is not ready, e.g. after a bus reset but before we reconnect.
*/
int generation;
int retries;
work_func_t workfn;
struct delayed_work work;
bool has_sdev;
bool blocked;
};
static void sbp2_queue_work(struct sbp2_logical_unit *lu, unsigned long delay)
{
queue_delayed_work(fw_workqueue, &lu->work, delay);
}
/*
* We create one struct sbp2_target per IEEE 1212 Unit Directory
* and one struct Scsi_Host per sbp2_target.
*/
struct sbp2_target {
struct fw_unit *unit;
struct list_head lu_list;
u64 management_agent_address;
u64 guid;
int directory_id;
int node_id;
int address_high;
unsigned int workarounds;
unsigned int mgt_orb_timeout;
unsigned int max_payload;
spinlock_t lock;
int dont_block; /* counter for each logical unit */
int blocked; /* ditto */
};
static struct fw_device *target_parent_device(struct sbp2_target *tgt)
{
return fw_parent_device(tgt->unit);
}
static const struct device *tgt_dev(const struct sbp2_target *tgt)
{
return &tgt->unit->device;
}
static const struct device *lu_dev(const struct sbp2_logical_unit *lu)
{
return &lu->tgt->unit->device;
}
/* Impossible login_id, to detect logout attempt before successful login */
#define INVALID_LOGIN_ID 0x10000
#define SBP2_ORB_TIMEOUT 2000U /* Timeout in ms */
#define SBP2_ORB_NULL 0x80000000
#define SBP2_RETRY_LIMIT 0xf /* 15 retries */
#define SBP2_CYCLE_LIMIT (0xc8 << 12) /* 200 125us cycles */
/*
* There is no transport protocol limit to the CDB length, but we implement
* a fixed length only. 16 bytes is enough for disks larger than 2 TB.
*/
#define SBP2_MAX_CDB_SIZE 16
/*
* The maximum SBP-2 data buffer size is 0xffff. We quadlet-align this
* for compatibility with earlier versions of this driver.
*/
#define SBP2_MAX_SEG_SIZE 0xfffc
/* Unit directory keys */
#define SBP2_CSR_UNIT_CHARACTERISTICS 0x3a
#define SBP2_CSR_FIRMWARE_REVISION 0x3c
#define SBP2_CSR_LOGICAL_UNIT_NUMBER 0x14
#define SBP2_CSR_UNIT_UNIQUE_ID 0x8d
#define SBP2_CSR_LOGICAL_UNIT_DIRECTORY 0xd4
/* Management orb opcodes */
#define SBP2_LOGIN_REQUEST 0x0
#define SBP2_QUERY_LOGINS_REQUEST 0x1
#define SBP2_RECONNECT_REQUEST 0x3
#define SBP2_SET_PASSWORD_REQUEST 0x4
#define SBP2_LOGOUT_REQUEST 0x7
#define SBP2_ABORT_TASK_REQUEST 0xb
#define SBP2_ABORT_TASK_SET 0xc
#define SBP2_LOGICAL_UNIT_RESET 0xe
#define SBP2_TARGET_RESET_REQUEST 0xf
/* Offsets for command block agent registers */
#define SBP2_AGENT_STATE 0x00
#define SBP2_AGENT_RESET 0x04
#define SBP2_ORB_POINTER 0x08
#define SBP2_DOORBELL 0x10
#define SBP2_UNSOLICITED_STATUS_ENABLE 0x14
/* Status write response codes */
#define SBP2_STATUS_REQUEST_COMPLETE 0x0
#define SBP2_STATUS_TRANSPORT_FAILURE 0x1
#define SBP2_STATUS_ILLEGAL_REQUEST 0x2
#define SBP2_STATUS_VENDOR_DEPENDENT 0x3
#define STATUS_GET_ORB_HIGH(v) ((v).status & 0xffff)
#define STATUS_GET_SBP_STATUS(v) (((v).status >> 16) & 0xff)
#define STATUS_GET_LEN(v) (((v).status >> 24) & 0x07)
#define STATUS_GET_DEAD(v) (((v).status >> 27) & 0x01)
#define STATUS_GET_RESPONSE(v) (((v).status >> 28) & 0x03)
#define STATUS_GET_SOURCE(v) (((v).status >> 30) & 0x03)
#define STATUS_GET_ORB_LOW(v) ((v).orb_low)
#define STATUS_GET_DATA(v) ((v).data)
struct sbp2_status {
u32 status;
u32 orb_low;
u8 data[24];
};
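/*
 * Worked example (illustrative only): the common "good" status block has
 * src = 0, resp = SBP2_STATUS_REQUEST_COMPLETE, dead = 0, len = 1 and
 * sbp_status = 0, i.e. a first quadlet of 0x01000000.  For such a block
 * STATUS_GET_LEN() yields 1, the other STATUS_GET_*() accessors of the first
 * quadlet yield 0, and STATUS_GET_ORB_LOW() returns the bus address that
 * sbp2_status_write() below matches against orb->request_bus.
 */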
struct sbp2_pointer {
__be32 high;
__be32 low;
};
struct sbp2_orb {
struct fw_transaction t;
struct kref kref;
dma_addr_t request_bus;
int rcode;
void (*callback)(struct sbp2_orb * orb, struct sbp2_status * status);
struct sbp2_logical_unit *lu;
struct list_head link;
};
#define MANAGEMENT_ORB_LUN(v) ((v))
#define MANAGEMENT_ORB_FUNCTION(v) ((v) << 16)
#define MANAGEMENT_ORB_RECONNECT(v) ((v) << 20)
#define MANAGEMENT_ORB_EXCLUSIVE(v) ((v) ? 1 << 28 : 0)
#define MANAGEMENT_ORB_REQUEST_FORMAT(v) ((v) << 29)
#define MANAGEMENT_ORB_NOTIFY ((1) << 31)
#define MANAGEMENT_ORB_RESPONSE_LENGTH(v) ((v))
#define MANAGEMENT_ORB_PASSWORD_LENGTH(v) ((v) << 16)
struct sbp2_management_orb {
struct sbp2_orb base;
struct {
struct sbp2_pointer password;
struct sbp2_pointer response;
__be32 misc;
__be32 length;
struct sbp2_pointer status_fifo;
} request;
__be32 response[4];
dma_addr_t response_bus;
struct completion done;
struct sbp2_status status;
};
struct sbp2_login_response {
__be32 misc;
struct sbp2_pointer command_block_agent;
__be32 reconnect_hold;
};
#define COMMAND_ORB_DATA_SIZE(v) ((v))
#define COMMAND_ORB_PAGE_SIZE(v) ((v) << 16)
#define COMMAND_ORB_PAGE_TABLE_PRESENT ((1) << 19)
#define COMMAND_ORB_MAX_PAYLOAD(v) ((v) << 20)
#define COMMAND_ORB_SPEED(v) ((v) << 24)
#define COMMAND_ORB_DIRECTION ((1) << 27)
#define COMMAND_ORB_REQUEST_FORMAT(v) ((v) << 29)
#define COMMAND_ORB_NOTIFY ((1) << 31)
struct sbp2_command_orb {
struct sbp2_orb base;
struct {
struct sbp2_pointer next;
struct sbp2_pointer data_descriptor;
__be32 misc;
u8 command_block[SBP2_MAX_CDB_SIZE];
} request;
struct scsi_cmnd *cmd;
struct sbp2_pointer page_table[SG_ALL] __attribute__((aligned(8)));
dma_addr_t page_table_bus;
};
#define SBP2_ROM_VALUE_WILDCARD ~0 /* match all */
#define SBP2_ROM_VALUE_MISSING 0xff000000 /* not present in the unit dir. */
/*
* List of devices with known bugs.
*
 * The firmware_revision field, masked with 0xffffff00, is the best
* indicator for the type of bridge chip of a device. It yields a few
* false positives but this did not break correctly behaving devices
* so far.
*/
static const struct {
u32 firmware_revision;
u32 model;
unsigned int workarounds;
} sbp2_workarounds_table[] = {
/* DViCO Momobay CX-1 with TSB42AA9 bridge */ {
.firmware_revision = 0x002800,
.model = 0x001010,
.workarounds = SBP2_WORKAROUND_INQUIRY_36 |
SBP2_WORKAROUND_MODE_SENSE_8 |
SBP2_WORKAROUND_POWER_CONDITION,
},
/* DViCO Momobay FX-3A with TSB42AA9A bridge */ {
.firmware_revision = 0x002800,
.model = 0x000000,
.workarounds = SBP2_WORKAROUND_POWER_CONDITION,
},
/* Initio bridges, actually only needed for some older ones */ {
.firmware_revision = 0x000200,
.model = SBP2_ROM_VALUE_WILDCARD,
.workarounds = SBP2_WORKAROUND_INQUIRY_36,
},
/* PL-3507 bridge with Prolific firmware */ {
.firmware_revision = 0x012800,
.model = SBP2_ROM_VALUE_WILDCARD,
.workarounds = SBP2_WORKAROUND_POWER_CONDITION,
},
/* Symbios bridge */ {
.firmware_revision = 0xa0b800,
.model = SBP2_ROM_VALUE_WILDCARD,
.workarounds = SBP2_WORKAROUND_128K_MAX_TRANS,
},
/* Datafab MD2-FW2 with Symbios/LSILogic SYM13FW500 bridge */ {
.firmware_revision = 0x002600,
.model = SBP2_ROM_VALUE_WILDCARD,
.workarounds = SBP2_WORKAROUND_128K_MAX_TRANS,
},
/*
* iPod 2nd generation: needs 128k max transfer size workaround
* iPod 3rd generation: needs fix capacity workaround
*/
{
.firmware_revision = 0x0a2700,
.model = 0x000000,
.workarounds = SBP2_WORKAROUND_128K_MAX_TRANS |
SBP2_WORKAROUND_FIX_CAPACITY,
},
/* iPod 4th generation */ {
.firmware_revision = 0x0a2700,
.model = 0x000021,
.workarounds = SBP2_WORKAROUND_FIX_CAPACITY,
},
/* iPod mini */ {
.firmware_revision = 0x0a2700,
.model = 0x000022,
.workarounds = SBP2_WORKAROUND_FIX_CAPACITY,
},
/* iPod mini */ {
.firmware_revision = 0x0a2700,
.model = 0x000023,
.workarounds = SBP2_WORKAROUND_FIX_CAPACITY,
},
/* iPod Photo */ {
.firmware_revision = 0x0a2700,
.model = 0x00007e,
.workarounds = SBP2_WORKAROUND_FIX_CAPACITY,
}
};
static void free_orb(struct kref *kref)
{
struct sbp2_orb *orb = container_of(kref, struct sbp2_orb, kref);
kfree(orb);
}
static void sbp2_status_write(struct fw_card *card, struct fw_request *request,
int tcode, int destination, int source,
int generation, unsigned long long offset,
void *payload, size_t length, void *callback_data)
{
struct sbp2_logical_unit *lu = callback_data;
struct sbp2_orb *orb = NULL, *iter;
struct sbp2_status status;
unsigned long flags;
if (tcode != TCODE_WRITE_BLOCK_REQUEST ||
length < 8 || length > sizeof(status)) {
fw_send_response(card, request, RCODE_TYPE_ERROR);
return;
}
status.status = be32_to_cpup(payload);
status.orb_low = be32_to_cpup(payload + 4);
memset(status.data, 0, sizeof(status.data));
if (length > 8)
memcpy(status.data, payload + 8, length - 8);
if (STATUS_GET_SOURCE(status) == 2 || STATUS_GET_SOURCE(status) == 3) {
dev_notice(lu_dev(lu),
"non-ORB related status write, not handled\n");
fw_send_response(card, request, RCODE_COMPLETE);
return;
}
/* Lookup the orb corresponding to this status write. */
spin_lock_irqsave(&lu->tgt->lock, flags);
list_for_each_entry(iter, &lu->orb_list, link) {
if (STATUS_GET_ORB_HIGH(status) == 0 &&
STATUS_GET_ORB_LOW(status) == iter->request_bus) {
iter->rcode = RCODE_COMPLETE;
list_del(&iter->link);
orb = iter;
break;
}
}
spin_unlock_irqrestore(&lu->tgt->lock, flags);
if (orb) {
orb->callback(orb, &status);
kref_put(&orb->kref, free_orb); /* orb callback reference */
} else {
dev_err(lu_dev(lu), "status write for unknown ORB\n");
}
fw_send_response(card, request, RCODE_COMPLETE);
}
static void complete_transaction(struct fw_card *card, int rcode,
void *payload, size_t length, void *data)
{
struct sbp2_orb *orb = data;
unsigned long flags;
/*
* This is a little tricky. We can get the status write for
* the orb before we get this callback. The status write
* handler above will assume the orb pointer transaction was
* successful and set the rcode to RCODE_COMPLETE for the orb.
* So this callback only sets the rcode if it hasn't already
* been set and only does the cleanup if the transaction
* failed and we didn't already get a status write.
*/
spin_lock_irqsave(&orb->lu->tgt->lock, flags);
if (orb->rcode == -1)
orb->rcode = rcode;
if (orb->rcode != RCODE_COMPLETE) {
list_del(&orb->link);
spin_unlock_irqrestore(&orb->lu->tgt->lock, flags);
orb->callback(orb, NULL);
kref_put(&orb->kref, free_orb); /* orb callback reference */
} else {
spin_unlock_irqrestore(&orb->lu->tgt->lock, flags);
}
kref_put(&orb->kref, free_orb); /* transaction callback reference */
}
static void sbp2_send_orb(struct sbp2_orb *orb, struct sbp2_logical_unit *lu,
int node_id, int generation, u64 offset)
{
struct fw_device *device = target_parent_device(lu->tgt);
struct sbp2_pointer orb_pointer;
unsigned long flags;
orb_pointer.high = 0;
orb_pointer.low = cpu_to_be32(orb->request_bus);
orb->lu = lu;
spin_lock_irqsave(&lu->tgt->lock, flags);
list_add_tail(&orb->link, &lu->orb_list);
spin_unlock_irqrestore(&lu->tgt->lock, flags);
kref_get(&orb->kref); /* transaction callback reference */
kref_get(&orb->kref); /* orb callback reference */
fw_send_request(device->card, &orb->t, TCODE_WRITE_BLOCK_REQUEST,
node_id, generation, device->max_speed, offset,
&orb_pointer, 8, complete_transaction, orb);
}
static int sbp2_cancel_orbs(struct sbp2_logical_unit *lu)
{
struct fw_device *device = target_parent_device(lu->tgt);
struct sbp2_orb *orb, *next;
struct list_head list;
int retval = -ENOENT;
INIT_LIST_HEAD(&list);
spin_lock_irq(&lu->tgt->lock);
list_splice_init(&lu->orb_list, &list);
spin_unlock_irq(&lu->tgt->lock);
list_for_each_entry_safe(orb, next, &list, link) {
retval = 0;
if (fw_cancel_transaction(device->card, &orb->t) == 0)
continue;
orb->rcode = RCODE_CANCELLED;
orb->callback(orb, NULL);
kref_put(&orb->kref, free_orb); /* orb callback reference */
}
return retval;
}
static void complete_management_orb(struct sbp2_orb *base_orb,
struct sbp2_status *status)
{
struct sbp2_management_orb *orb =
container_of(base_orb, struct sbp2_management_orb, base);
if (status)
memcpy(&orb->status, status, sizeof(*status));
complete(&orb->done);
}
static int sbp2_send_management_orb(struct sbp2_logical_unit *lu, int node_id,
int generation, int function,
int lun_or_login_id, void *response)
{
struct fw_device *device = target_parent_device(lu->tgt);
struct sbp2_management_orb *orb;
unsigned int timeout;
int retval = -ENOMEM;
if (function == SBP2_LOGOUT_REQUEST && fw_device_is_shutdown(device))
return 0;
orb = kzalloc(sizeof(*orb), GFP_NOIO);
if (orb == NULL)
return -ENOMEM;
kref_init(&orb->base.kref);
orb->response_bus =
dma_map_single(device->card->device, &orb->response,
sizeof(orb->response), DMA_FROM_DEVICE);
if (dma_mapping_error(device->card->device, orb->response_bus))
goto fail_mapping_response;
orb->request.response.high = 0;
orb->request.response.low = cpu_to_be32(orb->response_bus);
orb->request.misc = cpu_to_be32(
MANAGEMENT_ORB_NOTIFY |
MANAGEMENT_ORB_FUNCTION(function) |
MANAGEMENT_ORB_LUN(lun_or_login_id));
orb->request.length = cpu_to_be32(
MANAGEMENT_ORB_RESPONSE_LENGTH(sizeof(orb->response)));
orb->request.status_fifo.high =
cpu_to_be32(lu->address_handler.offset >> 32);
orb->request.status_fifo.low =
cpu_to_be32(lu->address_handler.offset);
if (function == SBP2_LOGIN_REQUEST) {
/* Ask for 2^2 == 4 seconds reconnect grace period */
orb->request.misc |= cpu_to_be32(
MANAGEMENT_ORB_RECONNECT(2) |
MANAGEMENT_ORB_EXCLUSIVE(sbp2_param_exclusive_login));
timeout = lu->tgt->mgt_orb_timeout;
} else {
timeout = SBP2_ORB_TIMEOUT;
}
init_completion(&orb->done);
orb->base.callback = complete_management_orb;
orb->base.request_bus =
dma_map_single(device->card->device, &orb->request,
sizeof(orb->request), DMA_TO_DEVICE);
if (dma_mapping_error(device->card->device, orb->base.request_bus))
goto fail_mapping_request;
sbp2_send_orb(&orb->base, lu, node_id, generation,
lu->tgt->management_agent_address);
wait_for_completion_timeout(&orb->done, msecs_to_jiffies(timeout));
retval = -EIO;
if (sbp2_cancel_orbs(lu) == 0) {
dev_err(lu_dev(lu), "ORB reply timed out, rcode 0x%02x\n",
orb->base.rcode);
goto out;
}
if (orb->base.rcode != RCODE_COMPLETE) {
dev_err(lu_dev(lu), "management write failed, rcode 0x%02x\n",
orb->base.rcode);
goto out;
}
if (STATUS_GET_RESPONSE(orb->status) != 0 ||
STATUS_GET_SBP_STATUS(orb->status) != 0) {
dev_err(lu_dev(lu), "error status: %d:%d\n",
STATUS_GET_RESPONSE(orb->status),
STATUS_GET_SBP_STATUS(orb->status));
goto out;
}
retval = 0;
out:
dma_unmap_single(device->card->device, orb->base.request_bus,
sizeof(orb->request), DMA_TO_DEVICE);
fail_mapping_request:
dma_unmap_single(device->card->device, orb->response_bus,
sizeof(orb->response), DMA_FROM_DEVICE);
fail_mapping_response:
if (response)
memcpy(response, orb->response, sizeof(orb->response));
kref_put(&orb->base.kref, free_orb);
return retval;
}
static void sbp2_agent_reset(struct sbp2_logical_unit *lu)
{
struct fw_device *device = target_parent_device(lu->tgt);
__be32 d = 0;
fw_run_transaction(device->card, TCODE_WRITE_QUADLET_REQUEST,
lu->tgt->node_id, lu->generation, device->max_speed,
lu->command_block_agent_address + SBP2_AGENT_RESET,
&d, 4);
}
static void complete_agent_reset_write_no_wait(struct fw_card *card,
int rcode, void *payload, size_t length, void *data)
{
kfree(data);
}
static void sbp2_agent_reset_no_wait(struct sbp2_logical_unit *lu)
{
struct fw_device *device = target_parent_device(lu->tgt);
struct fw_transaction *t;
static __be32 d;
t = kmalloc(sizeof(*t), GFP_ATOMIC);
if (t == NULL)
return;
fw_send_request(device->card, t, TCODE_WRITE_QUADLET_REQUEST,
lu->tgt->node_id, lu->generation, device->max_speed,
lu->command_block_agent_address + SBP2_AGENT_RESET,
&d, 4, complete_agent_reset_write_no_wait, t);
}
static inline void sbp2_allow_block(struct sbp2_target *tgt)
{
spin_lock_irq(&tgt->lock);
--tgt->dont_block;
spin_unlock_irq(&tgt->lock);
}
/*
* Blocks lu->tgt if all of the following conditions are met:
* - Login, INQUIRY, and high-level SCSI setup of all of the target's
* logical units have been finished (indicated by dont_block == 0).
* - lu->generation is stale.
*
* Note, scsi_block_requests() must be called while holding tgt->lock,
* otherwise it might foil sbp2_[conditionally_]unblock()'s attempt to
* unblock the target.
*/
static void sbp2_conditionally_block(struct sbp2_logical_unit *lu)
{
struct sbp2_target *tgt = lu->tgt;
struct fw_card *card = target_parent_device(tgt)->card;
struct Scsi_Host *shost =
container_of((void *)tgt, struct Scsi_Host, hostdata[0]);
unsigned long flags;
spin_lock_irqsave(&tgt->lock, flags);
if (!tgt->dont_block && !lu->blocked &&
lu->generation != card->generation) {
lu->blocked = true;
if (++tgt->blocked == 1)
scsi_block_requests(shost);
}
spin_unlock_irqrestore(&tgt->lock, flags);
}
/*
* Unblocks lu->tgt as soon as all its logical units can be unblocked.
* Note, it is harmless to run scsi_unblock_requests() outside the
* tgt->lock protected section. On the other hand, running it inside
* the section might clash with shost->host_lock.
*/
static void sbp2_conditionally_unblock(struct sbp2_logical_unit *lu)
{
struct sbp2_target *tgt = lu->tgt;
struct fw_card *card = target_parent_device(tgt)->card;
struct Scsi_Host *shost =
container_of((void *)tgt, struct Scsi_Host, hostdata[0]);
bool unblock = false;
spin_lock_irq(&tgt->lock);
if (lu->blocked && lu->generation == card->generation) {
lu->blocked = false;
unblock = --tgt->blocked == 0;
}
spin_unlock_irq(&tgt->lock);
if (unblock)
scsi_unblock_requests(shost);
}
/*
* Prevents future blocking of tgt and unblocks it.
* Note, it is harmless to run scsi_unblock_requests() outside the
* tgt->lock protected section. On the other hand, running it inside
* the section might clash with shost->host_lock.
*/
static void sbp2_unblock(struct sbp2_target *tgt)
{
struct Scsi_Host *shost =
container_of((void *)tgt, struct Scsi_Host, hostdata[0]);
spin_lock_irq(&tgt->lock);
++tgt->dont_block;
spin_unlock_irq(&tgt->lock);
scsi_unblock_requests(shost);
}
static int sbp2_lun2int(u16 lun)
{
struct scsi_lun eight_bytes_lun;
memset(&eight_bytes_lun, 0, sizeof(eight_bytes_lun));
eight_bytes_lun.scsi_lun[0] = (lun >> 8) & 0xff;
eight_bytes_lun.scsi_lun[1] = lun & 0xff;
return scsilun_to_int(&eight_bytes_lun);
}
/*
* Write retransmit retry values into the BUSY_TIMEOUT register.
* - The single-phase retry protocol is supported by all SBP-2 devices, but the
* default retry_limit value is 0 (i.e. never retry transmission). We write a
* saner value after logging into the device.
* - The dual-phase retry protocol is optional to implement, and if not
* supported, writes to the dual-phase portion of the register will be
* ignored. We try to write the original 1394-1995 default here.
* - In the case of devices that are also SBP-3-compliant, all writes are
* ignored, as the register is read-only, but contains single-phase retry of
 * 15, which is what we're trying to set for all SBP-2 devices anyway, so this
* write attempt is safe and yields more consistent behavior for all devices.
*
* See section 8.3.2.3.5 of the 1394-1995 spec, section 6.2 of the SBP-2 spec,
* and section 6.4 of the SBP-3 spec for further details.
*/
static void sbp2_set_busy_timeout(struct sbp2_logical_unit *lu)
{
struct fw_device *device = target_parent_device(lu->tgt);
__be32 d = cpu_to_be32(SBP2_CYCLE_LIMIT | SBP2_RETRY_LIMIT);
fw_run_transaction(device->card, TCODE_WRITE_QUADLET_REQUEST,
lu->tgt->node_id, lu->generation, device->max_speed,
CSR_REGISTER_BASE + CSR_BUSY_TIMEOUT, &d, 4);
}
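/*
 * Worked example (illustrative only): the quadlet written above is
 * SBP2_CYCLE_LIMIT | SBP2_RETRY_LIMIT = (0xc8 << 12) | 0xf = 0x000c800f.
 * Per the defines and their comments, the low nibble sets the single-phase
 * retry limit to 15, and the 0xc8 part requests 200 cycles of 125 us each
 * (25 ms) for the dual-phase portion of the BUSY_TIMEOUT register.
 */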
static void sbp2_reconnect(struct work_struct *work);
static void sbp2_login(struct work_struct *work)
{
struct sbp2_logical_unit *lu =
container_of(work, struct sbp2_logical_unit, work.work);
struct sbp2_target *tgt = lu->tgt;
struct fw_device *device = target_parent_device(tgt);
struct Scsi_Host *shost;
struct scsi_device *sdev;
struct sbp2_login_response response;
int generation, node_id, local_node_id;
if (fw_device_is_shutdown(device))
return;
generation = device->generation;
smp_rmb(); /* node IDs must not be older than generation */
node_id = device->node_id;
local_node_id = device->card->node_id;
/* If this is a re-login attempt, log out, or we might be rejected. */
if (lu->has_sdev)
sbp2_send_management_orb(lu, device->node_id, generation,
SBP2_LOGOUT_REQUEST, lu->login_id, NULL);
if (sbp2_send_management_orb(lu, node_id, generation,
SBP2_LOGIN_REQUEST, lu->lun, &response) < 0) {
if (lu->retries++ < 5) {
sbp2_queue_work(lu, DIV_ROUND_UP(HZ, 5));
} else {
dev_err(tgt_dev(tgt), "failed to login to LUN %04x\n",
lu->lun);
/* Let any waiting I/O fail from now on. */
sbp2_unblock(lu->tgt);
}
return;
}
tgt->node_id = node_id;
tgt->address_high = local_node_id << 16;
smp_wmb(); /* node IDs must not be older than generation */
lu->generation = generation;
lu->command_block_agent_address =
((u64)(be32_to_cpu(response.command_block_agent.high) & 0xffff)
<< 32) | be32_to_cpu(response.command_block_agent.low);
lu->login_id = be32_to_cpu(response.misc) & 0xffff;
dev_notice(tgt_dev(tgt), "logged in to LUN %04x (%d retries)\n",
lu->lun, lu->retries);
/* set appropriate retry limit(s) in BUSY_TIMEOUT register */
sbp2_set_busy_timeout(lu);
lu->workfn = sbp2_reconnect;
sbp2_agent_reset(lu);
/* This was a re-login. */
if (lu->has_sdev) {
sbp2_cancel_orbs(lu);
sbp2_conditionally_unblock(lu);
return;
}
if (lu->tgt->workarounds & SBP2_WORKAROUND_DELAY_INQUIRY)
ssleep(SBP2_INQUIRY_DELAY);
shost = container_of((void *)tgt, struct Scsi_Host, hostdata[0]);
sdev = __scsi_add_device(shost, 0, 0, sbp2_lun2int(lu->lun), lu);
/*
* FIXME: We are unable to perform reconnects while in sbp2_login().
* Therefore __scsi_add_device() will get into trouble if a bus reset
* happens in parallel. It will either fail or leave us with an
* unusable sdev. As a workaround we check for this and retry the
* whole login and SCSI probing.
*/
/* Reported error during __scsi_add_device() */
if (IS_ERR(sdev))
goto out_logout_login;
/* Unreported error during __scsi_add_device() */
smp_rmb(); /* get current card generation */
if (generation != device->card->generation) {
scsi_remove_device(sdev);
scsi_device_put(sdev);
goto out_logout_login;
}
/* No error during __scsi_add_device() */
lu->has_sdev = true;
scsi_device_put(sdev);
sbp2_allow_block(tgt);
return;
out_logout_login:
smp_rmb(); /* generation may have changed */
generation = device->generation;
smp_rmb(); /* node_id must not be older than generation */
sbp2_send_management_orb(lu, device->node_id, generation,
SBP2_LOGOUT_REQUEST, lu->login_id, NULL);
/*
* If a bus reset happened, sbp2_update will have requeued
* lu->work already. Reset the work from reconnect to login.
*/
lu->workfn = sbp2_login;
}
static void sbp2_reconnect(struct work_struct *work)
{
struct sbp2_logical_unit *lu =
container_of(work, struct sbp2_logical_unit, work.work);
struct sbp2_target *tgt = lu->tgt;
struct fw_device *device = target_parent_device(tgt);
int generation, node_id, local_node_id;
if (fw_device_is_shutdown(device))
return;
generation = device->generation;
smp_rmb(); /* node IDs must not be older than generation */
node_id = device->node_id;
local_node_id = device->card->node_id;
if (sbp2_send_management_orb(lu, node_id, generation,
SBP2_RECONNECT_REQUEST,
lu->login_id, NULL) < 0) {
/*
* If reconnect was impossible even though we are in the
* current generation, fall back and try to log in again.
*
* We could check for "Function rejected" status, but
 * looking at the bus generation is simpler and more general.
*/
smp_rmb(); /* get current card generation */
if (generation == device->card->generation ||
lu->retries++ >= 5) {
dev_err(tgt_dev(tgt), "failed to reconnect\n");
lu->retries = 0;
lu->workfn = sbp2_login;
}
sbp2_queue_work(lu, DIV_ROUND_UP(HZ, 5));
return;
}
tgt->node_id = node_id;
tgt->address_high = local_node_id << 16;
smp_wmb(); /* node IDs must not be older than generation */
lu->generation = generation;
dev_notice(tgt_dev(tgt), "reconnected to LUN %04x (%d retries)\n",
lu->lun, lu->retries);
sbp2_agent_reset(lu);
sbp2_cancel_orbs(lu);
sbp2_conditionally_unblock(lu);
}
static void sbp2_lu_workfn(struct work_struct *work)
{
struct sbp2_logical_unit *lu = container_of(to_delayed_work(work),
struct sbp2_logical_unit, work);
lu->workfn(work);
}
static int sbp2_add_logical_unit(struct sbp2_target *tgt, int lun_entry)
{
struct sbp2_logical_unit *lu;
lu = kmalloc(sizeof(*lu), GFP_KERNEL);
if (!lu)
return -ENOMEM;
lu->address_handler.length = 0x100;
lu->address_handler.address_callback = sbp2_status_write;
lu->address_handler.callback_data = lu;
if (fw_core_add_address_handler(&lu->address_handler,
&fw_high_memory_region) < 0) {
kfree(lu);
return -ENOMEM;
}
lu->tgt = tgt;
lu->lun = lun_entry & 0xffff;
lu->login_id = INVALID_LOGIN_ID;
lu->retries = 0;
lu->has_sdev = false;
lu->blocked = false;
++tgt->dont_block;
INIT_LIST_HEAD(&lu->orb_list);
lu->workfn = sbp2_login;
INIT_DELAYED_WORK(&lu->work, sbp2_lu_workfn);
list_add_tail(&lu->link, &tgt->lu_list);
return 0;
}
static void sbp2_get_unit_unique_id(struct sbp2_target *tgt,
const u32 *leaf)
{
if ((leaf[0] & 0xffff0000) == 0x00020000)
tgt->guid = (u64)leaf[1] << 32 | leaf[2];
}
static int sbp2_scan_logical_unit_dir(struct sbp2_target *tgt,
const u32 *directory)
{
struct fw_csr_iterator ci;
int key, value;
fw_csr_iterator_init(&ci, directory);
while (fw_csr_iterator_next(&ci, &key, &value))
if (key == SBP2_CSR_LOGICAL_UNIT_NUMBER &&
sbp2_add_logical_unit(tgt, value) < 0)
return -ENOMEM;
return 0;
}
static int sbp2_scan_unit_dir(struct sbp2_target *tgt, const u32 *directory,
u32 *model, u32 *firmware_revision)
{
struct fw_csr_iterator ci;
int key, value;
fw_csr_iterator_init(&ci, directory);
while (fw_csr_iterator_next(&ci, &key, &value)) {
switch (key) {
case CSR_DEPENDENT_INFO | CSR_OFFSET:
tgt->management_agent_address =
CSR_REGISTER_BASE + 4 * value;
break;
case CSR_DIRECTORY_ID:
tgt->directory_id = value;
break;
case CSR_MODEL:
*model = value;
break;
case SBP2_CSR_FIRMWARE_REVISION:
*firmware_revision = value;
break;
case SBP2_CSR_UNIT_CHARACTERISTICS:
/* the timeout value is stored in 500ms units */
tgt->mgt_orb_timeout = (value >> 8 & 0xff) * 500;
break;
case SBP2_CSR_LOGICAL_UNIT_NUMBER:
if (sbp2_add_logical_unit(tgt, value) < 0)
return -ENOMEM;
break;
case SBP2_CSR_UNIT_UNIQUE_ID:
sbp2_get_unit_unique_id(tgt, ci.p - 1 + value);
break;
case SBP2_CSR_LOGICAL_UNIT_DIRECTORY:
/* Adjust for the increment in the iterator */
if (sbp2_scan_logical_unit_dir(tgt, ci.p - 1 + value) < 0)
return -ENOMEM;
break;
}
}
return 0;
}
/*
* Per section 7.4.8 of the SBP-2 spec, a mgt_ORB_timeout value can be
* provided in the config rom. Most devices do provide a value, which
* we'll use for login management orbs, but with some sane limits.
*/
static void sbp2_clamp_management_orb_timeout(struct sbp2_target *tgt)
{
unsigned int timeout = tgt->mgt_orb_timeout;
if (timeout > 40000)
dev_notice(tgt_dev(tgt), "%ds mgt_ORB_timeout limited to 40s\n",
timeout / 1000);
tgt->mgt_orb_timeout = clamp_val(timeout, 5000, 40000);
}
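/*
 * Worked example (illustrative only): with a hypothetical unit
 * characteristics value of 0x0000283c, sbp2_scan_unit_dir() above extracts
 * mgt_ORB_timeout = (value >> 8 & 0xff) * 500 = 0x28 * 500 = 20000 ms, which
 * lies inside [5000, 40000] and is used as is.  A value of 0xff in that byte
 * would yield 127500 ms and be clamped to 40000 ms with a notice.
 */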
static void sbp2_init_workarounds(struct sbp2_target *tgt, u32 model,
u32 firmware_revision)
{
int i;
unsigned int w = sbp2_param_workarounds;
if (w)
dev_notice(tgt_dev(tgt),
"Please notify [email protected] "
"if you need the workarounds parameter\n");
if (w & SBP2_WORKAROUND_OVERRIDE)
goto out;
for (i = 0; i < ARRAY_SIZE(sbp2_workarounds_table); i++) {
if (sbp2_workarounds_table[i].firmware_revision !=
(firmware_revision & 0xffffff00))
continue;
if (sbp2_workarounds_table[i].model != model &&
sbp2_workarounds_table[i].model != SBP2_ROM_VALUE_WILDCARD)
continue;
w |= sbp2_workarounds_table[i].workarounds;
break;
}
out:
if (w)
dev_notice(tgt_dev(tgt), "workarounds 0x%x "
"(firmware_revision 0x%06x, model_id 0x%06x)\n",
w, firmware_revision, model);
tgt->workarounds = w;
}
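/*
 * Worked example (illustrative only): a hypothetical device reporting
 * firmware_revision 0x0a2745 and model 0x000021 matches the "iPod 4th
 * generation" entry above, since 0x0a2745 & 0xffffff00 == 0x0a2700 and the
 * model is equal; SBP2_WORKAROUND_FIX_CAPACITY is then OR'ed into whatever
 * was given through the workarounds module parameter, unless that value
 * includes SBP2_WORKAROUND_OVERRIDE.
 */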
static const struct scsi_host_template scsi_driver_template;
static void sbp2_remove(struct fw_unit *unit);
static int sbp2_probe(struct fw_unit *unit, const struct ieee1394_device_id *id)
{
struct fw_device *device = fw_parent_device(unit);
struct sbp2_target *tgt;
struct sbp2_logical_unit *lu;
struct Scsi_Host *shost;
u32 model, firmware_revision;
/* cannot (or should not) handle targets on the local node */
if (device->is_local)
return -ENODEV;
shost = scsi_host_alloc(&scsi_driver_template, sizeof(*tgt));
if (shost == NULL)
return -ENOMEM;
tgt = (struct sbp2_target *)shost->hostdata;
dev_set_drvdata(&unit->device, tgt);
tgt->unit = unit;
INIT_LIST_HEAD(&tgt->lu_list);
spin_lock_init(&tgt->lock);
tgt->guid = (u64)device->config_rom[3] << 32 | device->config_rom[4];
if (fw_device_enable_phys_dma(device) < 0)
goto fail_shost_put;
shost->max_cmd_len = SBP2_MAX_CDB_SIZE;
if (scsi_add_host_with_dma(shost, &unit->device,
device->card->device) < 0)
goto fail_shost_put;
/* implicit directory ID */
tgt->directory_id = ((unit->directory - device->config_rom) * 4
+ CSR_CONFIG_ROM) & 0xffffff;
firmware_revision = SBP2_ROM_VALUE_MISSING;
model = SBP2_ROM_VALUE_MISSING;
if (sbp2_scan_unit_dir(tgt, unit->directory, &model,
&firmware_revision) < 0)
goto fail_remove;
sbp2_clamp_management_orb_timeout(tgt);
sbp2_init_workarounds(tgt, model, firmware_revision);
/*
* At S100 we can do 512 bytes per packet, at S200 1024 bytes,
* and so on up to 4096 bytes. The SBP-2 max_payload field
* specifies the max payload size as 2 ^ (max_payload + 2), so
* if we set this to max_speed + 7, we get the right value.
*/
tgt->max_payload = min3(device->max_speed + 7, 10U,
device->card->max_receive - 1);
/* Do the login in a workqueue so we can easily reschedule retries. */
list_for_each_entry(lu, &tgt->lu_list, link)
sbp2_queue_work(lu, DIV_ROUND_UP(HZ, 5));
return 0;
fail_remove:
sbp2_remove(unit);
return -ENOMEM;
fail_shost_put:
scsi_host_put(shost);
return -ENOMEM;
}
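/*
 * Worked example (illustrative only) for the max_payload computation in
 * sbp2_probe() above: for a hypothetical S400 device (max_speed == SCODE_400
 * == 2) on a card with max_receive == 11, max_payload = min3(2 + 7, 10, 10)
 * = 9, so the largest data transfer per packet is 2 ^ (9 + 2) = 2048 bytes,
 * consistent with the 512-bytes-at-S100 progression described in the comment
 * there.
 */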
static void sbp2_update(struct fw_unit *unit)
{
struct sbp2_target *tgt = dev_get_drvdata(&unit->device);
struct sbp2_logical_unit *lu;
fw_device_enable_phys_dma(fw_parent_device(unit));
/*
* Fw-core serializes sbp2_update() against sbp2_remove().
* Iteration over tgt->lu_list is therefore safe here.
*/
list_for_each_entry(lu, &tgt->lu_list, link) {
sbp2_conditionally_block(lu);
lu->retries = 0;
sbp2_queue_work(lu, 0);
}
}
static void sbp2_remove(struct fw_unit *unit)
{
struct fw_device *device = fw_parent_device(unit);
struct sbp2_target *tgt = dev_get_drvdata(&unit->device);
struct sbp2_logical_unit *lu, *next;
struct Scsi_Host *shost =
container_of((void *)tgt, struct Scsi_Host, hostdata[0]);
struct scsi_device *sdev;
/* prevent deadlocks */
sbp2_unblock(tgt);
list_for_each_entry_safe(lu, next, &tgt->lu_list, link) {
cancel_delayed_work_sync(&lu->work);
sdev = scsi_device_lookup(shost, 0, 0, sbp2_lun2int(lu->lun));
if (sdev) {
scsi_remove_device(sdev);
scsi_device_put(sdev);
}
if (lu->login_id != INVALID_LOGIN_ID) {
int generation, node_id;
/*
* tgt->node_id may be obsolete here if we failed
* during initial login or after a bus reset where
* the topology changed.
*/
generation = device->generation;
smp_rmb(); /* node_id vs. generation */
node_id = device->node_id;
sbp2_send_management_orb(lu, node_id, generation,
SBP2_LOGOUT_REQUEST,
lu->login_id, NULL);
}
fw_core_remove_address_handler(&lu->address_handler);
list_del(&lu->link);
kfree(lu);
}
scsi_remove_host(shost);
dev_notice(&unit->device, "released target %d:0:0\n", shost->host_no);
scsi_host_put(shost);
}
#define SBP2_UNIT_SPEC_ID_ENTRY 0x0000609e
#define SBP2_SW_VERSION_ENTRY 0x00010483
static const struct ieee1394_device_id sbp2_id_table[] = {
{
.match_flags = IEEE1394_MATCH_SPECIFIER_ID |
IEEE1394_MATCH_VERSION,
.specifier_id = SBP2_UNIT_SPEC_ID_ENTRY,
.version = SBP2_SW_VERSION_ENTRY,
},
{ }
};
static struct fw_driver sbp2_driver = {
.driver = {
.owner = THIS_MODULE,
.name = KBUILD_MODNAME,
.bus = &fw_bus_type,
},
.probe = sbp2_probe,
.update = sbp2_update,
.remove = sbp2_remove,
.id_table = sbp2_id_table,
};
static void sbp2_unmap_scatterlist(struct device *card_device,
struct sbp2_command_orb *orb)
{
scsi_dma_unmap(orb->cmd);
if (orb->request.misc & cpu_to_be32(COMMAND_ORB_PAGE_TABLE_PRESENT))
dma_unmap_single(card_device, orb->page_table_bus,
sizeof(orb->page_table), DMA_TO_DEVICE);
}
static unsigned int sbp2_status_to_sense_data(u8 *sbp2_status, u8 *sense_data)
{
int sam_status;
int sfmt = (sbp2_status[0] >> 6) & 0x03;
if (sfmt == 2 || sfmt == 3) {
/*
* Reserved for future standardization (2) or
* Status block format vendor-dependent (3)
*/
return DID_ERROR << 16;
}
sense_data[0] = 0x70 | sfmt | (sbp2_status[1] & 0x80);
sense_data[1] = 0x0;
sense_data[2] = ((sbp2_status[1] << 1) & 0xe0) | (sbp2_status[1] & 0x0f);
sense_data[3] = sbp2_status[4];
sense_data[4] = sbp2_status[5];
sense_data[5] = sbp2_status[6];
sense_data[6] = sbp2_status[7];
sense_data[7] = 10;
sense_data[8] = sbp2_status[8];
sense_data[9] = sbp2_status[9];
sense_data[10] = sbp2_status[10];
sense_data[11] = sbp2_status[11];
sense_data[12] = sbp2_status[2];
sense_data[13] = sbp2_status[3];
sense_data[14] = sbp2_status[12];
sense_data[15] = sbp2_status[13];
sam_status = sbp2_status[0] & 0x3f;
switch (sam_status) {
case SAM_STAT_GOOD:
case SAM_STAT_CHECK_CONDITION:
case SAM_STAT_CONDITION_MET:
case SAM_STAT_BUSY:
case SAM_STAT_RESERVATION_CONFLICT:
case SAM_STAT_COMMAND_TERMINATED:
return DID_OK << 16 | sam_status;
default:
return DID_ERROR << 16;
}
}
static void complete_command_orb(struct sbp2_orb *base_orb,
struct sbp2_status *status)
{
struct sbp2_command_orb *orb =
container_of(base_orb, struct sbp2_command_orb, base);
struct fw_device *device = target_parent_device(base_orb->lu->tgt);
int result;
if (status != NULL) {
if (STATUS_GET_DEAD(*status))
sbp2_agent_reset_no_wait(base_orb->lu);
switch (STATUS_GET_RESPONSE(*status)) {
case SBP2_STATUS_REQUEST_COMPLETE:
result = DID_OK << 16;
break;
case SBP2_STATUS_TRANSPORT_FAILURE:
result = DID_BUS_BUSY << 16;
break;
case SBP2_STATUS_ILLEGAL_REQUEST:
case SBP2_STATUS_VENDOR_DEPENDENT:
default:
result = DID_ERROR << 16;
break;
}
if (result == DID_OK << 16 && STATUS_GET_LEN(*status) > 1)
result = sbp2_status_to_sense_data(STATUS_GET_DATA(*status),
orb->cmd->sense_buffer);
} else {
/*
* If the orb completes with status == NULL, something
* went wrong, typically a bus reset happened mid-orb
* or when sending the write (less likely).
*/
result = DID_BUS_BUSY << 16;
sbp2_conditionally_block(base_orb->lu);
}
dma_unmap_single(device->card->device, orb->base.request_bus,
sizeof(orb->request), DMA_TO_DEVICE);
sbp2_unmap_scatterlist(device->card->device, orb);
orb->cmd->result = result;
scsi_done(orb->cmd);
}
static int sbp2_map_scatterlist(struct sbp2_command_orb *orb,
struct fw_device *device, struct sbp2_logical_unit *lu)
{
struct scatterlist *sg = scsi_sglist(orb->cmd);
int i, n;
n = scsi_dma_map(orb->cmd);
if (n <= 0)
goto fail;
/*
* Handle the special case where there is only one element in
* the scatter list by converting it to an immediate block
* request. This is also a workaround for broken devices such
* as the second generation iPod which doesn't support page
* tables.
*/
if (n == 1) {
orb->request.data_descriptor.high =
cpu_to_be32(lu->tgt->address_high);
orb->request.data_descriptor.low =
cpu_to_be32(sg_dma_address(sg));
orb->request.misc |=
cpu_to_be32(COMMAND_ORB_DATA_SIZE(sg_dma_len(sg)));
return 0;
}
for_each_sg(sg, sg, n, i) {
orb->page_table[i].high = cpu_to_be32(sg_dma_len(sg) << 16);
orb->page_table[i].low = cpu_to_be32(sg_dma_address(sg));
}
orb->page_table_bus =
dma_map_single(device->card->device, orb->page_table,
sizeof(orb->page_table), DMA_TO_DEVICE);
if (dma_mapping_error(device->card->device, orb->page_table_bus))
goto fail_page_table;
/*
* The data_descriptor pointer is the one case where we need
* to fill in the node ID part of the address. All other
* pointers assume that the data referenced reside on the
* initiator (i.e. us), but data_descriptor can refer to data
* on other nodes so we need to put our ID in descriptor.high.
*/
orb->request.data_descriptor.high = cpu_to_be32(lu->tgt->address_high);
orb->request.data_descriptor.low = cpu_to_be32(orb->page_table_bus);
orb->request.misc |= cpu_to_be32(COMMAND_ORB_PAGE_TABLE_PRESENT |
COMMAND_ORB_DATA_SIZE(n));
return 0;
fail_page_table:
scsi_dma_unmap(orb->cmd);
fail:
return -ENOMEM;
}
/* SCSI stack integration */
static int sbp2_scsi_queuecommand(struct Scsi_Host *shost,
struct scsi_cmnd *cmd)
{
struct sbp2_logical_unit *lu = cmd->device->hostdata;
struct fw_device *device = target_parent_device(lu->tgt);
struct sbp2_command_orb *orb;
int generation, retval = SCSI_MLQUEUE_HOST_BUSY;
orb = kzalloc(sizeof(*orb), GFP_ATOMIC);
if (orb == NULL)
return SCSI_MLQUEUE_HOST_BUSY;
/* Initialize rcode to something not RCODE_COMPLETE. */
orb->base.rcode = -1;
kref_init(&orb->base.kref);
orb->cmd = cmd;
orb->request.next.high = cpu_to_be32(SBP2_ORB_NULL);
orb->request.misc = cpu_to_be32(
COMMAND_ORB_MAX_PAYLOAD(lu->tgt->max_payload) |
COMMAND_ORB_SPEED(device->max_speed) |
COMMAND_ORB_NOTIFY);
if (cmd->sc_data_direction == DMA_FROM_DEVICE)
orb->request.misc |= cpu_to_be32(COMMAND_ORB_DIRECTION);
generation = device->generation;
smp_rmb(); /* sbp2_map_scatterlist looks at tgt->address_high */
if (scsi_sg_count(cmd) && sbp2_map_scatterlist(orb, device, lu) < 0)
goto out;
memcpy(orb->request.command_block, cmd->cmnd, cmd->cmd_len);
orb->base.callback = complete_command_orb;
orb->base.request_bus =
dma_map_single(device->card->device, &orb->request,
sizeof(orb->request), DMA_TO_DEVICE);
if (dma_mapping_error(device->card->device, orb->base.request_bus)) {
sbp2_unmap_scatterlist(device->card->device, orb);
goto out;
}
sbp2_send_orb(&orb->base, lu, lu->tgt->node_id, generation,
lu->command_block_agent_address + SBP2_ORB_POINTER);
retval = 0;
out:
kref_put(&orb->base.kref, free_orb);
return retval;
}
static int sbp2_scsi_slave_alloc(struct scsi_device *sdev)
{
struct sbp2_logical_unit *lu = sdev->hostdata;
/* (Re-)Adding logical units via the SCSI stack is not supported. */
if (!lu)
return -ENOSYS;
sdev->allow_restart = 1;
/*
* SBP-2 does not require any alignment, but we set it anyway
* for compatibility with earlier versions of this driver.
*/
blk_queue_update_dma_alignment(sdev->request_queue, 4 - 1);
if (lu->tgt->workarounds & SBP2_WORKAROUND_INQUIRY_36)
sdev->inquiry_len = 36;
return 0;
}
static int sbp2_scsi_slave_configure(struct scsi_device *sdev)
{
struct sbp2_logical_unit *lu = sdev->hostdata;
sdev->use_10_for_rw = 1;
if (sbp2_param_exclusive_login)
sdev->manage_start_stop = 1;
if (sdev->type == TYPE_ROM)
sdev->use_10_for_ms = 1;
if (sdev->type == TYPE_DISK &&
lu->tgt->workarounds & SBP2_WORKAROUND_MODE_SENSE_8)
sdev->skip_ms_page_8 = 1;
if (lu->tgt->workarounds & SBP2_WORKAROUND_FIX_CAPACITY)
sdev->fix_capacity = 1;
if (lu->tgt->workarounds & SBP2_WORKAROUND_POWER_CONDITION)
sdev->start_stop_pwr_cond = 1;
if (lu->tgt->workarounds & SBP2_WORKAROUND_128K_MAX_TRANS)
blk_queue_max_hw_sectors(sdev->request_queue, 128 * 1024 / 512);
return 0;
}
/*
* Called by scsi stack when something has really gone wrong. Usually
* called when a command has timed-out for some reason.
*/
static int sbp2_scsi_abort(struct scsi_cmnd *cmd)
{
struct sbp2_logical_unit *lu = cmd->device->hostdata;
dev_notice(lu_dev(lu), "sbp2_scsi_abort\n");
sbp2_agent_reset(lu);
sbp2_cancel_orbs(lu);
return SUCCESS;
}
/*
* Format of /sys/bus/scsi/devices/.../ieee1394_id:
* u64 EUI-64 : u24 directory_ID : u16 LUN (all printed in hexadecimal)
*
* This is the concatenation of target port identifier and logical unit
* identifier as per SAM-2...SAM-4 annex A.
*/
static ssize_t sbp2_sysfs_ieee1394_id_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct scsi_device *sdev = to_scsi_device(dev);
struct sbp2_logical_unit *lu;
if (!sdev)
return 0;
lu = sdev->hostdata;
return sprintf(buf, "%016llx:%06x:%04x\n",
(unsigned long long)lu->tgt->guid,
lu->tgt->directory_id, lu->lun);
}
static DEVICE_ATTR(ieee1394_id, S_IRUGO, sbp2_sysfs_ieee1394_id_show, NULL);
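/*
 * Example output (illustrative only, made-up values): a logical unit with
 * guid 0x0001d20200abcdef, directory_id 0x00d000 and lun 0x0000 shows up as
 * "0001d20200abcdef:00d000:0000" in /sys/bus/scsi/devices/.../ieee1394_id.
 */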
static struct attribute *sbp2_scsi_sysfs_attrs[] = {
&dev_attr_ieee1394_id.attr,
NULL
};
ATTRIBUTE_GROUPS(sbp2_scsi_sysfs);
static const struct scsi_host_template scsi_driver_template = {
.module = THIS_MODULE,
.name = "SBP-2 IEEE-1394",
.proc_name = "sbp2",
.queuecommand = sbp2_scsi_queuecommand,
.slave_alloc = sbp2_scsi_slave_alloc,
.slave_configure = sbp2_scsi_slave_configure,
.eh_abort_handler = sbp2_scsi_abort,
.this_id = -1,
.sg_tablesize = SG_ALL,
.max_segment_size = SBP2_MAX_SEG_SIZE,
.can_queue = 1,
.sdev_groups = sbp2_scsi_sysfs_groups,
};
MODULE_AUTHOR("Kristian Hoegsberg <[email protected]>");
MODULE_DESCRIPTION("SCSI over IEEE1394");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(ieee1394, sbp2_id_table);
/* Provide a module alias so root-on-sbp2 initrds don't break. */
MODULE_ALIAS("sbp2");
static int __init sbp2_init(void)
{
return driver_register(&sbp2_driver.driver);
}
static void __exit sbp2_cleanup(void)
{
driver_unregister(&sbp2_driver.driver);
}
module_init(sbp2_init);
module_exit(sbp2_cleanup);
| linux-master | drivers/firewire/sbp2.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Char device for device raw access
*
* Copyright (C) 2005-2007 Kristian Hoegsberg <[email protected]>
*/
#include <linux/bug.h>
#include <linux/compat.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/firewire.h>
#include <linux/firewire-cdev.h>
#include <linux/idr.h>
#include <linux/irqflags.h>
#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/kref.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/poll.h>
#include <linux/sched.h> /* required for linux/wait.h */
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/time.h>
#include <linux/uaccess.h>
#include <linux/vmalloc.h>
#include <linux/wait.h>
#include <linux/workqueue.h>
#include "core.h"
/*
* ABI version history is documented in linux/firewire-cdev.h.
*/
#define FW_CDEV_KERNEL_VERSION 5
#define FW_CDEV_VERSION_EVENT_REQUEST2 4
#define FW_CDEV_VERSION_ALLOCATE_REGION_END 4
#define FW_CDEV_VERSION_AUTO_FLUSH_ISO_OVERFLOW 5
#define FW_CDEV_VERSION_EVENT_ASYNC_TSTAMP 6
struct client {
u32 version;
struct fw_device *device;
spinlock_t lock;
bool in_shutdown;
struct idr resource_idr;
struct list_head event_list;
wait_queue_head_t wait;
wait_queue_head_t tx_flush_wait;
u64 bus_reset_closure;
struct fw_iso_context *iso_context;
u64 iso_closure;
struct fw_iso_buffer buffer;
unsigned long vm_start;
bool buffer_is_mapped;
struct list_head phy_receiver_link;
u64 phy_receiver_closure;
struct list_head link;
struct kref kref;
};
static inline void client_get(struct client *client)
{
kref_get(&client->kref);
}
static void client_release(struct kref *kref)
{
struct client *client = container_of(kref, struct client, kref);
fw_device_put(client->device);
kfree(client);
}
static void client_put(struct client *client)
{
kref_put(&client->kref, client_release);
}
struct client_resource;
typedef void (*client_resource_release_fn_t)(struct client *,
struct client_resource *);
struct client_resource {
client_resource_release_fn_t release;
int handle;
};
struct address_handler_resource {
struct client_resource resource;
struct fw_address_handler handler;
__u64 closure;
struct client *client;
};
struct outbound_transaction_resource {
struct client_resource resource;
struct fw_transaction transaction;
};
struct inbound_transaction_resource {
struct client_resource resource;
struct fw_card *card;
struct fw_request *request;
bool is_fcp;
void *data;
size_t length;
};
struct descriptor_resource {
struct client_resource resource;
struct fw_descriptor descriptor;
u32 data[];
};
struct iso_resource {
struct client_resource resource;
struct client *client;
/* Schedule work and access todo only with client->lock held. */
struct delayed_work work;
enum {ISO_RES_ALLOC, ISO_RES_REALLOC, ISO_RES_DEALLOC,
ISO_RES_ALLOC_ONCE, ISO_RES_DEALLOC_ONCE,} todo;
int generation;
u64 channels;
s32 bandwidth;
struct iso_resource_event *e_alloc, *e_dealloc;
};
static void release_iso_resource(struct client *, struct client_resource *);
static void schedule_iso_resource(struct iso_resource *r, unsigned long delay)
{
client_get(r->client);
if (!queue_delayed_work(fw_workqueue, &r->work, delay))
client_put(r->client);
}
static void schedule_if_iso_resource(struct client_resource *resource)
{
if (resource->release == release_iso_resource)
schedule_iso_resource(container_of(resource,
struct iso_resource, resource), 0);
}
/*
* dequeue_event() just kfree()'s the event, so the event has to be
* the first field in a struct XYZ_event.
*/
struct event {
struct { void *data; size_t size; } v[2];
struct list_head link;
};
struct bus_reset_event {
struct event event;
struct fw_cdev_event_bus_reset reset;
};
struct outbound_transaction_event {
struct event event;
struct client *client;
struct outbound_transaction_resource r;
union {
struct fw_cdev_event_response without_tstamp;
struct fw_cdev_event_response2 with_tstamp;
} rsp;
};
struct inbound_transaction_event {
struct event event;
union {
struct fw_cdev_event_request request;
struct fw_cdev_event_request2 request2;
struct fw_cdev_event_request3 with_tstamp;
} req;
};
struct iso_interrupt_event {
struct event event;
struct fw_cdev_event_iso_interrupt interrupt;
};
struct iso_interrupt_mc_event {
struct event event;
struct fw_cdev_event_iso_interrupt_mc interrupt;
};
struct iso_resource_event {
struct event event;
struct fw_cdev_event_iso_resource iso_resource;
};
struct outbound_phy_packet_event {
struct event event;
struct client *client;
struct fw_packet p;
union {
struct fw_cdev_event_phy_packet without_tstamp;
struct fw_cdev_event_phy_packet2 with_tstamp;
} phy_packet;
};
struct inbound_phy_packet_event {
struct event event;
union {
struct fw_cdev_event_phy_packet without_tstamp;
struct fw_cdev_event_phy_packet2 with_tstamp;
} phy_packet;
};
#ifdef CONFIG_COMPAT
static void __user *u64_to_uptr(u64 value)
{
if (in_compat_syscall())
return compat_ptr(value);
else
return (void __user *)(unsigned long)value;
}
static u64 uptr_to_u64(void __user *ptr)
{
if (in_compat_syscall())
return ptr_to_compat(ptr);
else
return (u64)(unsigned long)ptr;
}
#else
static inline void __user *u64_to_uptr(u64 value)
{
return (void __user *)(unsigned long)value;
}
static inline u64 uptr_to_u64(void __user *ptr)
{
return (u64)(unsigned long)ptr;
}
#endif /* CONFIG_COMPAT */
static int fw_device_op_open(struct inode *inode, struct file *file)
{
struct fw_device *device;
struct client *client;
device = fw_device_get_by_devt(inode->i_rdev);
if (device == NULL)
return -ENODEV;
if (fw_device_is_shutdown(device)) {
fw_device_put(device);
return -ENODEV;
}
client = kzalloc(sizeof(*client), GFP_KERNEL);
if (client == NULL) {
fw_device_put(device);
return -ENOMEM;
}
client->device = device;
spin_lock_init(&client->lock);
idr_init(&client->resource_idr);
INIT_LIST_HEAD(&client->event_list);
init_waitqueue_head(&client->wait);
init_waitqueue_head(&client->tx_flush_wait);
INIT_LIST_HEAD(&client->phy_receiver_link);
INIT_LIST_HEAD(&client->link);
kref_init(&client->kref);
file->private_data = client;
return nonseekable_open(inode, file);
}
static void queue_event(struct client *client, struct event *event,
void *data0, size_t size0, void *data1, size_t size1)
{
unsigned long flags;
event->v[0].data = data0;
event->v[0].size = size0;
event->v[1].data = data1;
event->v[1].size = size1;
spin_lock_irqsave(&client->lock, flags);
if (client->in_shutdown)
kfree(event);
else
list_add_tail(&event->link, &client->event_list);
spin_unlock_irqrestore(&client->lock, flags);
wake_up_interruptible(&client->wait);
}
static int dequeue_event(struct client *client,
char __user *buffer, size_t count)
{
struct event *event;
size_t size, total;
int i, ret;
ret = wait_event_interruptible(client->wait,
!list_empty(&client->event_list) ||
fw_device_is_shutdown(client->device));
if (ret < 0)
return ret;
if (list_empty(&client->event_list) &&
fw_device_is_shutdown(client->device))
return -ENODEV;
spin_lock_irq(&client->lock);
event = list_first_entry(&client->event_list, struct event, link);
list_del(&event->link);
spin_unlock_irq(&client->lock);
total = 0;
for (i = 0; i < ARRAY_SIZE(event->v) && total < count; i++) {
size = min(event->v[i].size, count - total);
if (copy_to_user(buffer + total, event->v[i].data, size)) {
ret = -EFAULT;
goto out;
}
total += size;
}
ret = total;
out:
kfree(event);
return ret;
}
static ssize_t fw_device_op_read(struct file *file, char __user *buffer,
size_t count, loff_t *offset)
{
struct client *client = file->private_data;
return dequeue_event(client, buffer, count);
}
static void fill_bus_reset_event(struct fw_cdev_event_bus_reset *event,
struct client *client)
{
struct fw_card *card = client->device->card;
spin_lock_irq(&card->lock);
event->closure = client->bus_reset_closure;
event->type = FW_CDEV_EVENT_BUS_RESET;
event->generation = client->device->generation;
event->node_id = client->device->node_id;
event->local_node_id = card->local_node->node_id;
event->bm_node_id = card->bm_node_id;
event->irm_node_id = card->irm_node->node_id;
event->root_node_id = card->root_node->node_id;
spin_unlock_irq(&card->lock);
}
static void for_each_client(struct fw_device *device,
void (*callback)(struct client *client))
{
struct client *c;
mutex_lock(&device->client_list_mutex);
list_for_each_entry(c, &device->client_list, link)
callback(c);
mutex_unlock(&device->client_list_mutex);
}
static int schedule_reallocations(int id, void *p, void *data)
{
schedule_if_iso_resource(p);
return 0;
}
static void queue_bus_reset_event(struct client *client)
{
struct bus_reset_event *e;
e = kzalloc(sizeof(*e), GFP_KERNEL);
if (e == NULL)
return;
fill_bus_reset_event(&e->reset, client);
queue_event(client, &e->event,
&e->reset, sizeof(e->reset), NULL, 0);
spin_lock_irq(&client->lock);
idr_for_each(&client->resource_idr, schedule_reallocations, client);
spin_unlock_irq(&client->lock);
}
void fw_device_cdev_update(struct fw_device *device)
{
for_each_client(device, queue_bus_reset_event);
}
static void wake_up_client(struct client *client)
{
wake_up_interruptible(&client->wait);
}
void fw_device_cdev_remove(struct fw_device *device)
{
for_each_client(device, wake_up_client);
}
union ioctl_arg {
struct fw_cdev_get_info get_info;
struct fw_cdev_send_request send_request;
struct fw_cdev_allocate allocate;
struct fw_cdev_deallocate deallocate;
struct fw_cdev_send_response send_response;
struct fw_cdev_initiate_bus_reset initiate_bus_reset;
struct fw_cdev_add_descriptor add_descriptor;
struct fw_cdev_remove_descriptor remove_descriptor;
struct fw_cdev_create_iso_context create_iso_context;
struct fw_cdev_queue_iso queue_iso;
struct fw_cdev_start_iso start_iso;
struct fw_cdev_stop_iso stop_iso;
struct fw_cdev_get_cycle_timer get_cycle_timer;
struct fw_cdev_allocate_iso_resource allocate_iso_resource;
struct fw_cdev_send_stream_packet send_stream_packet;
struct fw_cdev_get_cycle_timer2 get_cycle_timer2;
struct fw_cdev_send_phy_packet send_phy_packet;
struct fw_cdev_receive_phy_packets receive_phy_packets;
struct fw_cdev_set_iso_channels set_iso_channels;
struct fw_cdev_flush_iso flush_iso;
};
static int ioctl_get_info(struct client *client, union ioctl_arg *arg)
{
struct fw_cdev_get_info *a = &arg->get_info;
struct fw_cdev_event_bus_reset bus_reset;
unsigned long ret = 0;
client->version = a->version;
a->version = FW_CDEV_KERNEL_VERSION;
a->card = client->device->card->index;
down_read(&fw_device_rwsem);
if (a->rom != 0) {
size_t want = a->rom_length;
size_t have = client->device->config_rom_length * 4;
ret = copy_to_user(u64_to_uptr(a->rom),
client->device->config_rom, min(want, have));
}
a->rom_length = client->device->config_rom_length * 4;
up_read(&fw_device_rwsem);
if (ret != 0)
return -EFAULT;
mutex_lock(&client->device->client_list_mutex);
client->bus_reset_closure = a->bus_reset_closure;
if (a->bus_reset != 0) {
fill_bus_reset_event(&bus_reset, client);
/* unaligned size of bus_reset is 36 bytes */
ret = copy_to_user(u64_to_uptr(a->bus_reset), &bus_reset, 36);
}
if (ret == 0 && list_empty(&client->link))
list_add_tail(&client->link, &client->device->client_list);
mutex_unlock(&client->device->client_list_mutex);
return ret ? -EFAULT : 0;
}
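/*
 * idr_alloc() must not sleep while client->lock is held, so blocking
 * callers preload the idr outside the lock and the actual allocation is
 * done with GFP_NOWAIT under the lock.
 */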
static int add_client_resource(struct client *client,
struct client_resource *resource, gfp_t gfp_mask)
{
bool preload = gfpflags_allow_blocking(gfp_mask);
unsigned long flags;
int ret;
if (preload)
idr_preload(gfp_mask);
spin_lock_irqsave(&client->lock, flags);
if (client->in_shutdown)
ret = -ECANCELED;
else
ret = idr_alloc(&client->resource_idr, resource, 0, 0,
GFP_NOWAIT);
if (ret >= 0) {
resource->handle = ret;
client_get(client);
schedule_if_iso_resource(resource);
}
spin_unlock_irqrestore(&client->lock, flags);
if (preload)
idr_preload_end();
return ret < 0 ? ret : 0;
}
static int release_client_resource(struct client *client, u32 handle,
client_resource_release_fn_t release,
struct client_resource **return_resource)
{
struct client_resource *resource;
spin_lock_irq(&client->lock);
if (client->in_shutdown)
resource = NULL;
else
resource = idr_find(&client->resource_idr, handle);
if (resource && resource->release == release)
idr_remove(&client->resource_idr, handle);
spin_unlock_irq(&client->lock);
if (!(resource && resource->release == release))
return -EINVAL;
if (return_resource)
*return_resource = resource;
else
resource->release(client, resource);
client_put(client);
return 0;
}
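/*
 * Outbound transactions are torn down in complete_transaction(); their idr
 * entry only marks the request as in flight so that fw_device_op_release()
 * can wait for it via tx_flush_wait, hence the empty release callback.
 */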
static void release_transaction(struct client *client,
struct client_resource *resource)
{
}
static void complete_transaction(struct fw_card *card, int rcode, u32 request_tstamp,
u32 response_tstamp, void *payload, size_t length, void *data)
{
struct outbound_transaction_event *e = data;
struct client *client = e->client;
unsigned long flags;
spin_lock_irqsave(&client->lock, flags);
idr_remove(&client->resource_idr, e->r.resource.handle);
if (client->in_shutdown)
wake_up(&client->tx_flush_wait);
spin_unlock_irqrestore(&client->lock, flags);
switch (e->rsp.without_tstamp.type) {
case FW_CDEV_EVENT_RESPONSE:
{
struct fw_cdev_event_response *rsp = &e->rsp.without_tstamp;
if (length < rsp->length)
rsp->length = length;
if (rcode == RCODE_COMPLETE)
memcpy(rsp->data, payload, rsp->length);
rsp->rcode = rcode;
// In the case that sizeof(*rsp) doesn't align with the position of the
// data, and the read is short, preserve an extra copy of the data
// to stay compatible with a pre-2.6.27 bug. Since the bug is harmless
// for short reads and some apps depended on it, this is both safe
// and prudent for compatibility.
if (rsp->length <= sizeof(*rsp) - offsetof(typeof(*rsp), data))
queue_event(client, &e->event, rsp, sizeof(*rsp), rsp->data, rsp->length);
else
queue_event(client, &e->event, rsp, sizeof(*rsp) + rsp->length, NULL, 0);
break;
}
case FW_CDEV_EVENT_RESPONSE2:
{
struct fw_cdev_event_response2 *rsp = &e->rsp.with_tstamp;
if (length < rsp->length)
rsp->length = length;
if (rcode == RCODE_COMPLETE)
memcpy(rsp->data, payload, rsp->length);
rsp->rcode = rcode;
rsp->request_tstamp = request_tstamp;
rsp->response_tstamp = response_tstamp;
queue_event(client, &e->event, rsp, sizeof(*rsp) + rsp->length, NULL, 0);
break;
}
default:
WARN_ON(1);
break;
}
/* Drop the idr's reference */
client_put(client);
}
static int init_request(struct client *client,
struct fw_cdev_send_request *request,
int destination_id, int speed)
{
struct outbound_transaction_event *e;
void *payload;
int ret;
if (request->tcode != TCODE_STREAM_DATA &&
(request->length > 4096 || request->length > 512 << speed))
return -EIO;
if (request->tcode == TCODE_WRITE_QUADLET_REQUEST &&
request->length < 4)
return -EINVAL;
e = kmalloc(sizeof(*e) + request->length, GFP_KERNEL);
if (e == NULL)
return -ENOMEM;
e->client = client;
if (client->version < FW_CDEV_VERSION_EVENT_ASYNC_TSTAMP) {
struct fw_cdev_event_response *rsp = &e->rsp.without_tstamp;
rsp->type = FW_CDEV_EVENT_RESPONSE;
rsp->length = request->length;
rsp->closure = request->closure;
payload = rsp->data;
} else {
struct fw_cdev_event_response2 *rsp = &e->rsp.with_tstamp;
rsp->type = FW_CDEV_EVENT_RESPONSE2;
rsp->length = request->length;
rsp->closure = request->closure;
payload = rsp->data;
}
if (request->data && copy_from_user(payload, u64_to_uptr(request->data), request->length)) {
ret = -EFAULT;
goto failed;
}
e->r.resource.release = release_transaction;
ret = add_client_resource(client, &e->r.resource, GFP_KERNEL);
if (ret < 0)
goto failed;
fw_send_request_with_tstamp(client->device->card, &e->r.transaction, request->tcode,
destination_id, request->generation, speed, request->offset,
payload, request->length, complete_transaction, e);
return 0;
failed:
kfree(e);
return ret;
}
static int ioctl_send_request(struct client *client, union ioctl_arg *arg)
{
switch (arg->send_request.tcode) {
case TCODE_WRITE_QUADLET_REQUEST:
case TCODE_WRITE_BLOCK_REQUEST:
case TCODE_READ_QUADLET_REQUEST:
case TCODE_READ_BLOCK_REQUEST:
case TCODE_LOCK_MASK_SWAP:
case TCODE_LOCK_COMPARE_SWAP:
case TCODE_LOCK_FETCH_ADD:
case TCODE_LOCK_LITTLE_ADD:
case TCODE_LOCK_BOUNDED_ADD:
case TCODE_LOCK_WRAP_ADD:
case TCODE_LOCK_VENDOR_DEPENDENT:
break;
default:
return -EINVAL;
}
return init_request(client, &arg->send_request, client->device->node_id,
client->device->max_speed);
}
static void release_request(struct client *client,
struct client_resource *resource)
{
struct inbound_transaction_resource *r = container_of(resource,
struct inbound_transaction_resource, resource);
if (r->is_fcp)
fw_request_put(r->request);
else
fw_send_response(r->card, r->request, RCODE_CONFLICT_ERROR);
fw_card_put(r->card);
kfree(r);
}
static void handle_request(struct fw_card *card, struct fw_request *request,
int tcode, int destination, int source,
int generation, unsigned long long offset,
void *payload, size_t length, void *callback_data)
{
struct address_handler_resource *handler = callback_data;
bool is_fcp = is_in_fcp_region(offset, length);
struct inbound_transaction_resource *r;
struct inbound_transaction_event *e;
size_t event_size0;
int ret;
/* card may be different from handler->client->device->card */
fw_card_get(card);
// Extend the lifetime of data for request so that its payload is safely accessible in
// the process context for the client.
if (is_fcp)
fw_request_get(request);
r = kmalloc(sizeof(*r), GFP_ATOMIC);
e = kmalloc(sizeof(*e), GFP_ATOMIC);
if (r == NULL || e == NULL)
goto failed;
r->card = card;
r->request = request;
r->is_fcp = is_fcp;
r->data = payload;
r->length = length;
r->resource.release = release_request;
ret = add_client_resource(handler->client, &r->resource, GFP_ATOMIC);
if (ret < 0)
goto failed;
if (handler->client->version < FW_CDEV_VERSION_EVENT_REQUEST2) {
struct fw_cdev_event_request *req = &e->req.request;
if (tcode & 0x10)
tcode = TCODE_LOCK_REQUEST;
req->type = FW_CDEV_EVENT_REQUEST;
req->tcode = tcode;
req->offset = offset;
req->length = length;
req->handle = r->resource.handle;
req->closure = handler->closure;
event_size0 = sizeof(*req);
} else if (handler->client->version < FW_CDEV_VERSION_EVENT_ASYNC_TSTAMP) {
struct fw_cdev_event_request2 *req = &e->req.request2;
req->type = FW_CDEV_EVENT_REQUEST2;
req->tcode = tcode;
req->offset = offset;
req->source_node_id = source;
req->destination_node_id = destination;
req->card = card->index;
req->generation = generation;
req->length = length;
req->handle = r->resource.handle;
req->closure = handler->closure;
event_size0 = sizeof(*req);
} else {
struct fw_cdev_event_request3 *req = &e->req.with_tstamp;
req->type = FW_CDEV_EVENT_REQUEST3;
req->tcode = tcode;
req->offset = offset;
req->source_node_id = source;
req->destination_node_id = destination;
req->card = card->index;
req->generation = generation;
req->length = length;
req->handle = r->resource.handle;
req->closure = handler->closure;
req->tstamp = fw_request_get_timestamp(request);
event_size0 = sizeof(*req);
}
queue_event(handler->client, &e->event,
&e->req, event_size0, r->data, length);
return;
failed:
kfree(r);
kfree(e);
if (!is_fcp)
fw_send_response(card, request, RCODE_CONFLICT_ERROR);
else
fw_request_put(request);
fw_card_put(card);
}
static void release_address_handler(struct client *client,
struct client_resource *resource)
{
struct address_handler_resource *r =
container_of(resource, struct address_handler_resource, resource);
fw_core_remove_address_handler(&r->handler);
kfree(r);
}
static int ioctl_allocate(struct client *client, union ioctl_arg *arg)
{
struct fw_cdev_allocate *a = &arg->allocate;
struct address_handler_resource *r;
struct fw_address_region region;
int ret;
r = kmalloc(sizeof(*r), GFP_KERNEL);
if (r == NULL)
return -ENOMEM;
region.start = a->offset;
if (client->version < FW_CDEV_VERSION_ALLOCATE_REGION_END)
region.end = a->offset + a->length;
else
region.end = a->region_end;
r->handler.length = a->length;
r->handler.address_callback = handle_request;
r->handler.callback_data = r;
r->closure = a->closure;
r->client = client;
ret = fw_core_add_address_handler(&r->handler, &region);
if (ret < 0) {
kfree(r);
return ret;
}
a->offset = r->handler.offset;
r->resource.release = release_address_handler;
ret = add_client_resource(client, &r->resource, GFP_KERNEL);
if (ret < 0) {
release_address_handler(client, &r->resource);
return ret;
}
a->handle = r->resource.handle;
return 0;
}
static int ioctl_deallocate(struct client *client, union ioctl_arg *arg)
{
return release_client_resource(client, arg->deallocate.handle,
release_address_handler, NULL);
}
static int ioctl_send_response(struct client *client, union ioctl_arg *arg)
{
struct fw_cdev_send_response *a = &arg->send_response;
struct client_resource *resource;
struct inbound_transaction_resource *r;
int ret = 0;
if (release_client_resource(client, a->handle,
release_request, &resource) < 0)
return -EINVAL;
r = container_of(resource, struct inbound_transaction_resource,
resource);
if (r->is_fcp) {
fw_request_put(r->request);
goto out;
}
if (a->length != fw_get_response_length(r->request)) {
ret = -EINVAL;
fw_request_put(r->request);
goto out;
}
if (copy_from_user(r->data, u64_to_uptr(a->data), a->length)) {
ret = -EFAULT;
fw_request_put(r->request);
goto out;
}
fw_send_response(r->card, r->request, a->rcode);
out:
fw_card_put(r->card);
kfree(r);
return ret;
}
static int ioctl_initiate_bus_reset(struct client *client, union ioctl_arg *arg)
{
fw_schedule_bus_reset(client->device->card, true,
arg->initiate_bus_reset.type == FW_CDEV_SHORT_RESET);
return 0;
}
static void release_descriptor(struct client *client,
struct client_resource *resource)
{
struct descriptor_resource *r =
container_of(resource, struct descriptor_resource, resource);
fw_core_remove_descriptor(&r->descriptor);
kfree(r);
}
static int ioctl_add_descriptor(struct client *client, union ioctl_arg *arg)
{
struct fw_cdev_add_descriptor *a = &arg->add_descriptor;
struct descriptor_resource *r;
int ret;
/* Access policy: Allow this ioctl only on local nodes' device files. */
if (!client->device->is_local)
return -ENOSYS;
if (a->length > 256)
return -EINVAL;
r = kmalloc(sizeof(*r) + a->length * 4, GFP_KERNEL);
if (r == NULL)
return -ENOMEM;
if (copy_from_user(r->data, u64_to_uptr(a->data), a->length * 4)) {
ret = -EFAULT;
goto failed;
}
r->descriptor.length = a->length;
r->descriptor.immediate = a->immediate;
r->descriptor.key = a->key;
r->descriptor.data = r->data;
ret = fw_core_add_descriptor(&r->descriptor);
if (ret < 0)
goto failed;
r->resource.release = release_descriptor;
ret = add_client_resource(client, &r->resource, GFP_KERNEL);
if (ret < 0) {
fw_core_remove_descriptor(&r->descriptor);
goto failed;
}
a->handle = r->resource.handle;
return 0;
failed:
kfree(r);
return ret;
}
static int ioctl_remove_descriptor(struct client *client, union ioctl_arg *arg)
{
return release_client_resource(client, arg->remove_descriptor.handle,
release_descriptor, NULL);
}
static void iso_callback(struct fw_iso_context *context, u32 cycle,
size_t header_length, void *header, void *data)
{
struct client *client = data;
struct iso_interrupt_event *e;
e = kmalloc(sizeof(*e) + header_length, GFP_ATOMIC);
if (e == NULL)
return;
e->interrupt.type = FW_CDEV_EVENT_ISO_INTERRUPT;
e->interrupt.closure = client->iso_closure;
e->interrupt.cycle = cycle;
e->interrupt.header_length = header_length;
memcpy(e->interrupt.header, header, header_length);
queue_event(client, &e->event, &e->interrupt,
sizeof(e->interrupt) + header_length, NULL, 0);
}
static void iso_mc_callback(struct fw_iso_context *context,
dma_addr_t completed, void *data)
{
struct client *client = data;
struct iso_interrupt_mc_event *e;
e = kmalloc(sizeof(*e), GFP_ATOMIC);
if (e == NULL)
return;
e->interrupt.type = FW_CDEV_EVENT_ISO_INTERRUPT_MULTICHANNEL;
e->interrupt.closure = client->iso_closure;
e->interrupt.completed = fw_iso_buffer_lookup(&client->buffer,
completed);
queue_event(client, &e->event, &e->interrupt,
sizeof(e->interrupt), NULL, 0);
}
static enum dma_data_direction iso_dma_direction(struct fw_iso_context *context)
{
if (context->type == FW_ISO_CONTEXT_TRANSMIT)
return DMA_TO_DEVICE;
else
return DMA_FROM_DEVICE;
}
static struct fw_iso_context *fw_iso_mc_context_create(struct fw_card *card,
fw_iso_mc_callback_t callback,
void *callback_data)
{
struct fw_iso_context *ctx;
ctx = fw_iso_context_create(card, FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL,
0, 0, 0, NULL, callback_data);
if (!IS_ERR(ctx))
ctx->callback.mc = callback;
return ctx;
}
static int ioctl_create_iso_context(struct client *client, union ioctl_arg *arg)
{
struct fw_cdev_create_iso_context *a = &arg->create_iso_context;
struct fw_iso_context *context;
union fw_iso_callback cb;
int ret;
BUILD_BUG_ON(FW_CDEV_ISO_CONTEXT_TRANSMIT != FW_ISO_CONTEXT_TRANSMIT ||
FW_CDEV_ISO_CONTEXT_RECEIVE != FW_ISO_CONTEXT_RECEIVE ||
FW_CDEV_ISO_CONTEXT_RECEIVE_MULTICHANNEL !=
FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL);
switch (a->type) {
case FW_ISO_CONTEXT_TRANSMIT:
if (a->speed > SCODE_3200 || a->channel > 63)
return -EINVAL;
cb.sc = iso_callback;
break;
case FW_ISO_CONTEXT_RECEIVE:
if (a->header_size < 4 || (a->header_size & 3) ||
a->channel > 63)
return -EINVAL;
cb.sc = iso_callback;
break;
case FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL:
cb.mc = iso_mc_callback;
break;
default:
return -EINVAL;
}
if (a->type == FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL)
context = fw_iso_mc_context_create(client->device->card, cb.mc,
client);
else
context = fw_iso_context_create(client->device->card, a->type,
a->channel, a->speed,
a->header_size, cb.sc, client);
if (IS_ERR(context))
return PTR_ERR(context);
if (client->version < FW_CDEV_VERSION_AUTO_FLUSH_ISO_OVERFLOW)
context->drop_overflow_headers = true;
/* We only support one context at this time. */
spin_lock_irq(&client->lock);
if (client->iso_context != NULL) {
spin_unlock_irq(&client->lock);
fw_iso_context_destroy(context);
return -EBUSY;
}
if (!client->buffer_is_mapped) {
ret = fw_iso_buffer_map_dma(&client->buffer,
client->device->card,
iso_dma_direction(context));
if (ret < 0) {
spin_unlock_irq(&client->lock);
fw_iso_context_destroy(context);
return ret;
}
client->buffer_is_mapped = true;
}
client->iso_closure = a->closure;
client->iso_context = context;
spin_unlock_irq(&client->lock);
a->handle = 0;
return 0;
}
static int ioctl_set_iso_channels(struct client *client, union ioctl_arg *arg)
{
struct fw_cdev_set_iso_channels *a = &arg->set_iso_channels;
struct fw_iso_context *ctx = client->iso_context;
if (ctx == NULL || a->handle != 0)
return -EINVAL;
return fw_iso_context_set_channels(ctx, &a->channels);
}
/* Macros for decoding the iso packet control header. */
#define GET_PAYLOAD_LENGTH(v) ((v) & 0xffff)
#define GET_INTERRUPT(v) (((v) >> 16) & 0x01)
#define GET_SKIP(v) (((v) >> 17) & 0x01)
#define GET_TAG(v) (((v) >> 18) & 0x03)
#define GET_SY(v) (((v) >> 20) & 0x0f)
#define GET_HEADER_LENGTH(v) (((v) >> 24) & 0xff)
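/*
 * Layout of fw_cdev_iso_packet.control as decoded by the macros above:
 *   bits  0..15  payload_length
 *   bit   16     interrupt
 *   bit   17     skip
 *   bits 18..19  tag
 *   bits 20..23  sy
 *   bits 24..31  header_length
 */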
static int ioctl_queue_iso(struct client *client, union ioctl_arg *arg)
{
struct fw_cdev_queue_iso *a = &arg->queue_iso;
struct fw_cdev_iso_packet __user *p, *end, *next;
struct fw_iso_context *ctx = client->iso_context;
unsigned long payload, buffer_end, transmit_header_bytes = 0;
u32 control;
int count;
struct {
struct fw_iso_packet packet;
u8 header[256];
} u;
if (ctx == NULL || a->handle != 0)
return -EINVAL;
/*
* If the user passes a non-NULL data pointer, has mmap()'ed
* the iso buffer, and the pointer points inside the buffer,
* we setup the payload pointers accordingly. Otherwise we
* set them both to 0, which will still let packets with
* payload_length == 0 through. In other words, if no packets
* use the indirect payload, the iso buffer need not be mapped
* and the a->data pointer is ignored.
*/
payload = (unsigned long)a->data - client->vm_start;
buffer_end = client->buffer.page_count << PAGE_SHIFT;
if (a->data == 0 || client->buffer.pages == NULL ||
payload >= buffer_end) {
payload = 0;
buffer_end = 0;
}
if (ctx->type == FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL && payload & 3)
return -EINVAL;
p = (struct fw_cdev_iso_packet __user *)u64_to_uptr(a->packets);
end = (void __user *)p + a->size;
count = 0;
while (p < end) {
if (get_user(control, &p->control))
return -EFAULT;
u.packet.payload_length = GET_PAYLOAD_LENGTH(control);
u.packet.interrupt = GET_INTERRUPT(control);
u.packet.skip = GET_SKIP(control);
u.packet.tag = GET_TAG(control);
u.packet.sy = GET_SY(control);
u.packet.header_length = GET_HEADER_LENGTH(control);
switch (ctx->type) {
case FW_ISO_CONTEXT_TRANSMIT:
if (u.packet.header_length & 3)
return -EINVAL;
transmit_header_bytes = u.packet.header_length;
break;
case FW_ISO_CONTEXT_RECEIVE:
if (u.packet.header_length == 0 ||
u.packet.header_length % ctx->header_size != 0)
return -EINVAL;
break;
case FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL:
if (u.packet.payload_length == 0 ||
u.packet.payload_length & 3)
return -EINVAL;
break;
}
next = (struct fw_cdev_iso_packet __user *)
&p->header[transmit_header_bytes / 4];
if (next > end)
return -EINVAL;
if (copy_from_user
(u.packet.header, p->header, transmit_header_bytes))
return -EFAULT;
if (u.packet.skip && ctx->type == FW_ISO_CONTEXT_TRANSMIT &&
u.packet.header_length + u.packet.payload_length > 0)
return -EINVAL;
if (payload + u.packet.payload_length > buffer_end)
return -EINVAL;
if (fw_iso_context_queue(ctx, &u.packet,
&client->buffer, payload))
break;
p = next;
payload += u.packet.payload_length;
count++;
}
fw_iso_context_queue_flush(ctx);
a->size -= uptr_to_u64(p) - a->packets;
a->packets = uptr_to_u64(p);
a->data = client->vm_start + payload;
return count;
}
static int ioctl_start_iso(struct client *client, union ioctl_arg *arg)
{
struct fw_cdev_start_iso *a = &arg->start_iso;
BUILD_BUG_ON(
FW_CDEV_ISO_CONTEXT_MATCH_TAG0 != FW_ISO_CONTEXT_MATCH_TAG0 ||
FW_CDEV_ISO_CONTEXT_MATCH_TAG1 != FW_ISO_CONTEXT_MATCH_TAG1 ||
FW_CDEV_ISO_CONTEXT_MATCH_TAG2 != FW_ISO_CONTEXT_MATCH_TAG2 ||
FW_CDEV_ISO_CONTEXT_MATCH_TAG3 != FW_ISO_CONTEXT_MATCH_TAG3 ||
FW_CDEV_ISO_CONTEXT_MATCH_ALL_TAGS != FW_ISO_CONTEXT_MATCH_ALL_TAGS);
if (client->iso_context == NULL || a->handle != 0)
return -EINVAL;
if (client->iso_context->type == FW_ISO_CONTEXT_RECEIVE &&
(a->tags == 0 || a->tags > 15 || a->sync > 15))
return -EINVAL;
return fw_iso_context_start(client->iso_context,
a->cycle, a->sync, a->tags);
}
static int ioctl_stop_iso(struct client *client, union ioctl_arg *arg)
{
struct fw_cdev_stop_iso *a = &arg->stop_iso;
if (client->iso_context == NULL || a->handle != 0)
return -EINVAL;
return fw_iso_context_stop(client->iso_context);
}
static int ioctl_flush_iso(struct client *client, union ioctl_arg *arg)
{
struct fw_cdev_flush_iso *a = &arg->flush_iso;
if (client->iso_context == NULL || a->handle != 0)
return -EINVAL;
return fw_iso_context_flush_completions(client->iso_context);
}
static int ioctl_get_cycle_timer2(struct client *client, union ioctl_arg *arg)
{
struct fw_cdev_get_cycle_timer2 *a = &arg->get_cycle_timer2;
struct fw_card *card = client->device->card;
struct timespec64 ts = {0, 0};
u32 cycle_time = 0;
int ret = 0;
local_irq_disable();
ret = fw_card_read_cycle_time(card, &cycle_time);
if (ret < 0)
goto end;
switch (a->clk_id) {
case CLOCK_REALTIME: ktime_get_real_ts64(&ts); break;
case CLOCK_MONOTONIC: ktime_get_ts64(&ts); break;
case CLOCK_MONOTONIC_RAW: ktime_get_raw_ts64(&ts); break;
default:
ret = -EINVAL;
}
end:
local_irq_enable();
a->tv_sec = ts.tv_sec;
a->tv_nsec = ts.tv_nsec;
a->cycle_timer = cycle_time;
return ret;
}
static int ioctl_get_cycle_timer(struct client *client, union ioctl_arg *arg)
{
struct fw_cdev_get_cycle_timer *a = &arg->get_cycle_timer;
struct fw_cdev_get_cycle_timer2 ct2;
ct2.clk_id = CLOCK_REALTIME;
ioctl_get_cycle_timer2(client, (union ioctl_arg *)&ct2);
a->local_time = ct2.tv_sec * USEC_PER_SEC + ct2.tv_nsec / NSEC_PER_USEC;
a->cycle_timer = ct2.cycle_timer;
return 0;
}
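/*
 * iso_resource_work() runs from fw_workqueue and performs whatever r->todo
 * currently asks for: a managed or one-shot (de)allocation, or a
 * reallocation after a bus reset.  schedule_iso_resource() takes a client
 * reference for every successfully queued run; it is dropped at the end of
 * the work function.
 */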
static void iso_resource_work(struct work_struct *work)
{
struct iso_resource_event *e;
struct iso_resource *r =
container_of(work, struct iso_resource, work.work);
struct client *client = r->client;
int generation, channel, bandwidth, todo;
bool skip, free, success;
spin_lock_irq(&client->lock);
generation = client->device->generation;
todo = r->todo;
/* Allow 1000ms grace period for other reallocations. */
if (todo == ISO_RES_ALLOC &&
time_before64(get_jiffies_64(),
client->device->card->reset_jiffies + HZ)) {
schedule_iso_resource(r, DIV_ROUND_UP(HZ, 3));
skip = true;
} else {
/* We could be called twice within the same generation. */
skip = todo == ISO_RES_REALLOC &&
r->generation == generation;
}
free = todo == ISO_RES_DEALLOC ||
todo == ISO_RES_ALLOC_ONCE ||
todo == ISO_RES_DEALLOC_ONCE;
r->generation = generation;
spin_unlock_irq(&client->lock);
if (skip)
goto out;
bandwidth = r->bandwidth;
fw_iso_resource_manage(client->device->card, generation,
r->channels, &channel, &bandwidth,
todo == ISO_RES_ALLOC ||
todo == ISO_RES_REALLOC ||
todo == ISO_RES_ALLOC_ONCE);
/*
* Is this generation outdated already? As long as this resource sticks
* in the idr, it will be scheduled again for a newer generation or at
* shutdown.
*/
if (channel == -EAGAIN &&
(todo == ISO_RES_ALLOC || todo == ISO_RES_REALLOC))
goto out;
success = channel >= 0 || bandwidth > 0;
spin_lock_irq(&client->lock);
/*
* Transit from allocation to reallocation, except if the client
* requested deallocation in the meantime.
*/
if (r->todo == ISO_RES_ALLOC)
r->todo = ISO_RES_REALLOC;
/*
* Allocation or reallocation failure? Pull this resource out of the
* idr and prepare for deletion, unless the client is shutting down.
*/
if (r->todo == ISO_RES_REALLOC && !success &&
!client->in_shutdown &&
idr_remove(&client->resource_idr, r->resource.handle)) {
client_put(client);
free = true;
}
spin_unlock_irq(&client->lock);
if (todo == ISO_RES_ALLOC && channel >= 0)
r->channels = 1ULL << channel;
if (todo == ISO_RES_REALLOC && success)
goto out;
if (todo == ISO_RES_ALLOC || todo == ISO_RES_ALLOC_ONCE) {
e = r->e_alloc;
r->e_alloc = NULL;
} else {
e = r->e_dealloc;
r->e_dealloc = NULL;
}
e->iso_resource.handle = r->resource.handle;
e->iso_resource.channel = channel;
e->iso_resource.bandwidth = bandwidth;
queue_event(client, &e->event,
&e->iso_resource, sizeof(e->iso_resource), NULL, 0);
if (free) {
cancel_delayed_work(&r->work);
kfree(r->e_alloc);
kfree(r->e_dealloc);
kfree(r);
}
out:
client_put(client);
}
static void release_iso_resource(struct client *client,
struct client_resource *resource)
{
struct iso_resource *r =
container_of(resource, struct iso_resource, resource);
spin_lock_irq(&client->lock);
r->todo = ISO_RES_DEALLOC;
schedule_iso_resource(r, 0);
spin_unlock_irq(&client->lock);
}
static int init_iso_resource(struct client *client,
struct fw_cdev_allocate_iso_resource *request, int todo)
{
struct iso_resource_event *e1, *e2;
struct iso_resource *r;
int ret;
if ((request->channels == 0 && request->bandwidth == 0) ||
request->bandwidth > BANDWIDTH_AVAILABLE_INITIAL)
return -EINVAL;
r = kmalloc(sizeof(*r), GFP_KERNEL);
e1 = kmalloc(sizeof(*e1), GFP_KERNEL);
e2 = kmalloc(sizeof(*e2), GFP_KERNEL);
if (r == NULL || e1 == NULL || e2 == NULL) {
ret = -ENOMEM;
goto fail;
}
INIT_DELAYED_WORK(&r->work, iso_resource_work);
r->client = client;
r->todo = todo;
r->generation = -1;
r->channels = request->channels;
r->bandwidth = request->bandwidth;
r->e_alloc = e1;
r->e_dealloc = e2;
e1->iso_resource.closure = request->closure;
e1->iso_resource.type = FW_CDEV_EVENT_ISO_RESOURCE_ALLOCATED;
e2->iso_resource.closure = request->closure;
e2->iso_resource.type = FW_CDEV_EVENT_ISO_RESOURCE_DEALLOCATED;
if (todo == ISO_RES_ALLOC) {
r->resource.release = release_iso_resource;
ret = add_client_resource(client, &r->resource, GFP_KERNEL);
if (ret < 0)
goto fail;
} else {
r->resource.release = NULL;
r->resource.handle = -1;
schedule_iso_resource(r, 0);
}
request->handle = r->resource.handle;
return 0;
fail:
kfree(r);
kfree(e1);
kfree(e2);
return ret;
}
static int ioctl_allocate_iso_resource(struct client *client,
union ioctl_arg *arg)
{
return init_iso_resource(client,
&arg->allocate_iso_resource, ISO_RES_ALLOC);
}
static int ioctl_deallocate_iso_resource(struct client *client,
union ioctl_arg *arg)
{
return release_client_resource(client,
arg->deallocate.handle, release_iso_resource, NULL);
}
static int ioctl_allocate_iso_resource_once(struct client *client,
union ioctl_arg *arg)
{
return init_iso_resource(client,
&arg->allocate_iso_resource, ISO_RES_ALLOC_ONCE);
}
static int ioctl_deallocate_iso_resource_once(struct client *client,
union ioctl_arg *arg)
{
return init_iso_resource(client,
&arg->allocate_iso_resource, ISO_RES_DEALLOC_ONCE);
}
/*
* Returns a speed code: Maximum speed to or from this device,
* limited by the device's link speed, the local node's link speed,
* and all PHY port speeds between the two links.
*/
static int ioctl_get_speed(struct client *client, union ioctl_arg *arg)
{
return client->device->max_speed;
}
static int ioctl_send_broadcast_request(struct client *client,
union ioctl_arg *arg)
{
struct fw_cdev_send_request *a = &arg->send_request;
switch (a->tcode) {
case TCODE_WRITE_QUADLET_REQUEST:
case TCODE_WRITE_BLOCK_REQUEST:
break;
default:
return -EINVAL;
}
/* Security policy: Only allow accesses to Units Space. */
if (a->offset < CSR_REGISTER_BASE + CSR_CONFIG_ROM_END)
return -EACCES;
return init_request(client, a, LOCAL_BUS | 0x3f, SCODE_100);
}
static int ioctl_send_stream_packet(struct client *client, union ioctl_arg *arg)
{
struct fw_cdev_send_stream_packet *a = &arg->send_stream_packet;
struct fw_cdev_send_request request;
int dest;
if (a->speed > client->device->card->link_speed ||
a->length > 1024 << a->speed)
return -EIO;
if (a->tag > 3 || a->channel > 63 || a->sy > 15)
return -EINVAL;
dest = fw_stream_packet_destination_id(a->tag, a->channel, a->sy);
request.tcode = TCODE_STREAM_DATA;
request.length = a->length;
request.closure = a->closure;
request.data = a->data;
request.generation = a->generation;
return init_request(client, &request, dest, a->speed);
}
static void outbound_phy_packet_callback(struct fw_packet *packet,
struct fw_card *card, int status)
{
struct outbound_phy_packet_event *e =
container_of(packet, struct outbound_phy_packet_event, p);
struct client *e_client = e->client;
u32 rcode;
switch (status) {
// expected:
case ACK_COMPLETE:
rcode = RCODE_COMPLETE;
break;
// should never happen with PHY packets:
case ACK_PENDING:
rcode = RCODE_COMPLETE;
break;
case ACK_BUSY_X:
case ACK_BUSY_A:
case ACK_BUSY_B:
rcode = RCODE_BUSY;
break;
case ACK_DATA_ERROR:
rcode = RCODE_DATA_ERROR;
break;
case ACK_TYPE_ERROR:
rcode = RCODE_TYPE_ERROR;
break;
// stale generation; cancelled; on certain controllers: no ack
default:
rcode = status;
break;
}
switch (e->phy_packet.without_tstamp.type) {
case FW_CDEV_EVENT_PHY_PACKET_SENT:
{
struct fw_cdev_event_phy_packet *pp = &e->phy_packet.without_tstamp;
pp->rcode = rcode;
pp->data[0] = packet->timestamp;
queue_event(e->client, &e->event, &e->phy_packet, sizeof(*pp) + pp->length,
NULL, 0);
break;
}
case FW_CDEV_EVENT_PHY_PACKET_SENT2:
{
struct fw_cdev_event_phy_packet2 *pp = &e->phy_packet.with_tstamp;
pp->rcode = rcode;
pp->tstamp = packet->timestamp;
queue_event(e->client, &e->event, &e->phy_packet, sizeof(*pp) + pp->length,
NULL, 0);
break;
}
default:
WARN_ON(1);
break;
}
client_put(e_client);
}
static int ioctl_send_phy_packet(struct client *client, union ioctl_arg *arg)
{
struct fw_cdev_send_phy_packet *a = &arg->send_phy_packet;
struct fw_card *card = client->device->card;
struct outbound_phy_packet_event *e;
/* Access policy: Allow this ioctl only on local nodes' device files. */
if (!client->device->is_local)
return -ENOSYS;
e = kzalloc(sizeof(*e) + sizeof(a->data), GFP_KERNEL);
if (e == NULL)
return -ENOMEM;
client_get(client);
e->client = client;
e->p.speed = SCODE_100;
e->p.generation = a->generation;
e->p.header[0] = TCODE_LINK_INTERNAL << 4;
e->p.header[1] = a->data[0];
e->p.header[2] = a->data[1];
e->p.header_length = 12;
e->p.callback = outbound_phy_packet_callback;
if (client->version < FW_CDEV_VERSION_EVENT_ASYNC_TSTAMP) {
struct fw_cdev_event_phy_packet *pp = &e->phy_packet.without_tstamp;
pp->closure = a->closure;
pp->type = FW_CDEV_EVENT_PHY_PACKET_SENT;
if (is_ping_packet(a->data))
pp->length = 4;
} else {
struct fw_cdev_event_phy_packet2 *pp = &e->phy_packet.with_tstamp;
pp->closure = a->closure;
pp->type = FW_CDEV_EVENT_PHY_PACKET_SENT2;
// Keep the data field so that application can match the response event to the
// request.
pp->length = sizeof(a->data);
memcpy(pp->data, a->data, sizeof(a->data));
}
card->driver->send_request(card, &e->p);
return 0;
}
static int ioctl_receive_phy_packets(struct client *client, union ioctl_arg *arg)
{
struct fw_cdev_receive_phy_packets *a = &arg->receive_phy_packets;
struct fw_card *card = client->device->card;
/* Access policy: Allow this ioctl only on local nodes' device files. */
if (!client->device->is_local)
return -ENOSYS;
spin_lock_irq(&card->lock);
list_move_tail(&client->phy_receiver_link, &card->phy_receiver_list);
client->phy_receiver_closure = a->closure;
spin_unlock_irq(&card->lock);
return 0;
}
void fw_cdev_handle_phy_packet(struct fw_card *card, struct fw_packet *p)
{
struct client *client;
struct inbound_phy_packet_event *e;
unsigned long flags;
spin_lock_irqsave(&card->lock, flags);
list_for_each_entry(client, &card->phy_receiver_list, phy_receiver_link) {
e = kmalloc(sizeof(*e) + 8, GFP_ATOMIC);
if (e == NULL)
break;
if (client->version < FW_CDEV_VERSION_EVENT_ASYNC_TSTAMP) {
struct fw_cdev_event_phy_packet *pp = &e->phy_packet.without_tstamp;
pp->closure = client->phy_receiver_closure;
pp->type = FW_CDEV_EVENT_PHY_PACKET_RECEIVED;
pp->rcode = RCODE_COMPLETE;
pp->length = 8;
pp->data[0] = p->header[1];
pp->data[1] = p->header[2];
queue_event(client, &e->event, &e->phy_packet, sizeof(*pp) + 8, NULL, 0);
} else {
struct fw_cdev_event_phy_packet2 *pp = &e->phy_packet.with_tstamp;
pp->closure = client->phy_receiver_closure;
pp->type = FW_CDEV_EVENT_PHY_PACKET_RECEIVED2;
pp->rcode = RCODE_COMPLETE;
pp->length = 8;
pp->tstamp = p->timestamp;
pp->data[0] = p->header[1];
pp->data[1] = p->header[2];
queue_event(client, &e->event, &e->phy_packet, sizeof(*pp) + 8, NULL, 0);
}
}
spin_unlock_irqrestore(&card->lock, flags);
}
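/*
 * The index into this table is the ioctl's _IOC_NR(); dispatch_ioctl()
 * rejects any command number that falls outside of it.
 */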
static int (* const ioctl_handlers[])(struct client *, union ioctl_arg *) = {
[0x00] = ioctl_get_info,
[0x01] = ioctl_send_request,
[0x02] = ioctl_allocate,
[0x03] = ioctl_deallocate,
[0x04] = ioctl_send_response,
[0x05] = ioctl_initiate_bus_reset,
[0x06] = ioctl_add_descriptor,
[0x07] = ioctl_remove_descriptor,
[0x08] = ioctl_create_iso_context,
[0x09] = ioctl_queue_iso,
[0x0a] = ioctl_start_iso,
[0x0b] = ioctl_stop_iso,
[0x0c] = ioctl_get_cycle_timer,
[0x0d] = ioctl_allocate_iso_resource,
[0x0e] = ioctl_deallocate_iso_resource,
[0x0f] = ioctl_allocate_iso_resource_once,
[0x10] = ioctl_deallocate_iso_resource_once,
[0x11] = ioctl_get_speed,
[0x12] = ioctl_send_broadcast_request,
[0x13] = ioctl_send_stream_packet,
[0x14] = ioctl_get_cycle_timer2,
[0x15] = ioctl_send_phy_packet,
[0x16] = ioctl_receive_phy_packets,
[0x17] = ioctl_set_iso_channels,
[0x18] = ioctl_flush_iso,
};
static int dispatch_ioctl(struct client *client,
unsigned int cmd, void __user *arg)
{
union ioctl_arg buffer;
int ret;
if (fw_device_is_shutdown(client->device))
return -ENODEV;
if (_IOC_TYPE(cmd) != '#' ||
_IOC_NR(cmd) >= ARRAY_SIZE(ioctl_handlers) ||
_IOC_SIZE(cmd) > sizeof(buffer))
return -ENOTTY;
memset(&buffer, 0, sizeof(buffer));
if (_IOC_DIR(cmd) & _IOC_WRITE)
if (copy_from_user(&buffer, arg, _IOC_SIZE(cmd)))
return -EFAULT;
ret = ioctl_handlers[_IOC_NR(cmd)](client, &buffer);
if (ret < 0)
return ret;
if (_IOC_DIR(cmd) & _IOC_READ)
if (copy_to_user(arg, &buffer, _IOC_SIZE(cmd)))
return -EFAULT;
return ret;
}
static long fw_device_op_ioctl(struct file *file,
unsigned int cmd, unsigned long arg)
{
return dispatch_ioctl(file->private_data, cmd, (void __user *)arg);
}
static int fw_device_op_mmap(struct file *file, struct vm_area_struct *vma)
{
struct client *client = file->private_data;
unsigned long size;
int page_count, ret;
if (fw_device_is_shutdown(client->device))
return -ENODEV;
/* FIXME: We could support multiple buffers, but we don't. */
if (client->buffer.pages != NULL)
return -EBUSY;
if (!(vma->vm_flags & VM_SHARED))
return -EINVAL;
if (vma->vm_start & ~PAGE_MASK)
return -EINVAL;
client->vm_start = vma->vm_start;
size = vma->vm_end - vma->vm_start;
page_count = size >> PAGE_SHIFT;
if (size & ~PAGE_MASK)
return -EINVAL;
ret = fw_iso_buffer_alloc(&client->buffer, page_count);
if (ret < 0)
return ret;
spin_lock_irq(&client->lock);
if (client->iso_context) {
ret = fw_iso_buffer_map_dma(&client->buffer,
client->device->card,
iso_dma_direction(client->iso_context));
client->buffer_is_mapped = (ret == 0);
}
spin_unlock_irq(&client->lock);
if (ret < 0)
goto fail;
ret = vm_map_pages_zero(vma, client->buffer.pages,
client->buffer.page_count);
if (ret < 0)
goto fail;
return 0;
fail:
fw_iso_buffer_destroy(&client->buffer, client->device->card);
return ret;
}
static int is_outbound_transaction_resource(int id, void *p, void *data)
{
struct client_resource *resource = p;
return resource->release == release_transaction;
}
static int has_outbound_transactions(struct client *client)
{
int ret;
spin_lock_irq(&client->lock);
ret = idr_for_each(&client->resource_idr,
is_outbound_transaction_resource, NULL);
spin_unlock_irq(&client->lock);
return ret;
}
static int shutdown_resource(int id, void *p, void *data)
{
struct client_resource *resource = p;
struct client *client = data;
resource->release(client, resource);
client_put(client);
return 0;
}
static int fw_device_op_release(struct inode *inode, struct file *file)
{
struct client *client = file->private_data;
struct event *event, *next_event;
spin_lock_irq(&client->device->card->lock);
list_del(&client->phy_receiver_link);
spin_unlock_irq(&client->device->card->lock);
mutex_lock(&client->device->client_list_mutex);
list_del(&client->link);
mutex_unlock(&client->device->client_list_mutex);
if (client->iso_context)
fw_iso_context_destroy(client->iso_context);
if (client->buffer.pages)
fw_iso_buffer_destroy(&client->buffer, client->device->card);
/* Freeze client->resource_idr and client->event_list */
spin_lock_irq(&client->lock);
client->in_shutdown = true;
spin_unlock_irq(&client->lock);
wait_event(client->tx_flush_wait, !has_outbound_transactions(client));
idr_for_each(&client->resource_idr, shutdown_resource, client);
idr_destroy(&client->resource_idr);
list_for_each_entry_safe(event, next_event, &client->event_list, link)
kfree(event);
client_put(client);
return 0;
}
static __poll_t fw_device_op_poll(struct file *file, poll_table * pt)
{
struct client *client = file->private_data;
__poll_t mask = 0;
poll_wait(file, &client->wait, pt);
if (fw_device_is_shutdown(client->device))
mask |= EPOLLHUP | EPOLLERR;
if (!list_empty(&client->event_list))
mask |= EPOLLIN | EPOLLRDNORM;
return mask;
}
const struct file_operations fw_device_ops = {
.owner = THIS_MODULE,
.llseek = no_llseek,
.open = fw_device_op_open,
.read = fw_device_op_read,
.unlocked_ioctl = fw_device_op_ioctl,
.mmap = fw_device_op_mmap,
.release = fw_device_op_release,
.poll = fw_device_op_poll,
.compat_ioctl = compat_ptr_ioctl,
};
| linux-master | drivers/firewire/core-cdev.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Driver for OHCI 1394 controllers
*
* Copyright (C) 2003-2006 Kristian Hoegsberg <[email protected]>
*/
#include <linux/bitops.h>
#include <linux/bug.h>
#include <linux/compiler.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/firewire.h>
#include <linux/firewire-constants.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/mutex.h>
#include <linux/pci.h>
#include <linux/pci_ids.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/time.h>
#include <linux/vmalloc.h>
#include <linux/workqueue.h>
#include <asm/byteorder.h>
#include <asm/page.h>
#ifdef CONFIG_PPC_PMAC
#include <asm/pmac_feature.h>
#endif
#include "core.h"
#include "ohci.h"
#define ohci_info(ohci, f, args...) dev_info(ohci->card.device, f, ##args)
#define ohci_notice(ohci, f, args...) dev_notice(ohci->card.device, f, ##args)
#define ohci_err(ohci, f, args...) dev_err(ohci->card.device, f, ##args)
#define DESCRIPTOR_OUTPUT_MORE 0
#define DESCRIPTOR_OUTPUT_LAST (1 << 12)
#define DESCRIPTOR_INPUT_MORE (2 << 12)
#define DESCRIPTOR_INPUT_LAST (3 << 12)
#define DESCRIPTOR_STATUS (1 << 11)
#define DESCRIPTOR_KEY_IMMEDIATE (2 << 8)
#define DESCRIPTOR_PING (1 << 7)
#define DESCRIPTOR_YY (1 << 6)
#define DESCRIPTOR_NO_IRQ (0 << 4)
#define DESCRIPTOR_IRQ_ERROR (1 << 4)
#define DESCRIPTOR_IRQ_ALWAYS (3 << 4)
#define DESCRIPTOR_BRANCH_ALWAYS (3 << 2)
#define DESCRIPTOR_WAIT (3 << 0)
#define DESCRIPTOR_CMD (0xf << 12)
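/* DMA descriptor in the layout defined by OHCI 1394; the controller requires 16-byte alignment. */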
struct descriptor {
__le16 req_count;
__le16 control;
__le32 data_address;
__le32 branch_address;
__le16 res_count;
__le16 transfer_status;
} __attribute__((aligned(16)));
#define CONTROL_SET(regs) (regs)
#define CONTROL_CLEAR(regs) ((regs) + 4)
#define COMMAND_PTR(regs) ((regs) + 12)
#define CONTEXT_MATCH(regs) ((regs) + 16)
#define AR_BUFFER_SIZE (32*1024)
#define AR_BUFFERS_MIN DIV_ROUND_UP(AR_BUFFER_SIZE, PAGE_SIZE)
/* we need at least two pages for proper list management */
#define AR_BUFFERS (AR_BUFFERS_MIN >= 2 ? AR_BUFFERS_MIN : 2)
#define MAX_ASYNC_PAYLOAD 4096
#define MAX_AR_PACKET_SIZE (16 + MAX_ASYNC_PAYLOAD + 4)
#define AR_WRAPAROUND_PAGES DIV_ROUND_UP(MAX_AR_PACKET_SIZE, PAGE_SIZE)
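/*
 * AR_WRAPAROUND_PAGES pages are mapped a second time behind the end of the
 * AR buffer ring, so that a maximum-sized packet which wraps around the end
 * of the ring can still be parsed as one contiguous range.
 */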
struct ar_context {
struct fw_ohci *ohci;
struct page *pages[AR_BUFFERS];
void *buffer;
struct descriptor *descriptors;
dma_addr_t descriptors_bus;
void *pointer;
unsigned int last_buffer_index;
u32 regs;
struct tasklet_struct tasklet;
};
struct context;
typedef int (*descriptor_callback_t)(struct context *ctx,
struct descriptor *d,
struct descriptor *last);
/*
* A buffer that contains a block of DMA-able coherent memory used for
* storing a portion of a DMA descriptor program.
*/
struct descriptor_buffer {
struct list_head list;
dma_addr_t buffer_bus;
size_t buffer_size;
size_t used;
struct descriptor buffer[];
};
struct context {
struct fw_ohci *ohci;
u32 regs;
int total_allocation;
u32 current_bus;
bool running;
bool flushing;
/*
* List of page-sized buffers for storing DMA descriptors.
* Head of list contains buffers in use and tail of list contains
* free buffers.
*/
struct list_head buffer_list;
/*
* Pointer to a buffer inside buffer_list that contains the tail
* end of the current DMA program.
*/
struct descriptor_buffer *buffer_tail;
/*
* The descriptor containing the branch address of the first
* descriptor that has not yet been filled by the device.
*/
struct descriptor *last;
/*
* The last descriptor block in the DMA program. It contains the branch
* address that must be updated upon appending a new descriptor.
*/
struct descriptor *prev;
int prev_z;
descriptor_callback_t callback;
struct tasklet_struct tasklet;
};
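/* Bitfield helpers for assembling the two header quadlets of an isochronous transmit packet. */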
#define IT_HEADER_SY(v) ((v) << 0)
#define IT_HEADER_TCODE(v) ((v) << 4)
#define IT_HEADER_CHANNEL(v) ((v) << 8)
#define IT_HEADER_TAG(v) ((v) << 14)
#define IT_HEADER_SPEED(v) ((v) << 16)
#define IT_HEADER_DATA_LENGTH(v) ((v) << 16)
struct iso_context {
struct fw_iso_context base;
struct context context;
void *header;
size_t header_length;
unsigned long flushing_completions;
u32 mc_buffer_bus;
u16 mc_completed;
u16 last_timestamp;
u8 sync;
u8 tags;
};
#define CONFIG_ROM_SIZE 1024
struct fw_ohci {
struct fw_card card;
__iomem char *registers;
int node_id;
int generation;
int request_generation; /* for timestamping incoming requests */
unsigned quirks;
unsigned int pri_req_max;
u32 bus_time;
bool bus_time_running;
bool is_root;
bool csr_state_setclear_abdicate;
int n_ir;
int n_it;
/*
* Spinlock for accessing fw_ohci data. Never call out of
* this driver with this lock held.
*/
spinlock_t lock;
struct mutex phy_reg_mutex;
void *misc_buffer;
dma_addr_t misc_buffer_bus;
struct ar_context ar_request_ctx;
struct ar_context ar_response_ctx;
struct context at_request_ctx;
struct context at_response_ctx;
u32 it_context_support;
u32 it_context_mask; /* unoccupied IT contexts */
struct iso_context *it_context_list;
u64 ir_context_channels; /* unoccupied channels */
u32 ir_context_support;
u32 ir_context_mask; /* unoccupied IR contexts */
struct iso_context *ir_context_list;
u64 mc_channels; /* channels in use by the multichannel IR context */
bool mc_allocated;
__be32 *config_rom;
dma_addr_t config_rom_bus;
__be32 *next_config_rom;
dma_addr_t next_config_rom_bus;
__be32 next_header;
__le32 *self_id;
dma_addr_t self_id_bus;
struct work_struct bus_reset_work;
u32 self_id_buffer[512];
};
static struct workqueue_struct *selfid_workqueue;
static inline struct fw_ohci *fw_ohci(struct fw_card *card)
{
return container_of(card, struct fw_ohci, card);
}
#define IT_CONTEXT_CYCLE_MATCH_ENABLE 0x80000000
#define IR_CONTEXT_BUFFER_FILL 0x80000000
#define IR_CONTEXT_ISOCH_HEADER 0x40000000
#define IR_CONTEXT_CYCLE_MATCH_ENABLE 0x20000000
#define IR_CONTEXT_MULTI_CHANNEL_MODE 0x10000000
#define IR_CONTEXT_DUAL_BUFFER_MODE 0x08000000
#define CONTEXT_RUN 0x8000
#define CONTEXT_WAKE 0x1000
#define CONTEXT_DEAD 0x0800
#define CONTEXT_ACTIVE 0x0400
#define OHCI1394_MAX_AT_REQ_RETRIES 0xf
#define OHCI1394_MAX_AT_RESP_RETRIES 0x2
#define OHCI1394_MAX_PHYS_RESP_RETRIES 0x8
#define OHCI1394_REGISTER_SIZE 0x800
#define OHCI1394_PCI_HCI_Control 0x40
#define SELF_ID_BUF_SIZE 0x800
#define OHCI_TCODE_PHY_PACKET 0x0e
#define OHCI_VERSION_1_1 0x010010
static char ohci_driver_name[] = KBUILD_MODNAME;
#define PCI_VENDOR_ID_PINNACLE_SYSTEMS 0x11bd
#define PCI_DEVICE_ID_AGERE_FW643 0x5901
#define PCI_DEVICE_ID_CREATIVE_SB1394 0x4001
#define PCI_DEVICE_ID_JMICRON_JMB38X_FW 0x2380
#define PCI_DEVICE_ID_TI_TSB12LV22 0x8009
#define PCI_DEVICE_ID_TI_TSB12LV26 0x8020
#define PCI_DEVICE_ID_TI_TSB82AA2 0x8025
#define PCI_DEVICE_ID_VIA_VT630X 0x3044
#define PCI_REV_ID_VIA_VT6306 0x46
#define PCI_DEVICE_ID_VIA_VT6315 0x3403
#define QUIRK_CYCLE_TIMER 0x1
#define QUIRK_RESET_PACKET 0x2
#define QUIRK_BE_HEADERS 0x4
#define QUIRK_NO_1394A 0x8
#define QUIRK_NO_MSI 0x10
#define QUIRK_TI_SLLZ059 0x20
#define QUIRK_IR_WAKE 0x40
/* In case of multiple matches in ohci_quirks[], only the first one is used. */
static const struct {
unsigned short vendor, device, revision, flags;
} ohci_quirks[] = {
{PCI_VENDOR_ID_AL, PCI_ANY_ID, PCI_ANY_ID,
QUIRK_CYCLE_TIMER},
{PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_FW, PCI_ANY_ID,
QUIRK_BE_HEADERS},
{PCI_VENDOR_ID_ATT, PCI_DEVICE_ID_AGERE_FW643, 6,
QUIRK_NO_MSI},
{PCI_VENDOR_ID_CREATIVE, PCI_DEVICE_ID_CREATIVE_SB1394, PCI_ANY_ID,
QUIRK_RESET_PACKET},
{PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB38X_FW, PCI_ANY_ID,
QUIRK_NO_MSI},
{PCI_VENDOR_ID_NEC, PCI_ANY_ID, PCI_ANY_ID,
QUIRK_CYCLE_TIMER},
{PCI_VENDOR_ID_O2, PCI_ANY_ID, PCI_ANY_ID,
QUIRK_NO_MSI},
{PCI_VENDOR_ID_RICOH, PCI_ANY_ID, PCI_ANY_ID,
QUIRK_CYCLE_TIMER | QUIRK_NO_MSI},
{PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_TSB12LV22, PCI_ANY_ID,
QUIRK_CYCLE_TIMER | QUIRK_RESET_PACKET | QUIRK_NO_1394A},
{PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_TSB12LV26, PCI_ANY_ID,
QUIRK_RESET_PACKET | QUIRK_TI_SLLZ059},
{PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_TSB82AA2, PCI_ANY_ID,
QUIRK_RESET_PACKET | QUIRK_TI_SLLZ059},
{PCI_VENDOR_ID_TI, PCI_ANY_ID, PCI_ANY_ID,
QUIRK_RESET_PACKET},
{PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_VT630X, PCI_REV_ID_VIA_VT6306,
QUIRK_CYCLE_TIMER | QUIRK_IR_WAKE},
{PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_VT6315, 0,
QUIRK_CYCLE_TIMER /* FIXME: necessary? */ | QUIRK_NO_MSI},
{PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_VT6315, PCI_ANY_ID,
QUIRK_NO_MSI},
{PCI_VENDOR_ID_VIA, PCI_ANY_ID, PCI_ANY_ID,
QUIRK_CYCLE_TIMER | QUIRK_NO_MSI},
};
/* This overrides anything that was found in ohci_quirks[]. */
static int param_quirks;
module_param_named(quirks, param_quirks, int, 0644);
MODULE_PARM_DESC(quirks, "Chip quirks (default = 0"
", nonatomic cycle timer = " __stringify(QUIRK_CYCLE_TIMER)
", reset packet generation = " __stringify(QUIRK_RESET_PACKET)
", AR/selfID endianness = " __stringify(QUIRK_BE_HEADERS)
", no 1394a enhancements = " __stringify(QUIRK_NO_1394A)
", disable MSI = " __stringify(QUIRK_NO_MSI)
", TI SLLZ059 erratum = " __stringify(QUIRK_TI_SLLZ059)
", IR wake unreliable = " __stringify(QUIRK_IR_WAKE)
")");
#define OHCI_PARAM_DEBUG_AT_AR 1
#define OHCI_PARAM_DEBUG_SELFIDS 2
#define OHCI_PARAM_DEBUG_IRQS 4
#define OHCI_PARAM_DEBUG_BUSRESETS 8 /* only effective before chip init */
static int param_debug;
module_param_named(debug, param_debug, int, 0644);
MODULE_PARM_DESC(debug, "Verbose logging (default = 0"
", AT/AR events = " __stringify(OHCI_PARAM_DEBUG_AT_AR)
", self-IDs = " __stringify(OHCI_PARAM_DEBUG_SELFIDS)
", IRQs = " __stringify(OHCI_PARAM_DEBUG_IRQS)
", busReset events = " __stringify(OHCI_PARAM_DEBUG_BUSRESETS)
", or a combination, or all = -1)");
static bool param_remote_dma;
module_param_named(remote_dma, param_remote_dma, bool, 0444);
MODULE_PARM_DESC(remote_dma, "Enable unfiltered remote DMA (default = N)");
static void log_irqs(struct fw_ohci *ohci, u32 evt)
{
if (likely(!(param_debug &
(OHCI_PARAM_DEBUG_IRQS | OHCI_PARAM_DEBUG_BUSRESETS))))
return;
if (!(param_debug & OHCI_PARAM_DEBUG_IRQS) &&
!(evt & OHCI1394_busReset))
return;
ohci_notice(ohci, "IRQ %08x%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s\n", evt,
evt & OHCI1394_selfIDComplete ? " selfID" : "",
evt & OHCI1394_RQPkt ? " AR_req" : "",
evt & OHCI1394_RSPkt ? " AR_resp" : "",
evt & OHCI1394_reqTxComplete ? " AT_req" : "",
evt & OHCI1394_respTxComplete ? " AT_resp" : "",
evt & OHCI1394_isochRx ? " IR" : "",
evt & OHCI1394_isochTx ? " IT" : "",
evt & OHCI1394_postedWriteErr ? " postedWriteErr" : "",
evt & OHCI1394_cycleTooLong ? " cycleTooLong" : "",
evt & OHCI1394_cycle64Seconds ? " cycle64Seconds" : "",
evt & OHCI1394_cycleInconsistent ? " cycleInconsistent" : "",
evt & OHCI1394_regAccessFail ? " regAccessFail" : "",
evt & OHCI1394_unrecoverableError ? " unrecoverableError" : "",
evt & OHCI1394_busReset ? " busReset" : "",
evt & ~(OHCI1394_selfIDComplete | OHCI1394_RQPkt |
OHCI1394_RSPkt | OHCI1394_reqTxComplete |
OHCI1394_respTxComplete | OHCI1394_isochRx |
OHCI1394_isochTx | OHCI1394_postedWriteErr |
OHCI1394_cycleTooLong | OHCI1394_cycle64Seconds |
OHCI1394_cycleInconsistent |
OHCI1394_regAccessFail | OHCI1394_busReset)
? " ?" : "");
}
static const char *speed[] = {
[0] = "S100", [1] = "S200", [2] = "S400", [3] = "beta",
};
static const char *power[] = {
[0] = "+0W", [1] = "+15W", [2] = "+30W", [3] = "+45W",
[4] = "-3W", [5] = " ?W", [6] = "-3..-6W", [7] = "-3..-10W",
};
static const char port[] = { '.', '-', 'p', 'c', };
static char _p(u32 *s, int shift)
{
return port[*s >> shift & 3];
}
static void log_selfids(struct fw_ohci *ohci, int generation, int self_id_count)
{
u32 *s;
if (likely(!(param_debug & OHCI_PARAM_DEBUG_SELFIDS)))
return;
ohci_notice(ohci, "%d selfIDs, generation %d, local node ID %04x\n",
self_id_count, generation, ohci->node_id);
for (s = ohci->self_id_buffer; self_id_count--; ++s)
if ((*s & 1 << 23) == 0)
ohci_notice(ohci,
"selfID 0: %08x, phy %d [%c%c%c] %s gc=%d %s %s%s%s\n",
*s, *s >> 24 & 63, _p(s, 6), _p(s, 4), _p(s, 2),
speed[*s >> 14 & 3], *s >> 16 & 63,
power[*s >> 8 & 7], *s >> 22 & 1 ? "L" : "",
*s >> 11 & 1 ? "c" : "", *s & 2 ? "i" : "");
else
ohci_notice(ohci,
"selfID n: %08x, phy %d [%c%c%c%c%c%c%c%c]\n",
*s, *s >> 24 & 63,
_p(s, 16), _p(s, 14), _p(s, 12), _p(s, 10),
_p(s, 8), _p(s, 6), _p(s, 4), _p(s, 2));
}
static const char *evts[] = {
[0x00] = "evt_no_status", [0x01] = "-reserved-",
[0x02] = "evt_long_packet", [0x03] = "evt_missing_ack",
[0x04] = "evt_underrun", [0x05] = "evt_overrun",
[0x06] = "evt_descriptor_read", [0x07] = "evt_data_read",
[0x08] = "evt_data_write", [0x09] = "evt_bus_reset",
[0x0a] = "evt_timeout", [0x0b] = "evt_tcode_err",
[0x0c] = "-reserved-", [0x0d] = "-reserved-",
[0x0e] = "evt_unknown", [0x0f] = "evt_flushed",
[0x10] = "-reserved-", [0x11] = "ack_complete",
[0x12] = "ack_pending ", [0x13] = "-reserved-",
[0x14] = "ack_busy_X", [0x15] = "ack_busy_A",
[0x16] = "ack_busy_B", [0x17] = "-reserved-",
[0x18] = "-reserved-", [0x19] = "-reserved-",
[0x1a] = "-reserved-", [0x1b] = "ack_tardy",
[0x1c] = "-reserved-", [0x1d] = "ack_data_error",
[0x1e] = "ack_type_error", [0x1f] = "-reserved-",
[0x20] = "pending/cancelled",
};
static const char *tcodes[] = {
[0x0] = "QW req", [0x1] = "BW req",
[0x2] = "W resp", [0x3] = "-reserved-",
[0x4] = "QR req", [0x5] = "BR req",
[0x6] = "QR resp", [0x7] = "BR resp",
[0x8] = "cycle start", [0x9] = "Lk req",
[0xa] = "async stream packet", [0xb] = "Lk resp",
[0xc] = "-reserved-", [0xd] = "-reserved-",
[0xe] = "link internal", [0xf] = "-reserved-",
};
static void log_ar_at_event(struct fw_ohci *ohci,
char dir, int speed, u32 *header, int evt)
{
int tcode = header[0] >> 4 & 0xf;
char specific[12];
if (likely(!(param_debug & OHCI_PARAM_DEBUG_AT_AR)))
return;
if (unlikely(evt >= ARRAY_SIZE(evts)))
evt = 0x1f;
if (evt == OHCI1394_evt_bus_reset) {
ohci_notice(ohci, "A%c evt_bus_reset, generation %d\n",
dir, (header[2] >> 16) & 0xff);
return;
}
switch (tcode) {
case 0x0: case 0x6: case 0x8:
snprintf(specific, sizeof(specific), " = %08x",
be32_to_cpu((__force __be32)header[3]));
break;
case 0x1: case 0x5: case 0x7: case 0x9: case 0xb:
snprintf(specific, sizeof(specific), " %x,%x",
header[3] >> 16, header[3] & 0xffff);
break;
default:
specific[0] = '\0';
}
switch (tcode) {
case 0xa:
ohci_notice(ohci, "A%c %s, %s\n",
dir, evts[evt], tcodes[tcode]);
break;
case 0xe:
ohci_notice(ohci, "A%c %s, PHY %08x %08x\n",
dir, evts[evt], header[1], header[2]);
break;
case 0x0: case 0x1: case 0x4: case 0x5: case 0x9:
ohci_notice(ohci,
"A%c spd %x tl %02x, %04x -> %04x, %s, %s, %04x%08x%s\n",
dir, speed, header[0] >> 10 & 0x3f,
header[1] >> 16, header[0] >> 16, evts[evt],
tcodes[tcode], header[1] & 0xffff, header[2], specific);
break;
default:
ohci_notice(ohci,
"A%c spd %x tl %02x, %04x -> %04x, %s, %s%s\n",
dir, speed, header[0] >> 10 & 0x3f,
header[1] >> 16, header[0] >> 16, evts[evt],
tcodes[tcode], specific);
}
}
static inline void reg_write(const struct fw_ohci *ohci, int offset, u32 data)
{
writel(data, ohci->registers + offset);
}
static inline u32 reg_read(const struct fw_ohci *ohci, int offset)
{
return readl(ohci->registers + offset);
}
static inline void flush_writes(const struct fw_ohci *ohci)
{
/* Do a dummy read to flush writes. */
reg_read(ohci, OHCI1394_Version);
}
/*
* Beware! read_phy_reg(), write_phy_reg(), update_phy_reg(), and
* read_paged_phy_reg() require the caller to hold ohci->phy_reg_mutex.
* In other words, only use ohci_read_phy_reg() and ohci_update_phy_reg()
* directly. Exceptions are intrinsically serialized contexts like pci_probe.
*/
static int read_phy_reg(struct fw_ohci *ohci, int addr)
{
u32 val;
int i;
reg_write(ohci, OHCI1394_PhyControl, OHCI1394_PhyControl_Read(addr));
for (i = 0; i < 3 + 100; i++) {
val = reg_read(ohci, OHCI1394_PhyControl);
if (!~val)
return -ENODEV; /* Card was ejected. */
if (val & OHCI1394_PhyControl_ReadDone)
return OHCI1394_PhyControl_ReadData(val);
/*
* Try a few times without waiting. Sleeping is necessary
* only when the link/PHY interface is busy.
*/
if (i >= 3)
msleep(1);
}
ohci_err(ohci, "failed to read phy reg %d\n", addr);
dump_stack();
return -EBUSY;
}
static int write_phy_reg(const struct fw_ohci *ohci, int addr, u32 val)
{
int i;
reg_write(ohci, OHCI1394_PhyControl,
OHCI1394_PhyControl_Write(addr, val));
for (i = 0; i < 3 + 100; i++) {
val = reg_read(ohci, OHCI1394_PhyControl);
if (!~val)
return -ENODEV; /* Card was ejected. */
if (!(val & OHCI1394_PhyControl_WritePending))
return 0;
if (i >= 3)
msleep(1);
}
ohci_err(ohci, "failed to write phy reg %d, val %u\n", addr, val);
dump_stack();
return -EBUSY;
}
static int update_phy_reg(struct fw_ohci *ohci, int addr,
int clear_bits, int set_bits)
{
int ret = read_phy_reg(ohci, addr);
if (ret < 0)
return ret;
/*
	 * The interrupt status bits are write-one-to-clear; avoid clearing
	 * them unless that is explicitly requested in set_bits.
*/
if (addr == 5)
clear_bits |= PHY_INT_STATUS_BITS;
return write_phy_reg(ohci, addr, (ret & ~clear_bits) | set_bits);
}
static int read_paged_phy_reg(struct fw_ohci *ohci, int page, int addr)
{
int ret;
ret = update_phy_reg(ohci, 7, PHY_PAGE_SELECT, page << 5);
if (ret < 0)
return ret;
return read_phy_reg(ohci, addr);
}
static int ohci_read_phy_reg(struct fw_card *card, int addr)
{
struct fw_ohci *ohci = fw_ohci(card);
int ret;
mutex_lock(&ohci->phy_reg_mutex);
ret = read_phy_reg(ohci, addr);
mutex_unlock(&ohci->phy_reg_mutex);
return ret;
}
static int ohci_update_phy_reg(struct fw_card *card, int addr,
int clear_bits, int set_bits)
{
struct fw_ohci *ohci = fw_ohci(card);
int ret;
mutex_lock(&ohci->phy_reg_mutex);
ret = update_phy_reg(ohci, addr, clear_bits, set_bits);
mutex_unlock(&ohci->phy_reg_mutex);
return ret;
}
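/*
 * The DMA bus address of each AR buffer page is stashed in the page's
 * private field when the page is allocated in ar_context_init().
 */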
static inline dma_addr_t ar_buffer_bus(struct ar_context *ctx, unsigned int i)
{
return page_private(ctx->pages[i]);
}
static void ar_context_link_page(struct ar_context *ctx, unsigned int index)
{
struct descriptor *d;
d = &ctx->descriptors[index];
d->branch_address &= cpu_to_le32(~0xf);
d->res_count = cpu_to_le16(PAGE_SIZE);
d->transfer_status = 0;
wmb(); /* finish init of new descriptors before branch_address update */
d = &ctx->descriptors[ctx->last_buffer_index];
d->branch_address |= cpu_to_le32(1);
ctx->last_buffer_index = index;
reg_write(ctx->ohci, CONTROL_SET(ctx->regs), CONTEXT_WAKE);
}
static void ar_context_release(struct ar_context *ctx)
{
struct device *dev = ctx->ohci->card.device;
unsigned int i;
if (!ctx->buffer)
return;
vunmap(ctx->buffer);
for (i = 0; i < AR_BUFFERS; i++) {
if (ctx->pages[i])
dma_free_pages(dev, PAGE_SIZE, ctx->pages[i],
ar_buffer_bus(ctx, i), DMA_FROM_DEVICE);
}
}
static void ar_context_abort(struct ar_context *ctx, const char *error_msg)
{
struct fw_ohci *ohci = ctx->ohci;
if (reg_read(ohci, CONTROL_CLEAR(ctx->regs)) & CONTEXT_RUN) {
reg_write(ohci, CONTROL_CLEAR(ctx->regs), CONTEXT_RUN);
flush_writes(ohci);
ohci_err(ohci, "AR error: %s; DMA stopped\n", error_msg);
}
/* FIXME: restart? */
}
static inline unsigned int ar_next_buffer_index(unsigned int index)
{
return (index + 1) % AR_BUFFERS;
}
static inline unsigned int ar_first_buffer_index(struct ar_context *ctx)
{
return ar_next_buffer_index(ctx->last_buffer_index);
}
/*
* We search for the buffer that contains the last AR packet DMA data written
* by the controller.
*/
static unsigned int ar_search_last_active_buffer(struct ar_context *ctx,
unsigned int *buffer_offset)
{
unsigned int i, next_i, last = ctx->last_buffer_index;
__le16 res_count, next_res_count;
i = ar_first_buffer_index(ctx);
res_count = READ_ONCE(ctx->descriptors[i].res_count);
/* A buffer that is not yet completely filled must be the last one. */
while (i != last && res_count == 0) {
/* Peek at the next descriptor. */
next_i = ar_next_buffer_index(i);
rmb(); /* read descriptors in order */
next_res_count = READ_ONCE(ctx->descriptors[next_i].res_count);
/*
* If the next descriptor is still empty, we must stop at this
* descriptor.
*/
if (next_res_count == cpu_to_le16(PAGE_SIZE)) {
/*
* The exception is when the DMA data for one packet is
* split over three buffers; in this case, the middle
			 * buffer's descriptor might never be updated by the
			 * controller and still look empty, and we have to peek
* at the third one.
*/
if (MAX_AR_PACKET_SIZE > PAGE_SIZE && i != last) {
next_i = ar_next_buffer_index(next_i);
rmb();
next_res_count = READ_ONCE(ctx->descriptors[next_i].res_count);
if (next_res_count != cpu_to_le16(PAGE_SIZE))
goto next_buffer_is_active;
}
break;
}
next_buffer_is_active:
i = next_i;
res_count = next_res_count;
}
rmb(); /* read res_count before the DMA data */
*buffer_offset = PAGE_SIZE - le16_to_cpu(res_count);
if (*buffer_offset > PAGE_SIZE) {
*buffer_offset = 0;
ar_context_abort(ctx, "corrupted descriptor");
}
return i;
}
static void ar_sync_buffers_for_cpu(struct ar_context *ctx,
unsigned int end_buffer_index,
unsigned int end_buffer_offset)
{
unsigned int i;
i = ar_first_buffer_index(ctx);
while (i != end_buffer_index) {
dma_sync_single_for_cpu(ctx->ohci->card.device,
ar_buffer_bus(ctx, i),
PAGE_SIZE, DMA_FROM_DEVICE);
i = ar_next_buffer_index(i);
}
if (end_buffer_offset > 0)
dma_sync_single_for_cpu(ctx->ohci->card.device,
ar_buffer_bus(ctx, i),
end_buffer_offset, DMA_FROM_DEVICE);
}
#if defined(CONFIG_PPC_PMAC) && defined(CONFIG_PPC32)
#define cond_le32_to_cpu(v) \
(ohci->quirks & QUIRK_BE_HEADERS ? (__force __u32)(v) : le32_to_cpu(v))
#else
#define cond_le32_to_cpu(v) le32_to_cpu(v)
#endif
static __le32 *handle_ar_packet(struct ar_context *ctx, __le32 *buffer)
{
struct fw_ohci *ohci = ctx->ohci;
struct fw_packet p;
u32 status, length, tcode;
int evt;
p.header[0] = cond_le32_to_cpu(buffer[0]);
p.header[1] = cond_le32_to_cpu(buffer[1]);
p.header[2] = cond_le32_to_cpu(buffer[2]);
tcode = (p.header[0] >> 4) & 0x0f;
switch (tcode) {
case TCODE_WRITE_QUADLET_REQUEST:
case TCODE_READ_QUADLET_RESPONSE:
p.header[3] = (__force __u32) buffer[3];
p.header_length = 16;
p.payload_length = 0;
break;
case TCODE_READ_BLOCK_REQUEST :
p.header[3] = cond_le32_to_cpu(buffer[3]);
p.header_length = 16;
p.payload_length = 0;
break;
case TCODE_WRITE_BLOCK_REQUEST:
case TCODE_READ_BLOCK_RESPONSE:
case TCODE_LOCK_REQUEST:
case TCODE_LOCK_RESPONSE:
p.header[3] = cond_le32_to_cpu(buffer[3]);
p.header_length = 16;
p.payload_length = p.header[3] >> 16;
if (p.payload_length > MAX_ASYNC_PAYLOAD) {
ar_context_abort(ctx, "invalid packet length");
return NULL;
}
break;
case TCODE_WRITE_RESPONSE:
case TCODE_READ_QUADLET_REQUEST:
case OHCI_TCODE_PHY_PACKET:
p.header_length = 12;
p.payload_length = 0;
break;
default:
ar_context_abort(ctx, "invalid tcode");
return NULL;
}
p.payload = (void *) buffer + p.header_length;
/* FIXME: What to do about evt_* errors? */
length = (p.header_length + p.payload_length + 3) / 4;
status = cond_le32_to_cpu(buffer[length]);
evt = (status >> 16) & 0x1f;
p.ack = evt - 16;
p.speed = (status >> 21) & 0x7;
p.timestamp = status & 0xffff;
p.generation = ohci->request_generation;
log_ar_at_event(ohci, 'R', p.speed, p.header, evt);
/*
* Several controllers, notably from NEC and VIA, forget to
* write ack_complete status at PHY packet reception.
*/
if (evt == OHCI1394_evt_no_status &&
(p.header[0] & 0xff) == (OHCI1394_phy_tcode << 4))
p.ack = ACK_COMPLETE;
/*
* The OHCI bus reset handler synthesizes a PHY packet with
* the new generation number when a bus reset happens (see
* section 8.4.2.3). This helps us determine when a request
* was received and make sure we send the response in the same
* generation. We only need this for requests; for responses
* we use the unique tlabel for finding the matching
* request.
*
* Alas some chips sometimes emit bus reset packets with a
* wrong generation. We set the correct generation for these
* at a slightly incorrect time (in bus_reset_work).
*/
if (evt == OHCI1394_evt_bus_reset) {
if (!(ohci->quirks & QUIRK_RESET_PACKET))
ohci->request_generation = (p.header[2] >> 16) & 0xff;
} else if (ctx == &ohci->ar_request_ctx) {
fw_core_handle_request(&ohci->card, &p);
} else {
fw_core_handle_response(&ohci->card, &p);
}
return buffer + length + 1;
}
static void *handle_ar_packets(struct ar_context *ctx, void *p, void *end)
{
void *next;
while (p < end) {
next = handle_ar_packet(ctx, p);
if (!next)
return p;
p = next;
}
return p;
}
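/*
 * Hand fully processed buffer pages back to the controller: sync each page
 * for device access again and re-link it at the tail of the descriptor ring.
 */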
static void ar_recycle_buffers(struct ar_context *ctx, unsigned int end_buffer)
{
unsigned int i;
i = ar_first_buffer_index(ctx);
while (i != end_buffer) {
dma_sync_single_for_device(ctx->ohci->card.device,
ar_buffer_bus(ctx, i),
PAGE_SIZE, DMA_FROM_DEVICE);
ar_context_link_page(ctx, i);
i = ar_next_buffer_index(i);
}
}
static void ar_context_tasklet(unsigned long data)
{
struct ar_context *ctx = (struct ar_context *)data;
unsigned int end_buffer_index, end_buffer_offset;
void *p, *end;
p = ctx->pointer;
if (!p)
return;
end_buffer_index = ar_search_last_active_buffer(ctx,
&end_buffer_offset);
ar_sync_buffers_for_cpu(ctx, end_buffer_index, end_buffer_offset);
end = ctx->buffer + end_buffer_index * PAGE_SIZE + end_buffer_offset;
if (end_buffer_index < ar_first_buffer_index(ctx)) {
/*
* The filled part of the overall buffer wraps around; handle
* all packets up to the buffer end here. If the last packet
* wraps around, its tail will be visible after the buffer end
* because the buffer start pages are mapped there again.
*/
void *buffer_end = ctx->buffer + AR_BUFFERS * PAGE_SIZE;
p = handle_ar_packets(ctx, p, buffer_end);
if (p < buffer_end)
goto error;
/* adjust p to point back into the actual buffer */
p -= AR_BUFFERS * PAGE_SIZE;
}
p = handle_ar_packets(ctx, p, end);
if (p != end) {
if (p > end)
ar_context_abort(ctx, "inconsistent descriptor");
goto error;
}
ctx->pointer = p;
ar_recycle_buffers(ctx, end_buffer_index);
return;
error:
ctx->pointer = NULL;
}
static int ar_context_init(struct ar_context *ctx, struct fw_ohci *ohci,
unsigned int descriptors_offset, u32 regs)
{
struct device *dev = ohci->card.device;
unsigned int i;
dma_addr_t dma_addr;
struct page *pages[AR_BUFFERS + AR_WRAPAROUND_PAGES];
struct descriptor *d;
ctx->regs = regs;
ctx->ohci = ohci;
tasklet_init(&ctx->tasklet, ar_context_tasklet, (unsigned long)ctx);
for (i = 0; i < AR_BUFFERS; i++) {
ctx->pages[i] = dma_alloc_pages(dev, PAGE_SIZE, &dma_addr,
DMA_FROM_DEVICE, GFP_KERNEL);
if (!ctx->pages[i])
goto out_of_memory;
set_page_private(ctx->pages[i], dma_addr);
dma_sync_single_for_device(dev, dma_addr, PAGE_SIZE,
DMA_FROM_DEVICE);
}
for (i = 0; i < AR_BUFFERS; i++)
pages[i] = ctx->pages[i];
for (i = 0; i < AR_WRAPAROUND_PAGES; i++)
pages[AR_BUFFERS + i] = ctx->pages[i];
ctx->buffer = vmap(pages, ARRAY_SIZE(pages), VM_MAP, PAGE_KERNEL);
if (!ctx->buffer)
goto out_of_memory;
ctx->descriptors = ohci->misc_buffer + descriptors_offset;
ctx->descriptors_bus = ohci->misc_buffer_bus + descriptors_offset;
for (i = 0; i < AR_BUFFERS; i++) {
d = &ctx->descriptors[i];
d->req_count = cpu_to_le16(PAGE_SIZE);
d->control = cpu_to_le16(DESCRIPTOR_INPUT_MORE |
DESCRIPTOR_STATUS |
DESCRIPTOR_BRANCH_ALWAYS);
d->data_address = cpu_to_le32(ar_buffer_bus(ctx, i));
d->branch_address = cpu_to_le32(ctx->descriptors_bus +
ar_next_buffer_index(i) * sizeof(struct descriptor));
}
return 0;
out_of_memory:
ar_context_release(ctx);
return -ENOMEM;
}
static void ar_context_run(struct ar_context *ctx)
{
unsigned int i;
for (i = 0; i < AR_BUFFERS; i++)
ar_context_link_page(ctx, i);
ctx->pointer = ctx->buffer;
reg_write(ctx->ohci, COMMAND_PTR(ctx->regs), ctx->descriptors_bus | 1);
reg_write(ctx->ohci, CONTROL_SET(ctx->regs), CONTEXT_RUN);
}
static struct descriptor *find_branch_descriptor(struct descriptor *d, int z)
{
__le16 branch;
branch = d->control & cpu_to_le16(DESCRIPTOR_BRANCH_ALWAYS);
/* figure out which descriptor the branch address goes in */
if (z == 2 && branch == cpu_to_le16(DESCRIPTOR_BRANCH_ALWAYS))
return d;
else
return d + z - 1;
}
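/*
 * Walk the chain of completed descriptor blocks by following their branch
 * addresses, invoke the context's callback for each block, and move
 * descriptor buffers that have been fully consumed back to the free list.
 */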
static void context_tasklet(unsigned long data)
{
struct context *ctx = (struct context *) data;
struct descriptor *d, *last;
u32 address;
int z;
struct descriptor_buffer *desc;
desc = list_entry(ctx->buffer_list.next,
struct descriptor_buffer, list);
last = ctx->last;
while (last->branch_address != 0) {
struct descriptor_buffer *old_desc = desc;
address = le32_to_cpu(last->branch_address);
z = address & 0xf;
address &= ~0xf;
ctx->current_bus = address;
/* If the branch address points to a buffer outside of the
* current buffer, advance to the next buffer. */
if (address < desc->buffer_bus ||
address >= desc->buffer_bus + desc->used)
desc = list_entry(desc->list.next,
struct descriptor_buffer, list);
d = desc->buffer + (address - desc->buffer_bus) / sizeof(*d);
last = find_branch_descriptor(d, z);
if (!ctx->callback(ctx, d, last))
break;
if (old_desc != desc) {
/* If we've advanced to the next buffer, move the
* previous buffer to the free list. */
unsigned long flags;
old_desc->used = 0;
spin_lock_irqsave(&ctx->ohci->lock, flags);
list_move_tail(&old_desc->list, &ctx->buffer_list);
spin_unlock_irqrestore(&ctx->ohci->lock, flags);
}
ctx->last = last;
}
}
/*
* Allocate a new buffer and add it to the list of free buffers for this
* context. Must be called with ohci->lock held.
*/
static int context_add_buffer(struct context *ctx)
{
struct descriptor_buffer *desc;
dma_addr_t bus_addr;
int offset;
/*
* 16MB of descriptors should be far more than enough for any DMA
* program. This will catch run-away userspace or DoS attacks.
*/
if (ctx->total_allocation >= 16*1024*1024)
return -ENOMEM;
desc = dmam_alloc_coherent(ctx->ohci->card.device, PAGE_SIZE, &bus_addr, GFP_ATOMIC);
if (!desc)
return -ENOMEM;
offset = (void *)&desc->buffer - (void *)desc;
/*
* Some controllers, like JMicron ones, always issue 0x20-byte DMA reads
* for descriptors, even 0x10-byte ones. This can cause page faults when
* an IOMMU is in use and the oversized read crosses a page boundary.
* Work around this by always leaving at least 0x10 bytes of padding.
*/
desc->buffer_size = PAGE_SIZE - offset - 0x10;
desc->buffer_bus = bus_addr + offset;
desc->used = 0;
list_add_tail(&desc->list, &ctx->buffer_list);
ctx->total_allocation += PAGE_SIZE;
return 0;
}
static int context_init(struct context *ctx, struct fw_ohci *ohci,
u32 regs, descriptor_callback_t callback)
{
ctx->ohci = ohci;
ctx->regs = regs;
ctx->total_allocation = 0;
INIT_LIST_HEAD(&ctx->buffer_list);
if (context_add_buffer(ctx) < 0)
return -ENOMEM;
ctx->buffer_tail = list_entry(ctx->buffer_list.next,
struct descriptor_buffer, list);
tasklet_init(&ctx->tasklet, context_tasklet, (unsigned long)ctx);
ctx->callback = callback;
/*
* We put a dummy descriptor in the buffer that has a NULL
* branch address and looks like it's been sent. That way we
* have a descriptor to append DMA programs to.
*/
memset(ctx->buffer_tail->buffer, 0, sizeof(*ctx->buffer_tail->buffer));
ctx->buffer_tail->buffer->control = cpu_to_le16(DESCRIPTOR_OUTPUT_LAST);
ctx->buffer_tail->buffer->transfer_status = cpu_to_le16(0x8011);
ctx->buffer_tail->used += sizeof(*ctx->buffer_tail->buffer);
ctx->last = ctx->buffer_tail->buffer;
ctx->prev = ctx->buffer_tail->buffer;
ctx->prev_z = 1;
return 0;
}
static void context_release(struct context *ctx)
{
struct fw_card *card = &ctx->ohci->card;
struct descriptor_buffer *desc, *tmp;
list_for_each_entry_safe(desc, tmp, &ctx->buffer_list, list) {
dmam_free_coherent(card->device, PAGE_SIZE, desc,
desc->buffer_bus - ((void *)&desc->buffer - (void *)desc));
}
}
/* Must be called with ohci->lock held */
static struct descriptor *context_get_descriptors(struct context *ctx,
int z, dma_addr_t *d_bus)
{
struct descriptor *d = NULL;
struct descriptor_buffer *desc = ctx->buffer_tail;
if (z * sizeof(*d) > desc->buffer_size)
return NULL;
if (z * sizeof(*d) > desc->buffer_size - desc->used) {
/* No room for the descriptor in this buffer, so advance to the
* next one. */
if (desc->list.next == &ctx->buffer_list) {
/* If there is no free buffer next in the list,
* allocate one. */
if (context_add_buffer(ctx) < 0)
return NULL;
}
desc = list_entry(desc->list.next,
struct descriptor_buffer, list);
ctx->buffer_tail = desc;
}
d = desc->buffer + desc->used / sizeof(*d);
memset(d, 0, z * sizeof(*d));
*d_bus = desc->buffer_bus + desc->used;
return d;
}
static void context_run(struct context *ctx, u32 extra)
{
struct fw_ohci *ohci = ctx->ohci;
reg_write(ohci, COMMAND_PTR(ctx->regs),
le32_to_cpu(ctx->last->branch_address));
reg_write(ohci, CONTROL_CLEAR(ctx->regs), ~0);
reg_write(ohci, CONTROL_SET(ctx->regs), CONTEXT_RUN | extra);
ctx->running = true;
flush_writes(ohci);
}
static void context_append(struct context *ctx,
struct descriptor *d, int z, int extra)
{
dma_addr_t d_bus;
struct descriptor_buffer *desc = ctx->buffer_tail;
struct descriptor *d_branch;
d_bus = desc->buffer_bus + (d - desc->buffer) * sizeof(*d);
desc->used += (z + extra) * sizeof(*d);
wmb(); /* finish init of new descriptors before branch_address update */
d_branch = find_branch_descriptor(ctx->prev, ctx->prev_z);
d_branch->branch_address = cpu_to_le32(d_bus | z);
/*
* VT6306 incorrectly checks only the single descriptor at the
* CommandPtr when the wake bit is written, so if it's a
* multi-descriptor block starting with an INPUT_MORE, put a copy of
* the branch address in the first descriptor.
*
	 * We do not do this for transmit contexts, since it is not clear how
	 * it would interact with skip addresses.
*/
if (unlikely(ctx->ohci->quirks & QUIRK_IR_WAKE) &&
d_branch != ctx->prev &&
(ctx->prev->control & cpu_to_le16(DESCRIPTOR_CMD)) ==
cpu_to_le16(DESCRIPTOR_INPUT_MORE)) {
ctx->prev->branch_address = cpu_to_le32(d_bus | z);
}
ctx->prev = d;
ctx->prev_z = z;
}
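/*
 * Clear the run bit and poll (up to roughly 10 ms) until the context's
 * active bit drops; complain if the DMA engine refuses to stop.
 */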
static void context_stop(struct context *ctx)
{
struct fw_ohci *ohci = ctx->ohci;
u32 reg;
int i;
reg_write(ohci, CONTROL_CLEAR(ctx->regs), CONTEXT_RUN);
ctx->running = false;
for (i = 0; i < 1000; i++) {
reg = reg_read(ohci, CONTROL_SET(ctx->regs));
if ((reg & CONTEXT_ACTIVE) == 0)
return;
if (i)
udelay(10);
}
ohci_err(ohci, "DMA context still active (0x%08x)\n", reg);
}
struct driver_data {
u8 inline_data[8];
struct fw_packet *packet;
};
/*
 * This function appends a packet to the DMA queue for transmission.
 * Must always be called with the ohci->lock held to ensure proper
* generation handling and locking around packet queue manipulation.
*/
static int at_context_queue_packet(struct context *ctx,
struct fw_packet *packet)
{
struct fw_ohci *ohci = ctx->ohci;
dma_addr_t d_bus, payload_bus;
struct driver_data *driver_data;
struct descriptor *d, *last;
__le32 *header;
int z, tcode;
d = context_get_descriptors(ctx, 4, &d_bus);
if (d == NULL) {
packet->ack = RCODE_SEND_ERROR;
return -1;
}
d[0].control = cpu_to_le16(DESCRIPTOR_KEY_IMMEDIATE);
d[0].res_count = cpu_to_le16(packet->timestamp);
/*
* The DMA format for asynchronous link packets is different
* from the IEEE1394 layout, so shift the fields around
* accordingly.
*/
tcode = (packet->header[0] >> 4) & 0x0f;
header = (__le32 *) &d[1];
switch (tcode) {
case TCODE_WRITE_QUADLET_REQUEST:
case TCODE_WRITE_BLOCK_REQUEST:
case TCODE_WRITE_RESPONSE:
case TCODE_READ_QUADLET_REQUEST:
case TCODE_READ_BLOCK_REQUEST:
case TCODE_READ_QUADLET_RESPONSE:
case TCODE_READ_BLOCK_RESPONSE:
case TCODE_LOCK_REQUEST:
case TCODE_LOCK_RESPONSE:
header[0] = cpu_to_le32((packet->header[0] & 0xffff) |
(packet->speed << 16));
header[1] = cpu_to_le32((packet->header[1] & 0xffff) |
(packet->header[0] & 0xffff0000));
header[2] = cpu_to_le32(packet->header[2]);
if (TCODE_IS_BLOCK_PACKET(tcode))
header[3] = cpu_to_le32(packet->header[3]);
else
header[3] = (__force __le32) packet->header[3];
d[0].req_count = cpu_to_le16(packet->header_length);
break;
case TCODE_LINK_INTERNAL:
header[0] = cpu_to_le32((OHCI1394_phy_tcode << 4) |
(packet->speed << 16));
header[1] = cpu_to_le32(packet->header[1]);
header[2] = cpu_to_le32(packet->header[2]);
d[0].req_count = cpu_to_le16(12);
if (is_ping_packet(&packet->header[1]))
d[0].control |= cpu_to_le16(DESCRIPTOR_PING);
break;
case TCODE_STREAM_DATA:
header[0] = cpu_to_le32((packet->header[0] & 0xffff) |
(packet->speed << 16));
header[1] = cpu_to_le32(packet->header[0] & 0xffff0000);
d[0].req_count = cpu_to_le16(8);
break;
default:
/* BUG(); */
packet->ack = RCODE_SEND_ERROR;
return -1;
}
BUILD_BUG_ON(sizeof(struct driver_data) > sizeof(struct descriptor));
driver_data = (struct driver_data *) &d[3];
driver_data->packet = packet;
packet->driver_data = driver_data;
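	/*
	 * Payloads of up to 8 bytes are copied into the descriptor block
	 * itself (driver_data->inline_data); larger payloads get their own
	 * streaming DMA mapping.
	 */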
if (packet->payload_length > 0) {
if (packet->payload_length > sizeof(driver_data->inline_data)) {
payload_bus = dma_map_single(ohci->card.device,
packet->payload,
packet->payload_length,
DMA_TO_DEVICE);
if (dma_mapping_error(ohci->card.device, payload_bus)) {
packet->ack = RCODE_SEND_ERROR;
return -1;
}
packet->payload_bus = payload_bus;
packet->payload_mapped = true;
} else {
memcpy(driver_data->inline_data, packet->payload,
packet->payload_length);
payload_bus = d_bus + 3 * sizeof(*d);
}
d[2].req_count = cpu_to_le16(packet->payload_length);
d[2].data_address = cpu_to_le32(payload_bus);
last = &d[2];
z = 3;
} else {
last = &d[0];
z = 2;
}
last->control |= cpu_to_le16(DESCRIPTOR_OUTPUT_LAST |
DESCRIPTOR_IRQ_ALWAYS |
DESCRIPTOR_BRANCH_ALWAYS);
/* FIXME: Document how the locking works. */
if (ohci->generation != packet->generation) {
if (packet->payload_mapped)
dma_unmap_single(ohci->card.device, payload_bus,
packet->payload_length, DMA_TO_DEVICE);
packet->ack = RCODE_GENERATION;
return -1;
}
context_append(ctx, d, z, 4 - z);
if (ctx->running)
reg_write(ohci, CONTROL_SET(ctx->regs), CONTEXT_WAKE);
else
context_run(ctx, 0);
return 0;
}
static void at_context_flush(struct context *ctx)
{
tasklet_disable(&ctx->tasklet);
ctx->flushing = true;
context_tasklet((unsigned long)ctx);
ctx->flushing = false;
tasklet_enable(&ctx->tasklet);
}
static int handle_at_packet(struct context *context,
struct descriptor *d,
struct descriptor *last)
{
struct driver_data *driver_data;
struct fw_packet *packet;
struct fw_ohci *ohci = context->ohci;
int evt;
if (last->transfer_status == 0 && !context->flushing)
/* This descriptor isn't done yet, stop iteration. */
return 0;
driver_data = (struct driver_data *) &d[3];
packet = driver_data->packet;
if (packet == NULL)
/* This packet was cancelled, just continue. */
return 1;
if (packet->payload_mapped)
dma_unmap_single(ohci->card.device, packet->payload_bus,
packet->payload_length, DMA_TO_DEVICE);
evt = le16_to_cpu(last->transfer_status) & 0x1f;
packet->timestamp = le16_to_cpu(last->res_count);
log_ar_at_event(ohci, 'T', packet->speed, packet->header, evt);
switch (evt) {
case OHCI1394_evt_timeout:
/* Async response transmit timed out. */
packet->ack = RCODE_CANCELLED;
break;
case OHCI1394_evt_flushed:
/*
		 * A flushed packet should give the same error as when we
		 * try to use a stale generation count.
*/
packet->ack = RCODE_GENERATION;
break;
case OHCI1394_evt_missing_ack:
if (context->flushing)
packet->ack = RCODE_GENERATION;
else {
/*
* Using a valid (current) generation count, but the
* node is not on the bus or not sending acks.
*/
packet->ack = RCODE_NO_ACK;
}
break;
case ACK_COMPLETE + 0x10:
case ACK_PENDING + 0x10:
case ACK_BUSY_X + 0x10:
case ACK_BUSY_A + 0x10:
case ACK_BUSY_B + 0x10:
case ACK_DATA_ERROR + 0x10:
case ACK_TYPE_ERROR + 0x10:
packet->ack = evt - 0x10;
break;
case OHCI1394_evt_no_status:
if (context->flushing) {
packet->ack = RCODE_GENERATION;
break;
}
fallthrough;
default:
packet->ack = RCODE_SEND_ERROR;
break;
}
packet->callback(packet, &ohci->card, packet->ack);
return 1;
}
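/* Field accessors for the quadlets of an IEEE 1394 asynchronous packet header. */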
#define HEADER_GET_DESTINATION(q) (((q) >> 16) & 0xffff)
#define HEADER_GET_TCODE(q) (((q) >> 4) & 0x0f)
#define HEADER_GET_OFFSET_HIGH(q) (((q) >> 0) & 0xffff)
#define HEADER_GET_DATA_LENGTH(q) (((q) >> 16) & 0xffff)
#define HEADER_GET_EXTENDED_TCODE(q) (((q) >> 0) & 0xffff)
static void handle_local_rom(struct fw_ohci *ohci,
struct fw_packet *packet, u32 csr)
{
struct fw_packet response;
int tcode, length, i;
tcode = HEADER_GET_TCODE(packet->header[0]);
if (TCODE_IS_BLOCK_PACKET(tcode))
length = HEADER_GET_DATA_LENGTH(packet->header[3]);
else
length = 4;
i = csr - CSR_CONFIG_ROM;
if (i + length > CONFIG_ROM_SIZE) {
fw_fill_response(&response, packet->header,
RCODE_ADDRESS_ERROR, NULL, 0);
} else if (!TCODE_IS_READ_REQUEST(tcode)) {
fw_fill_response(&response, packet->header,
RCODE_TYPE_ERROR, NULL, 0);
} else {
fw_fill_response(&response, packet->header, RCODE_COMPLETE,
(void *) ohci->config_rom + i, length);
}
fw_core_handle_response(&ohci->card, &response);
}
static void handle_local_lock(struct fw_ohci *ohci,
struct fw_packet *packet, u32 csr)
{
struct fw_packet response;
int tcode, length, ext_tcode, sel, try;
__be32 *payload, lock_old;
u32 lock_arg, lock_data;
tcode = HEADER_GET_TCODE(packet->header[0]);
length = HEADER_GET_DATA_LENGTH(packet->header[3]);
payload = packet->payload;
ext_tcode = HEADER_GET_EXTENDED_TCODE(packet->header[3]);
if (tcode == TCODE_LOCK_REQUEST &&
ext_tcode == EXTCODE_COMPARE_SWAP && length == 8) {
lock_arg = be32_to_cpu(payload[0]);
lock_data = be32_to_cpu(payload[1]);
} else if (tcode == TCODE_READ_QUADLET_REQUEST) {
lock_arg = 0;
lock_data = 0;
} else {
fw_fill_response(&response, packet->header,
RCODE_TYPE_ERROR, NULL, 0);
goto out;
}
sel = (csr - CSR_BUS_MANAGER_ID) / 4;
reg_write(ohci, OHCI1394_CSRData, lock_data);
reg_write(ohci, OHCI1394_CSRCompareData, lock_arg);
reg_write(ohci, OHCI1394_CSRControl, sel);
for (try = 0; try < 20; try++)
if (reg_read(ohci, OHCI1394_CSRControl) & 0x80000000) {
lock_old = cpu_to_be32(reg_read(ohci,
OHCI1394_CSRData));
fw_fill_response(&response, packet->header,
RCODE_COMPLETE,
&lock_old, sizeof(lock_old));
goto out;
}
ohci_err(ohci, "swap not done (CSR lock timeout)\n");
fw_fill_response(&response, packet->header, RCODE_BUSY, NULL, 0);
out:
fw_core_handle_response(&ohci->card, &response);
}
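/*
 * Requests addressed to the local node never go out on the wire: config ROM
 * reads and the serial bus resource lock registers are serviced right here,
 * and the ack that the hardware would normally provide is filled in by
 * software before the sender's callback runs.
 */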
static void handle_local_request(struct context *ctx, struct fw_packet *packet)
{
u64 offset, csr;
if (ctx == &ctx->ohci->at_request_ctx) {
packet->ack = ACK_PENDING;
packet->callback(packet, &ctx->ohci->card, packet->ack);
}
offset =
((unsigned long long)
HEADER_GET_OFFSET_HIGH(packet->header[1]) << 32) |
packet->header[2];
csr = offset - CSR_REGISTER_BASE;
/* Handle config rom reads. */
if (csr >= CSR_CONFIG_ROM && csr < CSR_CONFIG_ROM_END)
handle_local_rom(ctx->ohci, packet, csr);
else switch (csr) {
case CSR_BUS_MANAGER_ID:
case CSR_BANDWIDTH_AVAILABLE:
case CSR_CHANNELS_AVAILABLE_HI:
case CSR_CHANNELS_AVAILABLE_LO:
handle_local_lock(ctx->ohci, packet, csr);
break;
default:
if (ctx == &ctx->ohci->at_request_ctx)
fw_core_handle_request(&ctx->ohci->card, packet);
else
fw_core_handle_response(&ctx->ohci->card, packet);
break;
}
if (ctx == &ctx->ohci->at_response_ctx) {
packet->ack = ACK_COMPLETE;
packet->callback(packet, &ctx->ohci->card, packet->ack);
}
}
static u32 get_cycle_time(struct fw_ohci *ohci);
static void at_context_transmit(struct context *ctx, struct fw_packet *packet)
{
unsigned long flags;
int ret;
spin_lock_irqsave(&ctx->ohci->lock, flags);
if (HEADER_GET_DESTINATION(packet->header[0]) == ctx->ohci->node_id &&
ctx->ohci->generation == packet->generation) {
spin_unlock_irqrestore(&ctx->ohci->lock, flags);
// Timestamping on behalf of the hardware.
packet->timestamp = cycle_time_to_ohci_tstamp(get_cycle_time(ctx->ohci));
handle_local_request(ctx, packet);
return;
}
ret = at_context_queue_packet(ctx, packet);
spin_unlock_irqrestore(&ctx->ohci->lock, flags);
if (ret < 0) {
// Timestamping on behalf of the hardware.
packet->timestamp = cycle_time_to_ohci_tstamp(get_cycle_time(ctx->ohci));
packet->callback(packet, &ctx->ohci->card, packet->ack);
}
}
static void detect_dead_context(struct fw_ohci *ohci,
const char *name, unsigned int regs)
{
u32 ctl;
ctl = reg_read(ohci, CONTROL_SET(regs));
if (ctl & CONTEXT_DEAD)
ohci_err(ohci, "DMA context %s has stopped, error code: %s\n",
name, evts[ctl & 0x1f]);
}
static void handle_dead_contexts(struct fw_ohci *ohci)
{
unsigned int i;
char name[8];
detect_dead_context(ohci, "ATReq", OHCI1394_AsReqTrContextBase);
detect_dead_context(ohci, "ATRsp", OHCI1394_AsRspTrContextBase);
detect_dead_context(ohci, "ARReq", OHCI1394_AsReqRcvContextBase);
detect_dead_context(ohci, "ARRsp", OHCI1394_AsRspRcvContextBase);
for (i = 0; i < 32; ++i) {
if (!(ohci->it_context_support & (1 << i)))
continue;
sprintf(name, "IT%u", i);
detect_dead_context(ohci, name, OHCI1394_IsoXmitContextBase(i));
}
for (i = 0; i < 32; ++i) {
if (!(ohci->ir_context_support & (1 << i)))
continue;
sprintf(name, "IR%u", i);
detect_dead_context(ohci, name, OHCI1394_IsoRcvContextBase(i));
}
/* TODO: maybe try to flush and restart the dead contexts */
}
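/*
 * Flatten the isochronous cycle timer register into 24.576 MHz ticks:
 * ticks = cycleOffset + 3072 * cycleCount + 3072 * 8000 * seconds,
 * with cycleOffset in bits 11..0, cycleCount in bits 24..12 and the
 * seconds count in bits 31..25.
 */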
static u32 cycle_timer_ticks(u32 cycle_timer)
{
u32 ticks;
ticks = cycle_timer & 0xfff;
ticks += 3072 * ((cycle_timer >> 12) & 0x1fff);
ticks += (3072 * 8000) * (cycle_timer >> 25);
return ticks;
}
/*
* Some controllers exhibit one or more of the following bugs when updating the
* iso cycle timer register:
* - When the lowest six bits are wrapping around to zero, a read that happens
* at the same time will return garbage in the lowest ten bits.
* - When the cycleOffset field wraps around to zero, the cycleCount field is
* not incremented for about 60 ns.
* - Occasionally, the entire register reads zero.
*
* To catch these, we read the register three times and ensure that the
 * differences between consecutive reads are approximately the same, i.e.
 * neither is more than twice the other. Furthermore, any negative difference indicates an
* error. (A PCI read should take at least 20 ticks of the 24.576 MHz timer to
* execute, so we have enough precision to compute the ratio of the differences.)
*/
static u32 get_cycle_time(struct fw_ohci *ohci)
{
u32 c0, c1, c2;
u32 t0, t1, t2;
s32 diff01, diff12;
int i;
c2 = reg_read(ohci, OHCI1394_IsochronousCycleTimer);
if (ohci->quirks & QUIRK_CYCLE_TIMER) {
i = 0;
c1 = c2;
c2 = reg_read(ohci, OHCI1394_IsochronousCycleTimer);
do {
c0 = c1;
c1 = c2;
c2 = reg_read(ohci, OHCI1394_IsochronousCycleTimer);
t0 = cycle_timer_ticks(c0);
t1 = cycle_timer_ticks(c1);
t2 = cycle_timer_ticks(c2);
diff01 = t1 - t0;
diff12 = t2 - t1;
} while ((diff01 <= 0 || diff12 <= 0 ||
diff01 / diff12 >= 2 || diff12 / diff01 >= 2)
&& i++ < 20);
}
return c2;
}
/*
* This function has to be called at least every 64 seconds. The bus_time
* field stores not only the upper 25 bits of the BUS_TIME register but also
* the most significant bit of the cycle timer in bit 6 so that we can detect
* changes in this bit.
*/
static u32 update_bus_time(struct fw_ohci *ohci)
{
u32 cycle_time_seconds = get_cycle_time(ohci) >> 25;
if (unlikely(!ohci->bus_time_running)) {
reg_write(ohci, OHCI1394_IntMaskSet, OHCI1394_cycle64Seconds);
ohci->bus_time = (lower_32_bits(ktime_get_seconds()) & ~0x7f) |
(cycle_time_seconds & 0x40);
ohci->bus_time_running = true;
}
if ((ohci->bus_time & 0x40) != (cycle_time_seconds & 0x40))
ohci->bus_time += 0x40;
return ohci->bus_time | cycle_time_seconds;
}
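/*
 * Read the PHY's per-port status: select the port via PHY register 7, then
 * read paged register 8 and map its low nibble to "child", "parent" or
 * "not connected".
 */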
static int get_status_for_port(struct fw_ohci *ohci, int port_index)
{
int reg;
mutex_lock(&ohci->phy_reg_mutex);
reg = write_phy_reg(ohci, 7, port_index);
if (reg >= 0)
reg = read_phy_reg(ohci, 8);
mutex_unlock(&ohci->phy_reg_mutex);
if (reg < 0)
return reg;
switch (reg & 0x0f) {
case 0x06:
return 2; /* is child node (connected to parent node) */
case 0x0e:
return 3; /* is parent node (connected to child node) */
}
return 1; /* not connected */
}
static int get_self_id_pos(struct fw_ohci *ohci, u32 self_id,
int self_id_count)
{
int i;
u32 entry;
for (i = 0; i < self_id_count; i++) {
entry = ohci->self_id_buffer[i];
if ((self_id & 0xff000000) == (entry & 0xff000000))
return -1;
if ((self_id & 0xff000000) < (entry & 0xff000000))
return i;
}
return i;
}
static int initiated_reset(struct fw_ohci *ohci)
{
int reg;
int ret = 0;
mutex_lock(&ohci->phy_reg_mutex);
reg = write_phy_reg(ohci, 7, 0xe0); /* Select page 7 */
if (reg >= 0) {
reg = read_phy_reg(ohci, 8);
reg |= 0x40;
reg = write_phy_reg(ohci, 8, reg); /* set PMODE bit */
if (reg >= 0) {
reg = read_phy_reg(ohci, 12); /* read register 12 */
if (reg >= 0) {
if ((reg & 0x08) == 0x08) {
/* bit 3 indicates "initiated reset" */
ret = 0x2;
}
}
}
}
mutex_unlock(&ohci->phy_reg_mutex);
return ret;
}
/*
* TI TSB82AA2B and TSB12LV26 do not receive the selfID of a locally
* attached TSB41BA3D phy; see http://www.ti.com/litv/pdf/sllz059.
* Construct the selfID from phy register contents.
*/
static int find_and_insert_self_id(struct fw_ohci *ohci, int self_id_count)
{
int reg, i, pos, status;
/* link active 1, speed 3, bridge 0, contender 1, more packets 0 */
u32 self_id = 0x8040c800;
reg = reg_read(ohci, OHCI1394_NodeID);
if (!(reg & OHCI1394_NodeID_idValid)) {
ohci_notice(ohci,
"node ID not valid, new bus reset in progress\n");
return -EBUSY;
}
self_id |= ((reg & 0x3f) << 24); /* phy ID */
reg = ohci_read_phy_reg(&ohci->card, 4);
if (reg < 0)
return reg;
self_id |= ((reg & 0x07) << 8); /* power class */
reg = ohci_read_phy_reg(&ohci->card, 1);
if (reg < 0)
return reg;
self_id |= ((reg & 0x3f) << 16); /* gap count */
for (i = 0; i < 3; i++) {
status = get_status_for_port(ohci, i);
if (status < 0)
return status;
self_id |= ((status & 0x3) << (6 - (i * 2)));
}
self_id |= initiated_reset(ohci);
pos = get_self_id_pos(ohci, self_id, self_id_count);
if (pos >= 0) {
memmove(&(ohci->self_id_buffer[pos+1]),
&(ohci->self_id_buffer[pos]),
(self_id_count - pos) * sizeof(*ohci->self_id_buffer));
ohci->self_id_buffer[pos] = self_id;
self_id_count++;
}
return self_id_count;
}
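/*
 * Deferred handler for the selfIDComplete interrupt: validate the self-ID
 * buffer, pick up the new node ID and generation, drain the AT contexts,
 * complete any pending config ROM update and finally hand the new topology
 * to the core.
 */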
static void bus_reset_work(struct work_struct *work)
{
struct fw_ohci *ohci =
container_of(work, struct fw_ohci, bus_reset_work);
int self_id_count, generation, new_generation, i, j;
u32 reg;
void *free_rom = NULL;
dma_addr_t free_rom_bus = 0;
bool is_new_root;
reg = reg_read(ohci, OHCI1394_NodeID);
if (!(reg & OHCI1394_NodeID_idValid)) {
ohci_notice(ohci,
"node ID not valid, new bus reset in progress\n");
return;
}
if ((reg & OHCI1394_NodeID_nodeNumber) == 63) {
		ohci_notice(ohci, "misconfigured bus\n");
return;
}
ohci->node_id = reg & (OHCI1394_NodeID_busNumber |
OHCI1394_NodeID_nodeNumber);
is_new_root = (reg & OHCI1394_NodeID_root) != 0;
if (!(ohci->is_root && is_new_root))
reg_write(ohci, OHCI1394_LinkControlSet,
OHCI1394_LinkControl_cycleMaster);
ohci->is_root = is_new_root;
reg = reg_read(ohci, OHCI1394_SelfIDCount);
if (reg & OHCI1394_SelfIDCount_selfIDError) {
ohci_notice(ohci, "self ID receive error\n");
return;
}
/*
* The count in the SelfIDCount register is the number of
* bytes in the self ID receive buffer. Since we also receive
* the inverted quadlets and a header quadlet, we shift one
* bit extra to get the actual number of self IDs.
*/
self_id_count = (reg >> 3) & 0xff;
if (self_id_count > 252) {
ohci_notice(ohci, "bad selfIDSize (%08x)\n", reg);
return;
}
generation = (cond_le32_to_cpu(ohci->self_id[0]) >> 16) & 0xff;
rmb();
for (i = 1, j = 0; j < self_id_count; i += 2, j++) {
u32 id = cond_le32_to_cpu(ohci->self_id[i]);
u32 id2 = cond_le32_to_cpu(ohci->self_id[i + 1]);
if (id != ~id2) {
/*
* If the invalid data looks like a cycle start packet,
* it's likely to be the result of the cycle master
* having a wrong gap count. In this case, the self IDs
* so far are valid and should be processed so that the
* bus manager can then correct the gap count.
*/
if (id == 0xffff008f) {
ohci_notice(ohci, "ignoring spurious self IDs\n");
self_id_count = j;
break;
}
ohci_notice(ohci, "bad self ID %d/%d (%08x != ~%08x)\n",
j, self_id_count, id, id2);
return;
}
ohci->self_id_buffer[j] = id;
}
if (ohci->quirks & QUIRK_TI_SLLZ059) {
self_id_count = find_and_insert_self_id(ohci, self_id_count);
if (self_id_count < 0) {
ohci_notice(ohci,
"could not construct local self ID\n");
return;
}
}
if (self_id_count == 0) {
ohci_notice(ohci, "no self IDs\n");
return;
}
rmb();
/*
* Check the consistency of the self IDs we just read. The
* problem we face is that a new bus reset can start while we
* read out the self IDs from the DMA buffer. If this happens,
* the DMA buffer will be overwritten with new self IDs and we
* will read out inconsistent data. The OHCI specification
* (section 11.2) recommends a technique similar to
* linux/seqlock.h, where we remember the generation of the
* self IDs in the buffer before reading them out and compare
* it to the current generation after reading them out. If
* the two generations match we know we have a consistent set
* of self IDs.
*/
new_generation = (reg_read(ohci, OHCI1394_SelfIDCount) >> 16) & 0xff;
if (new_generation != generation) {
ohci_notice(ohci, "new bus reset, discarding self ids\n");
return;
}
/* FIXME: Document how the locking works. */
spin_lock_irq(&ohci->lock);
ohci->generation = -1; /* prevent AT packet queueing */
context_stop(&ohci->at_request_ctx);
context_stop(&ohci->at_response_ctx);
spin_unlock_irq(&ohci->lock);
/*
* Per OHCI 1.2 draft, clause 7.2.3.3, hardware may leave unsent
* packets in the AT queues and software needs to drain them.
* Some OHCI 1.1 controllers (JMicron) apparently require this too.
*/
at_context_flush(&ohci->at_request_ctx);
at_context_flush(&ohci->at_response_ctx);
spin_lock_irq(&ohci->lock);
ohci->generation = generation;
reg_write(ohci, OHCI1394_IntEventClear, OHCI1394_busReset);
if (ohci->quirks & QUIRK_RESET_PACKET)
ohci->request_generation = generation;
/*
* This next bit is unrelated to the AT context stuff but we
* have to do it under the spinlock also. If a new config rom
* was set up before this reset, the old one is now no longer
* in use and we can free it. Update the config rom pointers
* to point to the current config rom and clear the
* next_config_rom pointer so a new update can take place.
*/
if (ohci->next_config_rom != NULL) {
if (ohci->next_config_rom != ohci->config_rom) {
free_rom = ohci->config_rom;
free_rom_bus = ohci->config_rom_bus;
}
ohci->config_rom = ohci->next_config_rom;
ohci->config_rom_bus = ohci->next_config_rom_bus;
ohci->next_config_rom = NULL;
/*
* Restore config_rom image and manually update
* config_rom registers. Writing the header quadlet
* will indicate that the config rom is ready, so we
* do that last.
*/
reg_write(ohci, OHCI1394_BusOptions,
be32_to_cpu(ohci->config_rom[2]));
ohci->config_rom[0] = ohci->next_header;
reg_write(ohci, OHCI1394_ConfigROMhdr,
be32_to_cpu(ohci->next_header));
}
if (param_remote_dma) {
reg_write(ohci, OHCI1394_PhyReqFilterHiSet, ~0);
reg_write(ohci, OHCI1394_PhyReqFilterLoSet, ~0);
}
spin_unlock_irq(&ohci->lock);
if (free_rom)
dmam_free_coherent(ohci->card.device, CONFIG_ROM_SIZE, free_rom, free_rom_bus);
log_selfids(ohci, generation, self_id_count);
fw_core_handle_bus_reset(&ohci->card, ohci->node_id, generation,
self_id_count, ohci->self_id_buffer,
ohci->csr_state_setclear_abdicate);
ohci->csr_state_setclear_abdicate = false;
}
static irqreturn_t irq_handler(int irq, void *data)
{
struct fw_ohci *ohci = data;
u32 event, iso_event;
int i;
event = reg_read(ohci, OHCI1394_IntEventClear);
if (!event || !~event)
return IRQ_NONE;
/*
* busReset and postedWriteErr must not be cleared yet
* (OHCI 1.1 clauses 7.2.3.2 and 13.2.8.1)
*/
reg_write(ohci, OHCI1394_IntEventClear,
event & ~(OHCI1394_busReset | OHCI1394_postedWriteErr));
log_irqs(ohci, event);
if (event & OHCI1394_selfIDComplete)
queue_work(selfid_workqueue, &ohci->bus_reset_work);
if (event & OHCI1394_RQPkt)
tasklet_schedule(&ohci->ar_request_ctx.tasklet);
if (event & OHCI1394_RSPkt)
tasklet_schedule(&ohci->ar_response_ctx.tasklet);
if (event & OHCI1394_reqTxComplete)
tasklet_schedule(&ohci->at_request_ctx.tasklet);
if (event & OHCI1394_respTxComplete)
tasklet_schedule(&ohci->at_response_ctx.tasklet);
if (event & OHCI1394_isochRx) {
iso_event = reg_read(ohci, OHCI1394_IsoRecvIntEventClear);
reg_write(ohci, OHCI1394_IsoRecvIntEventClear, iso_event);
while (iso_event) {
i = ffs(iso_event) - 1;
tasklet_schedule(
&ohci->ir_context_list[i].context.tasklet);
iso_event &= ~(1 << i);
}
}
if (event & OHCI1394_isochTx) {
iso_event = reg_read(ohci, OHCI1394_IsoXmitIntEventClear);
reg_write(ohci, OHCI1394_IsoXmitIntEventClear, iso_event);
while (iso_event) {
i = ffs(iso_event) - 1;
tasklet_schedule(
&ohci->it_context_list[i].context.tasklet);
iso_event &= ~(1 << i);
}
}
if (unlikely(event & OHCI1394_regAccessFail))
ohci_err(ohci, "register access failure\n");
if (unlikely(event & OHCI1394_postedWriteErr)) {
reg_read(ohci, OHCI1394_PostedWriteAddressHi);
reg_read(ohci, OHCI1394_PostedWriteAddressLo);
reg_write(ohci, OHCI1394_IntEventClear,
OHCI1394_postedWriteErr);
if (printk_ratelimit())
ohci_err(ohci, "PCI posted write error\n");
}
if (unlikely(event & OHCI1394_cycleTooLong)) {
if (printk_ratelimit())
ohci_notice(ohci, "isochronous cycle too long\n");
reg_write(ohci, OHCI1394_LinkControlSet,
OHCI1394_LinkControl_cycleMaster);
}
if (unlikely(event & OHCI1394_cycleInconsistent)) {
/*
* We need to clear this event bit in order to make
* cycleMatch isochronous I/O work. In theory we should
* stop active cycleMatch iso contexts now and restart
* them at least two cycles later. (FIXME?)
*/
if (printk_ratelimit())
ohci_notice(ohci, "isochronous cycle inconsistent\n");
}
if (unlikely(event & OHCI1394_unrecoverableError))
handle_dead_contexts(ohci);
if (event & OHCI1394_cycle64Seconds) {
spin_lock(&ohci->lock);
update_bus_time(ohci);
spin_unlock(&ohci->lock);
} else
flush_writes(ohci);
return IRQ_HANDLED;
}
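/*
 * Request a soft reset and poll for up to ~500 ms until the controller
 * clears the softReset bit, bailing out early if the card was ejected.
 */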
static int software_reset(struct fw_ohci *ohci)
{
u32 val;
int i;
reg_write(ohci, OHCI1394_HCControlSet, OHCI1394_HCControl_softReset);
for (i = 0; i < 500; i++) {
val = reg_read(ohci, OHCI1394_HCControlSet);
if (!~val)
return -ENODEV; /* Card was ejected. */
if (!(val & OHCI1394_HCControl_softReset))
return 0;
msleep(1);
}
return -EBUSY;
}
static void copy_config_rom(__be32 *dest, const __be32 *src, size_t length)
{
size_t size = length * 4;
memcpy(dest, src, size);
if (size < CONFIG_ROM_SIZE)
memset(&dest[length], 0, CONFIG_ROM_SIZE - size);
}
static int configure_1394a_enhancements(struct fw_ohci *ohci)
{
bool enable_1394a;
int ret, clear, set, offset;
/* Check if the driver should configure link and PHY. */
if (!(reg_read(ohci, OHCI1394_HCControlSet) &
OHCI1394_HCControl_programPhyEnable))
return 0;
/* Paranoia: check whether the PHY supports 1394a, too. */
enable_1394a = false;
ret = read_phy_reg(ohci, 2);
if (ret < 0)
return ret;
if ((ret & PHY_EXTENDED_REGISTERS) == PHY_EXTENDED_REGISTERS) {
ret = read_paged_phy_reg(ohci, 1, 8);
if (ret < 0)
return ret;
if (ret >= 1)
enable_1394a = true;
}
if (ohci->quirks & QUIRK_NO_1394A)
enable_1394a = false;
/* Configure PHY and link consistently. */
if (enable_1394a) {
clear = 0;
set = PHY_ENABLE_ACCEL | PHY_ENABLE_MULTI;
} else {
clear = PHY_ENABLE_ACCEL | PHY_ENABLE_MULTI;
set = 0;
}
ret = update_phy_reg(ohci, 5, clear, set);
if (ret < 0)
return ret;
if (enable_1394a)
offset = OHCI1394_HCControlSet;
else
offset = OHCI1394_HCControlClear;
reg_write(ohci, offset, OHCI1394_HCControl_aPhyEnhanceEnable);
/* Clean up: configuration has been taken care of. */
reg_write(ohci, OHCI1394_HCControlClear,
OHCI1394_HCControl_programPhyEnable);
return 0;
}
static int probe_tsb41ba3d(struct fw_ohci *ohci)
{
/* TI vendor ID = 0x080028, TSB41BA3D product ID = 0x833005 (sic) */
static const u8 id[] = { 0x08, 0x00, 0x28, 0x83, 0x30, 0x05, };
int reg, i;
reg = read_phy_reg(ohci, 2);
if (reg < 0)
return reg;
if ((reg & PHY_EXTENDED_REGISTERS) != PHY_EXTENDED_REGISTERS)
return 0;
for (i = ARRAY_SIZE(id) - 1; i >= 0; i--) {
reg = read_paged_phy_reg(ohci, 1, i + 10);
if (reg < 0)
return reg;
if (reg != id[i])
return 0;
}
return 1;
}
static int ohci_enable(struct fw_card *card,
const __be32 *config_rom, size_t length)
{
struct fw_ohci *ohci = fw_ohci(card);
u32 lps, version, irqs;
int i, ret;
ret = software_reset(ohci);
if (ret < 0) {
ohci_err(ohci, "failed to reset ohci card\n");
return ret;
}
/*
* Now enable LPS, which we need in order to start accessing
* most of the registers. In fact, on some cards (ALI M5251),
* accessing registers in the SClk domain without LPS enabled
* will lock up the machine. Wait 50msec to make sure we have
* full link enabled. However, with some cards (well, at least
* a JMicron PCIe card), we have to try again sometimes.
*
* TI TSB82AA2 + TSB81BA3(A) cards signal LPS enabled early but
	 * cannot actually use the phy at that time. These also need a pause
	 * of tens of milliseconds between the LPS write and the first phy access.
*/
reg_write(ohci, OHCI1394_HCControlSet,
OHCI1394_HCControl_LPS |
OHCI1394_HCControl_postedWriteEnable);
flush_writes(ohci);
for (lps = 0, i = 0; !lps && i < 3; i++) {
msleep(50);
lps = reg_read(ohci, OHCI1394_HCControlSet) &
OHCI1394_HCControl_LPS;
}
if (!lps) {
ohci_err(ohci, "failed to set Link Power Status\n");
return -EIO;
}
if (ohci->quirks & QUIRK_TI_SLLZ059) {
ret = probe_tsb41ba3d(ohci);
if (ret < 0)
return ret;
if (ret)
ohci_notice(ohci, "local TSB41BA3D phy\n");
else
ohci->quirks &= ~QUIRK_TI_SLLZ059;
}
reg_write(ohci, OHCI1394_HCControlClear,
OHCI1394_HCControl_noByteSwapData);
reg_write(ohci, OHCI1394_SelfIDBuffer, ohci->self_id_bus);
reg_write(ohci, OHCI1394_LinkControlSet,
OHCI1394_LinkControl_cycleTimerEnable |
OHCI1394_LinkControl_cycleMaster);
reg_write(ohci, OHCI1394_ATRetries,
OHCI1394_MAX_AT_REQ_RETRIES |
(OHCI1394_MAX_AT_RESP_RETRIES << 4) |
(OHCI1394_MAX_PHYS_RESP_RETRIES << 8) |
(200 << 16));
ohci->bus_time_running = false;
for (i = 0; i < 32; i++)
if (ohci->ir_context_support & (1 << i))
reg_write(ohci, OHCI1394_IsoRcvContextControlClear(i),
IR_CONTEXT_MULTI_CHANNEL_MODE);
version = reg_read(ohci, OHCI1394_Version) & 0x00ff00ff;
if (version >= OHCI_VERSION_1_1) {
reg_write(ohci, OHCI1394_InitialChannelsAvailableHi,
0xfffffffe);
card->broadcast_channel_auto_allocated = true;
}
/* Get implemented bits of the priority arbitration request counter. */
reg_write(ohci, OHCI1394_FairnessControl, 0x3f);
ohci->pri_req_max = reg_read(ohci, OHCI1394_FairnessControl) & 0x3f;
reg_write(ohci, OHCI1394_FairnessControl, 0);
card->priority_budget_implemented = ohci->pri_req_max != 0;
reg_write(ohci, OHCI1394_PhyUpperBound, FW_MAX_PHYSICAL_RANGE >> 16);
reg_write(ohci, OHCI1394_IntEventClear, ~0);
reg_write(ohci, OHCI1394_IntMaskClear, ~0);
ret = configure_1394a_enhancements(ohci);
if (ret < 0)
return ret;
/* Activate link_on bit and contender bit in our self ID packets.*/
ret = ohci_update_phy_reg(card, 4, 0, PHY_LINK_ACTIVE | PHY_CONTENDER);
if (ret < 0)
return ret;
/*
* When the link is not yet enabled, the atomic config rom
* update mechanism described below in ohci_set_config_rom()
* is not active. We have to update ConfigRomHeader and
* BusOptions manually, and the write to ConfigROMmap takes
* effect immediately. We tie this to the enabling of the
* link, so we have a valid config rom before enabling - the
* OHCI requires that ConfigROMhdr and BusOptions have valid
* values before enabling.
*
* However, when the ConfigROMmap is written, some controllers
* always read back quadlets 0 and 2 from the config rom to
* the ConfigRomHeader and BusOptions registers on bus reset.
* They shouldn't do that in this initial case where the link
* isn't enabled. This means we have to use the same
	 * workaround here, setting the bus header to 0 and then writing
	 * the right values in the bus reset tasklet.
*/
if (config_rom) {
ohci->next_config_rom = dmam_alloc_coherent(ohci->card.device, CONFIG_ROM_SIZE,
&ohci->next_config_rom_bus, GFP_KERNEL);
if (ohci->next_config_rom == NULL)
return -ENOMEM;
copy_config_rom(ohci->next_config_rom, config_rom, length);
} else {
/*
* In the suspend case, config_rom is NULL, which
* means that we just reuse the old config rom.
*/
ohci->next_config_rom = ohci->config_rom;
ohci->next_config_rom_bus = ohci->config_rom_bus;
}
ohci->next_header = ohci->next_config_rom[0];
ohci->next_config_rom[0] = 0;
reg_write(ohci, OHCI1394_ConfigROMhdr, 0);
reg_write(ohci, OHCI1394_BusOptions,
be32_to_cpu(ohci->next_config_rom[2]));
reg_write(ohci, OHCI1394_ConfigROMmap, ohci->next_config_rom_bus);
reg_write(ohci, OHCI1394_AsReqFilterHiSet, 0x80000000);
irqs = OHCI1394_reqTxComplete | OHCI1394_respTxComplete |
OHCI1394_RQPkt | OHCI1394_RSPkt |
OHCI1394_isochTx | OHCI1394_isochRx |
OHCI1394_postedWriteErr |
OHCI1394_selfIDComplete |
OHCI1394_regAccessFail |
OHCI1394_cycleInconsistent |
OHCI1394_unrecoverableError |
OHCI1394_cycleTooLong |
OHCI1394_masterIntEnable;
if (param_debug & OHCI_PARAM_DEBUG_BUSRESETS)
irqs |= OHCI1394_busReset;
reg_write(ohci, OHCI1394_IntMaskSet, irqs);
reg_write(ohci, OHCI1394_HCControlSet,
OHCI1394_HCControl_linkEnable |
OHCI1394_HCControl_BIBimageValid);
reg_write(ohci, OHCI1394_LinkControlSet,
OHCI1394_LinkControl_rcvSelfID |
OHCI1394_LinkControl_rcvPhyPkt);
ar_context_run(&ohci->ar_request_ctx);
ar_context_run(&ohci->ar_response_ctx);
flush_writes(ohci);
/* We are ready to go, reset bus to finish initialization. */
fw_schedule_bus_reset(&ohci->card, false, true);
return 0;
}
static int ohci_set_config_rom(struct fw_card *card,
const __be32 *config_rom, size_t length)
{
struct fw_ohci *ohci;
__be32 *next_config_rom;
dma_addr_t next_config_rom_bus;
ohci = fw_ohci(card);
/*
* When the OHCI controller is enabled, the config rom update
* mechanism is a bit tricky, but easy enough to use. See
* section 5.5.6 in the OHCI specification.
*
* The OHCI controller caches the new config rom address in a
* shadow register (ConfigROMmapNext) and needs a bus reset
* for the changes to take place. When the bus reset is
* detected, the controller loads the new values for the
* ConfigRomHeader and BusOptions registers from the specified
* config rom and loads ConfigROMmap from the ConfigROMmapNext
* shadow register. All automatically and atomically.
*
* Now, there's a twist to this story. The automatic load of
* ConfigRomHeader and BusOptions doesn't honor the
* noByteSwapData bit, so with a be32 config rom, the
	 * controller will load be32 values into these registers
	 * during the atomic update, even on little endian
* architectures. The workaround we use is to put a 0 in the
* header quadlet; 0 is endian agnostic and means that the
* config rom isn't ready yet. In the bus reset tasklet we
* then set up the real values for the two registers.
*
* We use ohci->lock to avoid racing with the code that sets
* ohci->next_config_rom to NULL (see bus_reset_work).
*/
next_config_rom = dmam_alloc_coherent(ohci->card.device, CONFIG_ROM_SIZE,
&next_config_rom_bus, GFP_KERNEL);
if (next_config_rom == NULL)
return -ENOMEM;
spin_lock_irq(&ohci->lock);
/*
* If there is not an already pending config_rom update,
* push our new allocation into the ohci->next_config_rom
* and then mark the local variable as null so that we
* won't deallocate the new buffer.
*
* OTOH, if there is a pending config_rom update, just
* use that buffer with the new config_rom data, and
* let this routine free the unused DMA allocation.
*/
if (ohci->next_config_rom == NULL) {
ohci->next_config_rom = next_config_rom;
ohci->next_config_rom_bus = next_config_rom_bus;
next_config_rom = NULL;
}
copy_config_rom(ohci->next_config_rom, config_rom, length);
ohci->next_header = config_rom[0];
ohci->next_config_rom[0] = 0;
reg_write(ohci, OHCI1394_ConfigROMmap, ohci->next_config_rom_bus);
spin_unlock_irq(&ohci->lock);
/* If we didn't use the DMA allocation, delete it. */
if (next_config_rom != NULL) {
dmam_free_coherent(ohci->card.device, CONFIG_ROM_SIZE, next_config_rom,
next_config_rom_bus);
}
/*
* Now initiate a bus reset to have the changes take
* effect. We clean up the old config rom memory and DMA
* mappings in the bus reset tasklet, since the OHCI
* controller could need to access it before the bus reset
* takes effect.
*/
fw_schedule_bus_reset(&ohci->card, true, true);
return 0;
}
static void ohci_send_request(struct fw_card *card, struct fw_packet *packet)
{
struct fw_ohci *ohci = fw_ohci(card);
at_context_transmit(&ohci->at_request_ctx, packet);
}
static void ohci_send_response(struct fw_card *card, struct fw_packet *packet)
{
struct fw_ohci *ohci = fw_ohci(card);
at_context_transmit(&ohci->at_response_ctx, packet);
}
static int ohci_cancel_packet(struct fw_card *card, struct fw_packet *packet)
{
struct fw_ohci *ohci = fw_ohci(card);
struct context *ctx = &ohci->at_request_ctx;
struct driver_data *driver_data = packet->driver_data;
int ret = -ENOENT;
tasklet_disable_in_atomic(&ctx->tasklet);
if (packet->ack != 0)
goto out;
if (packet->payload_mapped)
dma_unmap_single(ohci->card.device, packet->payload_bus,
packet->payload_length, DMA_TO_DEVICE);
log_ar_at_event(ohci, 'T', packet->speed, packet->header, 0x20);
driver_data->packet = NULL;
packet->ack = RCODE_CANCELLED;
// Timestamping on behalf of the hardware.
packet->timestamp = cycle_time_to_ohci_tstamp(get_cycle_time(ohci));
packet->callback(packet, &ohci->card, packet->ack);
ret = 0;
out:
tasklet_enable(&ctx->tasklet);
return ret;
}
static int ohci_enable_phys_dma(struct fw_card *card,
int node_id, int generation)
{
struct fw_ohci *ohci = fw_ohci(card);
unsigned long flags;
int n, ret = 0;
if (param_remote_dma)
return 0;
/*
* FIXME: Make sure this bitmask is cleared when we clear the busReset
* interrupt bit. Clear physReqResourceAllBuses on bus reset.
*/
spin_lock_irqsave(&ohci->lock, flags);
if (ohci->generation != generation) {
ret = -ESTALE;
goto out;
}
/*
* Note, if the node ID contains a non-local bus ID, physical DMA is
* enabled for _all_ nodes on remote buses.
*/
n = (node_id & 0xffc0) == LOCAL_BUS ? node_id & 0x3f : 63;
if (n < 32)
reg_write(ohci, OHCI1394_PhyReqFilterLoSet, 1 << n);
else
reg_write(ohci, OHCI1394_PhyReqFilterHiSet, 1 << (n - 32));
flush_writes(ohci);
out:
spin_unlock_irqrestore(&ohci->lock, flags);
return ret;
}
static u32 ohci_read_csr(struct fw_card *card, int csr_offset)
{
struct fw_ohci *ohci = fw_ohci(card);
unsigned long flags;
u32 value;
switch (csr_offset) {
case CSR_STATE_CLEAR:
case CSR_STATE_SET:
if (ohci->is_root &&
(reg_read(ohci, OHCI1394_LinkControlSet) &
OHCI1394_LinkControl_cycleMaster))
value = CSR_STATE_BIT_CMSTR;
else
value = 0;
if (ohci->csr_state_setclear_abdicate)
value |= CSR_STATE_BIT_ABDICATE;
return value;
case CSR_NODE_IDS:
return reg_read(ohci, OHCI1394_NodeID) << 16;
case CSR_CYCLE_TIME:
return get_cycle_time(ohci);
case CSR_BUS_TIME:
/*
* We might be called just after the cycle timer has wrapped
* around but just before the cycle64Seconds handler, so we
* better check here, too, if the bus time needs to be updated.
*/
spin_lock_irqsave(&ohci->lock, flags);
value = update_bus_time(ohci);
spin_unlock_irqrestore(&ohci->lock, flags);
return value;
case CSR_BUSY_TIMEOUT:
value = reg_read(ohci, OHCI1394_ATRetries);
return (value >> 4) & 0x0ffff00f;
case CSR_PRIORITY_BUDGET:
return (reg_read(ohci, OHCI1394_FairnessControl) & 0x3f) |
(ohci->pri_req_max << 8);
default:
WARN_ON(1);
return 0;
}
}
static void ohci_write_csr(struct fw_card *card, int csr_offset, u32 value)
{
struct fw_ohci *ohci = fw_ohci(card);
unsigned long flags;
switch (csr_offset) {
case CSR_STATE_CLEAR:
if ((value & CSR_STATE_BIT_CMSTR) && ohci->is_root) {
reg_write(ohci, OHCI1394_LinkControlClear,
OHCI1394_LinkControl_cycleMaster);
flush_writes(ohci);
}
if (value & CSR_STATE_BIT_ABDICATE)
ohci->csr_state_setclear_abdicate = false;
break;
case CSR_STATE_SET:
if ((value & CSR_STATE_BIT_CMSTR) && ohci->is_root) {
reg_write(ohci, OHCI1394_LinkControlSet,
OHCI1394_LinkControl_cycleMaster);
flush_writes(ohci);
}
if (value & CSR_STATE_BIT_ABDICATE)
ohci->csr_state_setclear_abdicate = true;
break;
case CSR_NODE_IDS:
reg_write(ohci, OHCI1394_NodeID, value >> 16);
flush_writes(ohci);
break;
case CSR_CYCLE_TIME:
reg_write(ohci, OHCI1394_IsochronousCycleTimer, value);
reg_write(ohci, OHCI1394_IntEventSet,
OHCI1394_cycleInconsistent);
flush_writes(ohci);
break;
case CSR_BUS_TIME:
spin_lock_irqsave(&ohci->lock, flags);
ohci->bus_time = (update_bus_time(ohci) & 0x40) |
(value & ~0x7f);
spin_unlock_irqrestore(&ohci->lock, flags);
break;
case CSR_BUSY_TIMEOUT:
value = (value & 0xf) | ((value & 0xf) << 4) |
((value & 0xf) << 8) | ((value & 0x0ffff000) << 4);
reg_write(ohci, OHCI1394_ATRetries, value);
flush_writes(ohci);
break;
case CSR_PRIORITY_BUDGET:
reg_write(ohci, OHCI1394_FairnessControl, value & 0x3f);
flush_writes(ohci);
break;
default:
WARN_ON(1);
break;
}
}
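/*
 * Deliver the isochronous packet headers accumulated so far to the context's
 * single-channel callback and reset the header buffer.
 */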
static void flush_iso_completions(struct iso_context *ctx)
{
ctx->base.callback.sc(&ctx->base, ctx->last_timestamp,
ctx->header_length, ctx->header,
ctx->base.callback_data);
ctx->header_length = 0;
}
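/*
 * Append the header quadlets of one received packet from the DMA descriptor
 * area to the context's header page.  If the page would overflow, the
 * accumulated headers are flushed first (or the packet's header is dropped
 * when drop_overflow_headers is set).
 */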
static void copy_iso_headers(struct iso_context *ctx, const u32 *dma_hdr)
{
u32 *ctx_hdr;
if (ctx->header_length + ctx->base.header_size > PAGE_SIZE) {
if (ctx->base.drop_overflow_headers)
return;
flush_iso_completions(ctx);
}
ctx_hdr = ctx->header + ctx->header_length;
ctx->last_timestamp = (u16)le32_to_cpu((__force __le32)dma_hdr[0]);
/*
* The two iso header quadlets are byteswapped to little
* endian by the controller, but we want to present them
* as big endian for consistency with the bus endianness.
*/
if (ctx->base.header_size > 0)
ctx_hdr[0] = swab32(dma_hdr[1]); /* iso packet header */
if (ctx->base.header_size > 4)
ctx_hdr[1] = swab32(dma_hdr[0]); /* timestamp */
if (ctx->base.header_size > 8)
memcpy(&ctx_hdr[2], &dma_hdr[2], ctx->base.header_size - 8);
ctx->header_length += ctx->base.header_size;
}
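/*
 * Completion handler for isochronous receive contexts in packet-per-buffer
 * mode: returns 0 while the descriptor block is still in flight; otherwise
 * syncs the payload buffers for CPU access, copies the packet headers and
 * flushes completions when the last descriptor requested an interrupt.
 */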
static int handle_ir_packet_per_buffer(struct context *context,
struct descriptor *d,
struct descriptor *last)
{
struct iso_context *ctx =
container_of(context, struct iso_context, context);
struct descriptor *pd;
u32 buffer_dma;
for (pd = d; pd <= last; pd++)
if (pd->transfer_status)
break;
if (pd > last)
/* Descriptor(s) not done yet, stop iteration */
return 0;
while (!(d->control & cpu_to_le16(DESCRIPTOR_BRANCH_ALWAYS))) {
d++;
buffer_dma = le32_to_cpu(d->data_address);
dma_sync_single_range_for_cpu(context->ohci->card.device,
buffer_dma & PAGE_MASK,
buffer_dma & ~PAGE_MASK,
le16_to_cpu(d->req_count),
DMA_FROM_DEVICE);
}
copy_iso_headers(ctx, (u32 *) (last + 1));
if (last->control & cpu_to_le16(DESCRIPTOR_IRQ_ALWAYS))
flush_iso_completions(ctx);
return 1;
}
/* d == last because each descriptor block is only a single descriptor. */
static int handle_ir_buffer_fill(struct context *context,
struct descriptor *d,
struct descriptor *last)
{
struct iso_context *ctx =
container_of(context, struct iso_context, context);
unsigned int req_count, res_count, completed;
u32 buffer_dma;
req_count = le16_to_cpu(last->req_count);
res_count = le16_to_cpu(READ_ONCE(last->res_count));
completed = req_count - res_count;
buffer_dma = le32_to_cpu(last->data_address);
if (completed > 0) {
ctx->mc_buffer_bus = buffer_dma;
ctx->mc_completed = completed;
}
if (res_count != 0)
/* Descriptor(s) not done yet, stop iteration */
return 0;
dma_sync_single_range_for_cpu(context->ohci->card.device,
buffer_dma & PAGE_MASK,
buffer_dma & ~PAGE_MASK,
completed, DMA_FROM_DEVICE);
if (last->control & cpu_to_le16(DESCRIPTOR_IRQ_ALWAYS)) {
ctx->base.callback.mc(&ctx->base,
buffer_dma + completed,
ctx->base.callback_data);
ctx->mc_completed = 0;
}
return 1;
}
static void flush_ir_buffer_fill(struct iso_context *ctx)
{
dma_sync_single_range_for_cpu(ctx->context.ohci->card.device,
ctx->mc_buffer_bus & PAGE_MASK,
ctx->mc_buffer_bus & ~PAGE_MASK,
ctx->mc_completed, DMA_FROM_DEVICE);
ctx->base.callback.mc(&ctx->base,
ctx->mc_buffer_bus + ctx->mc_completed,
ctx->base.callback_data);
ctx->mc_completed = 0;
}
static inline void sync_it_packet_for_cpu(struct context *context,
struct descriptor *pd)
{
__le16 control;
u32 buffer_dma;
/* only packets beginning with OUTPUT_MORE* have data buffers */
if (pd->control & cpu_to_le16(DESCRIPTOR_BRANCH_ALWAYS))
return;
/* skip over the OUTPUT_MORE_IMMEDIATE descriptor */
pd += 2;
/*
* If the packet has a header, the first OUTPUT_MORE/LAST descriptor's
* data buffer is in the context program's coherent page and must not
* be synced.
*/
if ((le32_to_cpu(pd->data_address) & PAGE_MASK) ==
(context->current_bus & PAGE_MASK)) {
if (pd->control & cpu_to_le16(DESCRIPTOR_BRANCH_ALWAYS))
return;
pd++;
}
do {
buffer_dma = le32_to_cpu(pd->data_address);
dma_sync_single_range_for_cpu(context->ohci->card.device,
buffer_dma & PAGE_MASK,
buffer_dma & ~PAGE_MASK,
le16_to_cpu(pd->req_count),
DMA_TO_DEVICE);
control = pd->control;
pd++;
} while (!(control & cpu_to_le16(DESCRIPTOR_BRANCH_ALWAYS)));
}
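/*
 * Completion handler for isochronous transmit contexts: once the descriptor
 * block has completed, sync the payload for the CPU and store a four-byte
 * pseudo header (transfer status and timestamp) for the client, flushing
 * completions when the last descriptor requested an interrupt.
 */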
static int handle_it_packet(struct context *context,
struct descriptor *d,
struct descriptor *last)
{
struct iso_context *ctx =
container_of(context, struct iso_context, context);
struct descriptor *pd;
__be32 *ctx_hdr;
for (pd = d; pd <= last; pd++)
if (pd->transfer_status)
break;
if (pd > last)
/* Descriptor(s) not done yet, stop iteration */
return 0;
sync_it_packet_for_cpu(context, d);
if (ctx->header_length + 4 > PAGE_SIZE) {
if (ctx->base.drop_overflow_headers)
return 1;
flush_iso_completions(ctx);
}
ctx_hdr = ctx->header + ctx->header_length;
ctx->last_timestamp = le16_to_cpu(last->res_count);
/* Present this value as big-endian to match the receive code */
*ctx_hdr = cpu_to_be32((le16_to_cpu(pd->transfer_status) << 16) |
le16_to_cpu(pd->res_count));
ctx->header_length += 4;
if (last->control & cpu_to_le16(DESCRIPTOR_IRQ_ALWAYS))
flush_iso_completions(ctx);
return 1;
}
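/*
 * Program the IR multi-channel mask registers so that exactly the requested
 * channels are received by the multichannel context.
 */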
static void set_multichannel_mask(struct fw_ohci *ohci, u64 channels)
{
u32 hi = channels >> 32, lo = channels;
reg_write(ohci, OHCI1394_IRMultiChanMaskHiClear, ~hi);
reg_write(ohci, OHCI1394_IRMultiChanMaskLoClear, ~lo);
reg_write(ohci, OHCI1394_IRMultiChanMaskHiSet, hi);
reg_write(ohci, OHCI1394_IRMultiChanMaskLoSet, lo);
ohci->mc_channels = channels;
}
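/*
 * Allocate a free isochronous DMA context of the requested type, reserve the
 * channel (or the multichannel slot), and set up its descriptor program and
 * header page.
 */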
static struct fw_iso_context *ohci_allocate_iso_context(struct fw_card *card,
int type, int channel, size_t header_size)
{
struct fw_ohci *ohci = fw_ohci(card);
struct iso_context *ctx;
descriptor_callback_t callback;
u64 *channels;
u32 *mask, regs;
int index, ret = -EBUSY;
spin_lock_irq(&ohci->lock);
switch (type) {
case FW_ISO_CONTEXT_TRANSMIT:
mask = &ohci->it_context_mask;
callback = handle_it_packet;
index = ffs(*mask) - 1;
if (index >= 0) {
*mask &= ~(1 << index);
regs = OHCI1394_IsoXmitContextBase(index);
ctx = &ohci->it_context_list[index];
}
break;
case FW_ISO_CONTEXT_RECEIVE:
channels = &ohci->ir_context_channels;
mask = &ohci->ir_context_mask;
callback = handle_ir_packet_per_buffer;
index = *channels & 1ULL << channel ? ffs(*mask) - 1 : -1;
if (index >= 0) {
*channels &= ~(1ULL << channel);
*mask &= ~(1 << index);
regs = OHCI1394_IsoRcvContextBase(index);
ctx = &ohci->ir_context_list[index];
}
break;
case FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL:
mask = &ohci->ir_context_mask;
callback = handle_ir_buffer_fill;
index = !ohci->mc_allocated ? ffs(*mask) - 1 : -1;
if (index >= 0) {
ohci->mc_allocated = true;
*mask &= ~(1 << index);
regs = OHCI1394_IsoRcvContextBase(index);
ctx = &ohci->ir_context_list[index];
}
break;
default:
index = -1;
ret = -ENOSYS;
}
spin_unlock_irq(&ohci->lock);
if (index < 0)
return ERR_PTR(ret);
memset(ctx, 0, sizeof(*ctx));
ctx->header_length = 0;
ctx->header = (void *) __get_free_page(GFP_KERNEL);
if (ctx->header == NULL) {
ret = -ENOMEM;
goto out;
}
ret = context_init(&ctx->context, ohci, regs, callback);
if (ret < 0)
goto out_with_header;
if (type == FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL) {
set_multichannel_mask(ohci, 0);
ctx->mc_completed = 0;
}
return &ctx->base;
out_with_header:
free_page((unsigned long)ctx->header);
out:
spin_lock_irq(&ohci->lock);
switch (type) {
case FW_ISO_CONTEXT_RECEIVE:
*channels |= 1ULL << channel;
break;
case FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL:
ohci->mc_allocated = false;
break;
}
*mask |= 1 << index;
spin_unlock_irq(&ohci->lock);
return ERR_PTR(ret);
}
static int ohci_start_iso(struct fw_iso_context *base,
s32 cycle, u32 sync, u32 tags)
{
struct iso_context *ctx = container_of(base, struct iso_context, base);
struct fw_ohci *ohci = ctx->context.ohci;
u32 control = IR_CONTEXT_ISOCH_HEADER, match;
int index;
/* the controller cannot start without any queued packets */
if (ctx->context.last->branch_address == 0)
return -ENODATA;
switch (ctx->base.type) {
case FW_ISO_CONTEXT_TRANSMIT:
index = ctx - ohci->it_context_list;
match = 0;
if (cycle >= 0)
match = IT_CONTEXT_CYCLE_MATCH_ENABLE |
(cycle & 0x7fff) << 16;
reg_write(ohci, OHCI1394_IsoXmitIntEventClear, 1 << index);
reg_write(ohci, OHCI1394_IsoXmitIntMaskSet, 1 << index);
context_run(&ctx->context, match);
break;
case FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL:
control |= IR_CONTEXT_BUFFER_FILL|IR_CONTEXT_MULTI_CHANNEL_MODE;
fallthrough;
case FW_ISO_CONTEXT_RECEIVE:
index = ctx - ohci->ir_context_list;
match = (tags << 28) | (sync << 8) | ctx->base.channel;
if (cycle >= 0) {
match |= (cycle & 0x07fff) << 12;
control |= IR_CONTEXT_CYCLE_MATCH_ENABLE;
}
reg_write(ohci, OHCI1394_IsoRecvIntEventClear, 1 << index);
reg_write(ohci, OHCI1394_IsoRecvIntMaskSet, 1 << index);
reg_write(ohci, CONTEXT_MATCH(ctx->context.regs), match);
context_run(&ctx->context, control);
ctx->sync = sync;
ctx->tags = tags;
break;
}
return 0;
}
static int ohci_stop_iso(struct fw_iso_context *base)
{
struct fw_ohci *ohci = fw_ohci(base->card);
struct iso_context *ctx = container_of(base, struct iso_context, base);
int index;
switch (ctx->base.type) {
case FW_ISO_CONTEXT_TRANSMIT:
index = ctx - ohci->it_context_list;
reg_write(ohci, OHCI1394_IsoXmitIntMaskClear, 1 << index);
break;
case FW_ISO_CONTEXT_RECEIVE:
case FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL:
index = ctx - ohci->ir_context_list;
reg_write(ohci, OHCI1394_IsoRecvIntMaskClear, 1 << index);
break;
}
flush_writes(ohci);
context_stop(&ctx->context);
tasklet_kill(&ctx->context.tasklet);
return 0;
}
static void ohci_free_iso_context(struct fw_iso_context *base)
{
struct fw_ohci *ohci = fw_ohci(base->card);
struct iso_context *ctx = container_of(base, struct iso_context, base);
unsigned long flags;
int index;
ohci_stop_iso(base);
context_release(&ctx->context);
free_page((unsigned long)ctx->header);
spin_lock_irqsave(&ohci->lock, flags);
switch (base->type) {
case FW_ISO_CONTEXT_TRANSMIT:
index = ctx - ohci->it_context_list;
ohci->it_context_mask |= 1 << index;
break;
case FW_ISO_CONTEXT_RECEIVE:
index = ctx - ohci->ir_context_list;
ohci->ir_context_mask |= 1 << index;
ohci->ir_context_channels |= 1ULL << base->channel;
break;
case FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL:
index = ctx - ohci->ir_context_list;
ohci->ir_context_mask |= 1 << index;
ohci->ir_context_channels |= ohci->mc_channels;
ohci->mc_channels = 0;
ohci->mc_allocated = false;
break;
}
spin_unlock_irqrestore(&ohci->lock, flags);
}
static int ohci_set_iso_channels(struct fw_iso_context *base, u64 *channels)
{
struct fw_ohci *ohci = fw_ohci(base->card);
unsigned long flags;
int ret;
switch (base->type) {
case FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL:
spin_lock_irqsave(&ohci->lock, flags);
/* Don't allow multichannel to grab other contexts' channels. */
if (~ohci->ir_context_channels & ~ohci->mc_channels & *channels) {
*channels = ohci->ir_context_channels;
ret = -EBUSY;
} else {
set_multichannel_mask(ohci, *channels);
ret = 0;
}
spin_unlock_irqrestore(&ohci->lock, flags);
break;
default:
ret = -EINVAL;
}
return ret;
}
#ifdef CONFIG_PM
static void ohci_resume_iso_dma(struct fw_ohci *ohci)
{
int i;
struct iso_context *ctx;
for (i = 0 ; i < ohci->n_ir ; i++) {
ctx = &ohci->ir_context_list[i];
if (ctx->context.running)
ohci_start_iso(&ctx->base, 0, ctx->sync, ctx->tags);
}
for (i = 0 ; i < ohci->n_it ; i++) {
ctx = &ohci->it_context_list[i];
if (ctx->context.running)
ohci_start_iso(&ctx->base, 0, ctx->sync, ctx->tags);
}
}
#endif
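/*
 * Build the descriptor block for one isochronous transmit packet: an
 * immediate descriptor carrying the 8-byte isochronous packet header,
 * optionally a descriptor for the caller-supplied header, and one descriptor
 * per payload page, with the final descriptor marked as OUTPUT_LAST.
 */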
static int queue_iso_transmit(struct iso_context *ctx,
struct fw_iso_packet *packet,
struct fw_iso_buffer *buffer,
unsigned long payload)
{
struct descriptor *d, *last, *pd;
struct fw_iso_packet *p;
__le32 *header;
dma_addr_t d_bus, page_bus;
u32 z, header_z, payload_z, irq;
u32 payload_index, payload_end_index, next_page_index;
int page, end_page, i, length, offset;
p = packet;
payload_index = payload;
if (p->skip)
z = 1;
else
z = 2;
if (p->header_length > 0)
z++;
/* Determine the first page the payload isn't contained in. */
end_page = PAGE_ALIGN(payload_index + p->payload_length) >> PAGE_SHIFT;
if (p->payload_length > 0)
payload_z = end_page - (payload_index >> PAGE_SHIFT);
else
payload_z = 0;
z += payload_z;
/* Get header size in number of descriptors. */
header_z = DIV_ROUND_UP(p->header_length, sizeof(*d));
d = context_get_descriptors(&ctx->context, z + header_z, &d_bus);
if (d == NULL)
return -ENOMEM;
if (!p->skip) {
d[0].control = cpu_to_le16(DESCRIPTOR_KEY_IMMEDIATE);
d[0].req_count = cpu_to_le16(8);
/*
* Link the skip address to this descriptor itself. This causes
* a context to skip a cycle whenever lost cycles or FIFO
* overruns occur, without dropping the data. The application
* should then decide whether this is an error condition or not.
* FIXME: Make the context's cycle-lost behaviour configurable?
*/
d[0].branch_address = cpu_to_le32(d_bus | z);
header = (__le32 *) &d[1];
header[0] = cpu_to_le32(IT_HEADER_SY(p->sy) |
IT_HEADER_TAG(p->tag) |
IT_HEADER_TCODE(TCODE_STREAM_DATA) |
IT_HEADER_CHANNEL(ctx->base.channel) |
IT_HEADER_SPEED(ctx->base.speed));
header[1] =
cpu_to_le32(IT_HEADER_DATA_LENGTH(p->header_length +
p->payload_length));
}
if (p->header_length > 0) {
d[2].req_count = cpu_to_le16(p->header_length);
d[2].data_address = cpu_to_le32(d_bus + z * sizeof(*d));
memcpy(&d[z], p->header, p->header_length);
}
pd = d + z - payload_z;
payload_end_index = payload_index + p->payload_length;
for (i = 0; i < payload_z; i++) {
page = payload_index >> PAGE_SHIFT;
offset = payload_index & ~PAGE_MASK;
next_page_index = (page + 1) << PAGE_SHIFT;
length =
min(next_page_index, payload_end_index) - payload_index;
pd[i].req_count = cpu_to_le16(length);
page_bus = page_private(buffer->pages[page]);
pd[i].data_address = cpu_to_le32(page_bus + offset);
dma_sync_single_range_for_device(ctx->context.ohci->card.device,
page_bus, offset, length,
DMA_TO_DEVICE);
payload_index += length;
}
if (p->interrupt)
irq = DESCRIPTOR_IRQ_ALWAYS;
else
irq = DESCRIPTOR_NO_IRQ;
last = z == 2 ? d : d + z - 1;
last->control |= cpu_to_le16(DESCRIPTOR_OUTPUT_LAST |
DESCRIPTOR_STATUS |
DESCRIPTOR_BRANCH_ALWAYS |
irq);
context_append(&ctx->context, d, z, header_z);
return 0;
}
static int queue_iso_packet_per_buffer(struct iso_context *ctx,
struct fw_iso_packet *packet,
struct fw_iso_buffer *buffer,
unsigned long payload)
{
struct device *device = ctx->context.ohci->card.device;
struct descriptor *d, *pd;
dma_addr_t d_bus, page_bus;
u32 z, header_z, rest;
int i, j, length;
int page, offset, packet_count, header_size, payload_per_buffer;
/*
* The OHCI controller puts the isochronous header and trailer in the
* buffer, so we need at least 8 bytes.
*/
packet_count = packet->header_length / ctx->base.header_size;
header_size = max(ctx->base.header_size, (size_t)8);
/* Get header size in number of descriptors. */
header_z = DIV_ROUND_UP(header_size, sizeof(*d));
page = payload >> PAGE_SHIFT;
offset = payload & ~PAGE_MASK;
payload_per_buffer = packet->payload_length / packet_count;
for (i = 0; i < packet_count; i++) {
/* d points to the header descriptor */
z = DIV_ROUND_UP(payload_per_buffer + offset, PAGE_SIZE) + 1;
d = context_get_descriptors(&ctx->context,
z + header_z, &d_bus);
if (d == NULL)
return -ENOMEM;
d->control = cpu_to_le16(DESCRIPTOR_STATUS |
DESCRIPTOR_INPUT_MORE);
if (packet->skip && i == 0)
d->control |= cpu_to_le16(DESCRIPTOR_WAIT);
d->req_count = cpu_to_le16(header_size);
d->res_count = d->req_count;
d->transfer_status = 0;
d->data_address = cpu_to_le32(d_bus + (z * sizeof(*d)));
rest = payload_per_buffer;
pd = d;
for (j = 1; j < z; j++) {
pd++;
pd->control = cpu_to_le16(DESCRIPTOR_STATUS |
DESCRIPTOR_INPUT_MORE);
if (offset + rest < PAGE_SIZE)
length = rest;
else
length = PAGE_SIZE - offset;
pd->req_count = cpu_to_le16(length);
pd->res_count = pd->req_count;
pd->transfer_status = 0;
page_bus = page_private(buffer->pages[page]);
pd->data_address = cpu_to_le32(page_bus + offset);
dma_sync_single_range_for_device(device, page_bus,
offset, length,
DMA_FROM_DEVICE);
offset = (offset + length) & ~PAGE_MASK;
rest -= length;
if (offset == 0)
page++;
}
pd->control = cpu_to_le16(DESCRIPTOR_STATUS |
DESCRIPTOR_INPUT_LAST |
DESCRIPTOR_BRANCH_ALWAYS);
if (packet->interrupt && i == packet_count - 1)
pd->control |= cpu_to_le16(DESCRIPTOR_IRQ_ALWAYS);
context_append(&ctx->context, d, z, header_z);
}
return 0;
}
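/*
 * Queue part of the DMA buffer for multichannel reception in buffer-fill
 * mode: one INPUT_MORE descriptor per page, with no per-packet headers.
 * Offsets and lengths must be quadlet aligned.
 */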
static int queue_iso_buffer_fill(struct iso_context *ctx,
struct fw_iso_packet *packet,
struct fw_iso_buffer *buffer,
unsigned long payload)
{
struct descriptor *d;
dma_addr_t d_bus, page_bus;
int page, offset, rest, z, i, length;
page = payload >> PAGE_SHIFT;
offset = payload & ~PAGE_MASK;
rest = packet->payload_length;
/* We need one descriptor for each page in the buffer. */
z = DIV_ROUND_UP(offset + rest, PAGE_SIZE);
if (WARN_ON(offset & 3 || rest & 3 || page + z > buffer->page_count))
return -EFAULT;
for (i = 0; i < z; i++) {
d = context_get_descriptors(&ctx->context, 1, &d_bus);
if (d == NULL)
return -ENOMEM;
d->control = cpu_to_le16(DESCRIPTOR_INPUT_MORE |
DESCRIPTOR_BRANCH_ALWAYS);
if (packet->skip && i == 0)
d->control |= cpu_to_le16(DESCRIPTOR_WAIT);
if (packet->interrupt && i == z - 1)
d->control |= cpu_to_le16(DESCRIPTOR_IRQ_ALWAYS);
if (offset + rest < PAGE_SIZE)
length = rest;
else
length = PAGE_SIZE - offset;
d->req_count = cpu_to_le16(length);
d->res_count = d->req_count;
d->transfer_status = 0;
page_bus = page_private(buffer->pages[page]);
d->data_address = cpu_to_le32(page_bus + offset);
dma_sync_single_range_for_device(ctx->context.ohci->card.device,
page_bus, offset, length,
DMA_FROM_DEVICE);
rest -= length;
offset = 0;
page++;
context_append(&ctx->context, d, 1, 0);
}
return 0;
}
static int ohci_queue_iso(struct fw_iso_context *base,
struct fw_iso_packet *packet,
struct fw_iso_buffer *buffer,
unsigned long payload)
{
struct iso_context *ctx = container_of(base, struct iso_context, base);
unsigned long flags;
int ret = -ENOSYS;
spin_lock_irqsave(&ctx->context.ohci->lock, flags);
switch (base->type) {
case FW_ISO_CONTEXT_TRANSMIT:
ret = queue_iso_transmit(ctx, packet, buffer, payload);
break;
case FW_ISO_CONTEXT_RECEIVE:
ret = queue_iso_packet_per_buffer(ctx, packet, buffer, payload);
break;
case FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL:
ret = queue_iso_buffer_fill(ctx, packet, buffer, payload);
break;
}
spin_unlock_irqrestore(&ctx->context.ohci->lock, flags);
return ret;
}
static void ohci_flush_queue_iso(struct fw_iso_context *base)
{
struct context *ctx =
&container_of(base, struct iso_context, base)->context;
reg_write(ctx->ohci, CONTROL_SET(ctx->regs), CONTEXT_WAKE);
}
static int ohci_flush_iso_completions(struct fw_iso_context *base)
{
struct iso_context *ctx = container_of(base, struct iso_context, base);
int ret = 0;
tasklet_disable_in_atomic(&ctx->context.tasklet);
if (!test_and_set_bit_lock(0, &ctx->flushing_completions)) {
context_tasklet((unsigned long)&ctx->context);
switch (base->type) {
case FW_ISO_CONTEXT_TRANSMIT:
case FW_ISO_CONTEXT_RECEIVE:
if (ctx->header_length != 0)
flush_iso_completions(ctx);
break;
case FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL:
if (ctx->mc_completed != 0)
flush_ir_buffer_fill(ctx);
break;
default:
ret = -ENOSYS;
}
clear_bit_unlock(0, &ctx->flushing_completions);
smp_mb__after_atomic();
}
tasklet_enable(&ctx->context.tasklet);
return ret;
}
static const struct fw_card_driver ohci_driver = {
.enable = ohci_enable,
.read_phy_reg = ohci_read_phy_reg,
.update_phy_reg = ohci_update_phy_reg,
.set_config_rom = ohci_set_config_rom,
.send_request = ohci_send_request,
.send_response = ohci_send_response,
.cancel_packet = ohci_cancel_packet,
.enable_phys_dma = ohci_enable_phys_dma,
.read_csr = ohci_read_csr,
.write_csr = ohci_write_csr,
.allocate_iso_context = ohci_allocate_iso_context,
.free_iso_context = ohci_free_iso_context,
.set_iso_channels = ohci_set_iso_channels,
.queue_iso = ohci_queue_iso,
.flush_queue_iso = ohci_flush_queue_iso,
.flush_iso_completions = ohci_flush_iso_completions,
.start_iso = ohci_start_iso,
.stop_iso = ohci_stop_iso,
};
#ifdef CONFIG_PPC_PMAC
static void pmac_ohci_on(struct pci_dev *dev)
{
if (machine_is(powermac)) {
struct device_node *ofn = pci_device_to_OF_node(dev);
if (ofn) {
pmac_call_feature(PMAC_FTR_1394_CABLE_POWER, ofn, 0, 1);
pmac_call_feature(PMAC_FTR_1394_ENABLE, ofn, 0, 1);
}
}
}
static void pmac_ohci_off(struct pci_dev *dev)
{
if (machine_is(powermac)) {
struct device_node *ofn = pci_device_to_OF_node(dev);
if (ofn) {
pmac_call_feature(PMAC_FTR_1394_ENABLE, ofn, 0, 0);
pmac_call_feature(PMAC_FTR_1394_CABLE_POWER, ofn, 0, 0);
}
}
}
#else
static inline void pmac_ohci_on(struct pci_dev *dev) {}
static inline void pmac_ohci_off(struct pci_dev *dev) {}
#endif /* CONFIG_PPC_PMAC */
static void release_ohci(struct device *dev, void *data)
{
struct pci_dev *pdev = to_pci_dev(dev);
struct fw_ohci *ohci = pci_get_drvdata(pdev);
pmac_ohci_off(pdev);
ar_context_release(&ohci->ar_response_ctx);
ar_context_release(&ohci->ar_request_ctx);
dev_notice(dev, "removed fw-ohci device\n");
}
static int pci_probe(struct pci_dev *dev,
const struct pci_device_id *ent)
{
struct fw_ohci *ohci;
u32 bus_options, max_receive, link_speed, version;
u64 guid;
int i, err;
size_t size;
if (dev->vendor == PCI_VENDOR_ID_PINNACLE_SYSTEMS) {
dev_err(&dev->dev, "Pinnacle MovieBoard is not yet supported\n");
return -ENOSYS;
}
ohci = devres_alloc(release_ohci, sizeof(*ohci), GFP_KERNEL);
if (ohci == NULL)
return -ENOMEM;
fw_card_initialize(&ohci->card, &ohci_driver, &dev->dev);
pci_set_drvdata(dev, ohci);
pmac_ohci_on(dev);
devres_add(&dev->dev, ohci);
err = pcim_enable_device(dev);
if (err) {
dev_err(&dev->dev, "failed to enable OHCI hardware\n");
return err;
}
pci_set_master(dev);
pci_write_config_dword(dev, OHCI1394_PCI_HCI_Control, 0);
spin_lock_init(&ohci->lock);
mutex_init(&ohci->phy_reg_mutex);
INIT_WORK(&ohci->bus_reset_work, bus_reset_work);
if (!(pci_resource_flags(dev, 0) & IORESOURCE_MEM) ||
pci_resource_len(dev, 0) < OHCI1394_REGISTER_SIZE) {
ohci_err(ohci, "invalid MMIO resource\n");
return -ENXIO;
}
err = pcim_iomap_regions(dev, 1 << 0, ohci_driver_name);
if (err) {
ohci_err(ohci, "request and map MMIO resource unavailable\n");
return -ENXIO;
}
ohci->registers = pcim_iomap_table(dev)[0];
for (i = 0; i < ARRAY_SIZE(ohci_quirks); i++)
if ((ohci_quirks[i].vendor == dev->vendor) &&
(ohci_quirks[i].device == (unsigned short)PCI_ANY_ID ||
ohci_quirks[i].device == dev->device) &&
(ohci_quirks[i].revision == (unsigned short)PCI_ANY_ID ||
ohci_quirks[i].revision >= dev->revision)) {
ohci->quirks = ohci_quirks[i].flags;
break;
}
if (param_quirks)
ohci->quirks = param_quirks;
/*
* Because dma_alloc_coherent() allocates at least one page,
* we save space by using a common buffer for the AR request/
* response descriptors and the self IDs buffer.
*/
BUILD_BUG_ON(AR_BUFFERS * sizeof(struct descriptor) > PAGE_SIZE/4);
BUILD_BUG_ON(SELF_ID_BUF_SIZE > PAGE_SIZE/2);
ohci->misc_buffer = dmam_alloc_coherent(&dev->dev, PAGE_SIZE, &ohci->misc_buffer_bus,
GFP_KERNEL);
if (!ohci->misc_buffer)
return -ENOMEM;
err = ar_context_init(&ohci->ar_request_ctx, ohci, 0,
OHCI1394_AsReqRcvContextControlSet);
if (err < 0)
return err;
err = ar_context_init(&ohci->ar_response_ctx, ohci, PAGE_SIZE/4,
OHCI1394_AsRspRcvContextControlSet);
if (err < 0)
return err;
err = context_init(&ohci->at_request_ctx, ohci,
OHCI1394_AsReqTrContextControlSet, handle_at_packet);
if (err < 0)
return err;
err = context_init(&ohci->at_response_ctx, ohci,
OHCI1394_AsRspTrContextControlSet, handle_at_packet);
if (err < 0)
return err;
reg_write(ohci, OHCI1394_IsoRecvIntMaskSet, ~0);
ohci->ir_context_channels = ~0ULL;
ohci->ir_context_support = reg_read(ohci, OHCI1394_IsoRecvIntMaskSet);
reg_write(ohci, OHCI1394_IsoRecvIntMaskClear, ~0);
ohci->ir_context_mask = ohci->ir_context_support;
ohci->n_ir = hweight32(ohci->ir_context_mask);
size = sizeof(struct iso_context) * ohci->n_ir;
ohci->ir_context_list = devm_kzalloc(&dev->dev, size, GFP_KERNEL);
if (!ohci->ir_context_list)
return -ENOMEM;
reg_write(ohci, OHCI1394_IsoXmitIntMaskSet, ~0);
ohci->it_context_support = reg_read(ohci, OHCI1394_IsoXmitIntMaskSet);
/* JMicron JMB38x often shows 0 at first read, just ignore it */
if (!ohci->it_context_support) {
ohci_notice(ohci, "overriding IsoXmitIntMask\n");
ohci->it_context_support = 0xf;
}
reg_write(ohci, OHCI1394_IsoXmitIntMaskClear, ~0);
ohci->it_context_mask = ohci->it_context_support;
ohci->n_it = hweight32(ohci->it_context_mask);
size = sizeof(struct iso_context) * ohci->n_it;
ohci->it_context_list = devm_kzalloc(&dev->dev, size, GFP_KERNEL);
if (!ohci->it_context_list)
return -ENOMEM;
ohci->self_id = ohci->misc_buffer + PAGE_SIZE/2;
ohci->self_id_bus = ohci->misc_buffer_bus + PAGE_SIZE/2;
bus_options = reg_read(ohci, OHCI1394_BusOptions);
max_receive = (bus_options >> 12) & 0xf;
link_speed = bus_options & 0x7;
guid = ((u64) reg_read(ohci, OHCI1394_GUIDHi) << 32) |
reg_read(ohci, OHCI1394_GUIDLo);
if (!(ohci->quirks & QUIRK_NO_MSI))
pci_enable_msi(dev);
err = devm_request_irq(&dev->dev, dev->irq, irq_handler,
pci_dev_msi_enabled(dev) ? 0 : IRQF_SHARED, ohci_driver_name, ohci);
if (err < 0) {
ohci_err(ohci, "failed to allocate interrupt %d\n", dev->irq);
goto fail_msi;
}
err = fw_card_add(&ohci->card, max_receive, link_speed, guid);
if (err)
goto fail_msi;
version = reg_read(ohci, OHCI1394_Version) & 0x00ff00ff;
ohci_notice(ohci,
"added OHCI v%x.%x device as card %d, "
"%d IR + %d IT contexts, quirks 0x%x%s\n",
version >> 16, version & 0xff, ohci->card.index,
ohci->n_ir, ohci->n_it, ohci->quirks,
reg_read(ohci, OHCI1394_PhyUpperBound) ?
", physUB" : "");
return 0;
fail_msi:
pci_disable_msi(dev);
return err;
}
static void pci_remove(struct pci_dev *dev)
{
struct fw_ohci *ohci = pci_get_drvdata(dev);
/*
* If the removal is happening from the suspend state, LPS won't be
* enabled and host registers (e.g., IntMaskClear) won't be accessible.
*/
if (reg_read(ohci, OHCI1394_HCControlSet) & OHCI1394_HCControl_LPS) {
reg_write(ohci, OHCI1394_IntMaskClear, ~0);
flush_writes(ohci);
}
cancel_work_sync(&ohci->bus_reset_work);
fw_core_remove_card(&ohci->card);
/*
* FIXME: Fail all pending packets here, now that the upper
* layers can't queue any more.
*/
software_reset(ohci);
pci_disable_msi(dev);
dev_notice(&dev->dev, "removing fw-ohci device\n");
}
#ifdef CONFIG_PM
static int pci_suspend(struct pci_dev *dev, pm_message_t state)
{
struct fw_ohci *ohci = pci_get_drvdata(dev);
int err;
software_reset(ohci);
err = pci_save_state(dev);
if (err) {
ohci_err(ohci, "pci_save_state failed\n");
return err;
}
err = pci_set_power_state(dev, pci_choose_state(dev, state));
if (err)
ohci_err(ohci, "pci_set_power_state failed with %d\n", err);
pmac_ohci_off(dev);
return 0;
}
static int pci_resume(struct pci_dev *dev)
{
struct fw_ohci *ohci = pci_get_drvdata(dev);
int err;
pmac_ohci_on(dev);
pci_set_power_state(dev, PCI_D0);
pci_restore_state(dev);
err = pci_enable_device(dev);
if (err) {
ohci_err(ohci, "pci_enable_device failed\n");
return err;
}
/* Some systems don't set up the GUID register on resume from RAM */
if (!reg_read(ohci, OHCI1394_GUIDLo) &&
!reg_read(ohci, OHCI1394_GUIDHi)) {
reg_write(ohci, OHCI1394_GUIDLo, (u32)ohci->card.guid);
reg_write(ohci, OHCI1394_GUIDHi, (u32)(ohci->card.guid >> 32));
}
err = ohci_enable(&ohci->card, NULL, 0);
if (err)
return err;
ohci_resume_iso_dma(ohci);
return 0;
}
#endif
static const struct pci_device_id pci_table[] = {
{ PCI_DEVICE_CLASS(PCI_CLASS_SERIAL_FIREWIRE_OHCI, ~0) },
{ }
};
MODULE_DEVICE_TABLE(pci, pci_table);
static struct pci_driver fw_ohci_pci_driver = {
.name = ohci_driver_name,
.id_table = pci_table,
.probe = pci_probe,
.remove = pci_remove,
#ifdef CONFIG_PM
.resume = pci_resume,
.suspend = pci_suspend,
#endif
};
static int __init fw_ohci_init(void)
{
selfid_workqueue = alloc_workqueue(KBUILD_MODNAME, WQ_MEM_RECLAIM, 0);
if (!selfid_workqueue)
return -ENOMEM;
return pci_register_driver(&fw_ohci_pci_driver);
}
static void __exit fw_ohci_cleanup(void)
{
pci_unregister_driver(&fw_ohci_pci_driver);
destroy_workqueue(selfid_workqueue);
}
module_init(fw_ohci_init);
module_exit(fw_ohci_cleanup);
MODULE_AUTHOR("Kristian Hoegsberg <[email protected]>");
MODULE_DESCRIPTION("Driver for PCI OHCI IEEE1394 controllers");
MODULE_LICENSE("GPL");
/* Provide a module alias so root-on-sbp2 initrds don't break. */
MODULE_ALIAS("ohci1394");
/* linux-master: drivers/firewire/ohci.c */
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Core IEEE1394 transaction logic
*
* Copyright (C) 2004-2006 Kristian Hoegsberg <[email protected]>
*/
#include <linux/bug.h>
#include <linux/completion.h>
#include <linux/device.h>
#include <linux/errno.h>
#include <linux/firewire.h>
#include <linux/firewire-constants.h>
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/idr.h>
#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/rculist.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/types.h>
#include <linux/workqueue.h>
#include <asm/byteorder.h>
#include "core.h"
#define HEADER_PRI(pri) ((pri) << 0)
#define HEADER_TCODE(tcode) ((tcode) << 4)
#define HEADER_RETRY(retry) ((retry) << 8)
#define HEADER_TLABEL(tlabel) ((tlabel) << 10)
#define HEADER_DESTINATION(destination) ((destination) << 16)
#define HEADER_SOURCE(source) ((source) << 16)
#define HEADER_RCODE(rcode) ((rcode) << 12)
#define HEADER_OFFSET_HIGH(offset_high) ((offset_high) << 0)
#define HEADER_DATA_LENGTH(length) ((length) << 16)
#define HEADER_EXTENDED_TCODE(tcode) ((tcode) << 0)
#define HEADER_GET_TCODE(q) (((q) >> 4) & 0x0f)
#define HEADER_GET_TLABEL(q) (((q) >> 10) & 0x3f)
#define HEADER_GET_RCODE(q) (((q) >> 12) & 0x0f)
#define HEADER_GET_DESTINATION(q) (((q) >> 16) & 0xffff)
#define HEADER_GET_SOURCE(q) (((q) >> 16) & 0xffff)
#define HEADER_GET_OFFSET_HIGH(q) (((q) >> 0) & 0xffff)
#define HEADER_GET_DATA_LENGTH(q) (((q) >> 16) & 0xffff)
#define HEADER_GET_EXTENDED_TCODE(q) (((q) >> 0) & 0xffff)
#define HEADER_DESTINATION_IS_BROADCAST(q) \
(((q) & HEADER_DESTINATION(0x3f)) == HEADER_DESTINATION(0x3f))
#define PHY_PACKET_CONFIG 0x0
#define PHY_PACKET_LINK_ON 0x1
#define PHY_PACKET_SELF_ID 0x2
#define PHY_CONFIG_GAP_COUNT(gap_count) (((gap_count) << 16) | (1 << 22))
#define PHY_CONFIG_ROOT_ID(node_id) ((((node_id) & 0x3f) << 24) | (1 << 23))
#define PHY_IDENTIFIER(id) ((id) << 30)
/* returns 0 if the split timeout handler is already running */
static int try_cancel_split_timeout(struct fw_transaction *t)
{
if (t->is_split_transaction)
return del_timer(&t->split_timeout_timer);
else
return 1;
}
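/*
 * Take the transaction off the card's transaction list and complete it with
 * the given rcode.  Returns -ENOENT if the transaction was not found or its
 * split-timeout handler is already running.
 */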
static int close_transaction(struct fw_transaction *transaction, struct fw_card *card, int rcode,
u32 response_tstamp)
{
struct fw_transaction *t = NULL, *iter;
unsigned long flags;
spin_lock_irqsave(&card->lock, flags);
list_for_each_entry(iter, &card->transaction_list, link) {
if (iter == transaction) {
if (!try_cancel_split_timeout(iter)) {
spin_unlock_irqrestore(&card->lock, flags);
goto timed_out;
}
list_del_init(&iter->link);
card->tlabel_mask &= ~(1ULL << iter->tlabel);
t = iter;
break;
}
}
spin_unlock_irqrestore(&card->lock, flags);
if (t) {
if (!t->with_tstamp) {
t->callback.without_tstamp(card, rcode, NULL, 0, t->callback_data);
} else {
t->callback.with_tstamp(card, rcode, t->packet.timestamp, response_tstamp,
NULL, 0, t->callback_data);
}
return 0;
}
timed_out:
return -ENOENT;
}
/*
* Only valid for transactions that are potentially pending (ie have
* been sent).
*/
int fw_cancel_transaction(struct fw_card *card,
struct fw_transaction *transaction)
{
u32 tstamp;
/*
* Cancel the packet transmission if it's still queued. That
* will call the packet transmission callback which cancels
* the transaction.
*/
if (card->driver->cancel_packet(card, &transaction->packet) == 0)
return 0;
/*
* If the request packet has already been sent, we need to see
* if the transaction is still pending and remove it in that case.
*/
if (transaction->packet.ack == 0) {
// The timestamp is reused since it was just read now.
tstamp = transaction->packet.timestamp;
} else {
u32 curr_cycle_time = 0;
(void)fw_card_read_cycle_time(card, &curr_cycle_time);
tstamp = cycle_time_to_ohci_tstamp(curr_cycle_time);
}
return close_transaction(transaction, card, RCODE_CANCELLED, tstamp);
}
EXPORT_SYMBOL(fw_cancel_transaction);
static void split_transaction_timeout_callback(struct timer_list *timer)
{
struct fw_transaction *t = from_timer(t, timer, split_timeout_timer);
struct fw_card *card = t->card;
unsigned long flags;
spin_lock_irqsave(&card->lock, flags);
if (list_empty(&t->link)) {
spin_unlock_irqrestore(&card->lock, flags);
return;
}
list_del(&t->link);
card->tlabel_mask &= ~(1ULL << t->tlabel);
spin_unlock_irqrestore(&card->lock, flags);
if (!t->with_tstamp) {
t->callback.without_tstamp(card, RCODE_CANCELLED, NULL, 0, t->callback_data);
} else {
t->callback.with_tstamp(card, RCODE_CANCELLED, t->packet.timestamp,
t->split_timeout_cycle, NULL, 0, t->callback_data);
}
}
static void start_split_transaction_timeout(struct fw_transaction *t,
struct fw_card *card)
{
unsigned long flags;
spin_lock_irqsave(&card->lock, flags);
if (list_empty(&t->link) || WARN_ON(t->is_split_transaction)) {
spin_unlock_irqrestore(&card->lock, flags);
return;
}
t->is_split_transaction = true;
mod_timer(&t->split_timeout_timer,
jiffies + card->split_timeout_jiffies);
spin_unlock_irqrestore(&card->lock, flags);
}
static u32 compute_split_timeout_timestamp(struct fw_card *card, u32 request_timestamp);
static void transmit_complete_callback(struct fw_packet *packet,
struct fw_card *card, int status)
{
struct fw_transaction *t =
container_of(packet, struct fw_transaction, packet);
switch (status) {
case ACK_COMPLETE:
close_transaction(t, card, RCODE_COMPLETE, packet->timestamp);
break;
case ACK_PENDING:
{
t->split_timeout_cycle =
compute_split_timeout_timestamp(card, packet->timestamp) & 0xffff;
start_split_transaction_timeout(t, card);
break;
}
case ACK_BUSY_X:
case ACK_BUSY_A:
case ACK_BUSY_B:
close_transaction(t, card, RCODE_BUSY, packet->timestamp);
break;
case ACK_DATA_ERROR:
close_transaction(t, card, RCODE_DATA_ERROR, packet->timestamp);
break;
case ACK_TYPE_ERROR:
close_transaction(t, card, RCODE_TYPE_ERROR, packet->timestamp);
break;
default:
/*
* In this case the ack is really a juju specific
* rcode, so just forward that to the callback.
*/
close_transaction(t, card, status, packet->timestamp);
break;
}
}
static void fw_fill_request(struct fw_packet *packet, int tcode, int tlabel,
int destination_id, int source_id, int generation, int speed,
unsigned long long offset, void *payload, size_t length)
{
int ext_tcode;
if (tcode == TCODE_STREAM_DATA) {
packet->header[0] =
HEADER_DATA_LENGTH(length) |
destination_id |
HEADER_TCODE(TCODE_STREAM_DATA);
packet->header_length = 4;
packet->payload = payload;
packet->payload_length = length;
goto common;
}
if (tcode > 0x10) {
ext_tcode = tcode & ~0x10;
tcode = TCODE_LOCK_REQUEST;
} else
ext_tcode = 0;
packet->header[0] =
HEADER_RETRY(RETRY_X) |
HEADER_TLABEL(tlabel) |
HEADER_TCODE(tcode) |
HEADER_DESTINATION(destination_id);
packet->header[1] =
HEADER_OFFSET_HIGH(offset >> 32) | HEADER_SOURCE(source_id);
packet->header[2] =
offset;
switch (tcode) {
case TCODE_WRITE_QUADLET_REQUEST:
packet->header[3] = *(u32 *)payload;
packet->header_length = 16;
packet->payload_length = 0;
break;
case TCODE_LOCK_REQUEST:
case TCODE_WRITE_BLOCK_REQUEST:
packet->header[3] =
HEADER_DATA_LENGTH(length) |
HEADER_EXTENDED_TCODE(ext_tcode);
packet->header_length = 16;
packet->payload = payload;
packet->payload_length = length;
break;
case TCODE_READ_QUADLET_REQUEST:
packet->header_length = 12;
packet->payload_length = 0;
break;
case TCODE_READ_BLOCK_REQUEST:
packet->header[3] =
HEADER_DATA_LENGTH(length) |
HEADER_EXTENDED_TCODE(ext_tcode);
packet->header_length = 16;
packet->payload_length = 0;
break;
default:
WARN(1, "wrong tcode %d\n", tcode);
}
common:
packet->speed = speed;
packet->generation = generation;
packet->ack = 0;
packet->payload_mapped = false;
}
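/*
 * Allocate the next free transaction label from the card's 64-bit tlabel
 * bitmap, starting at current_tlabel.  Returns -EBUSY if all 64 labels are in
 * use.  Called with card->lock held.
 */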
static int allocate_tlabel(struct fw_card *card)
{
int tlabel;
tlabel = card->current_tlabel;
while (card->tlabel_mask & (1ULL << tlabel)) {
tlabel = (tlabel + 1) & 0x3f;
if (tlabel == card->current_tlabel)
return -EBUSY;
}
card->current_tlabel = (tlabel + 1) & 0x3f;
card->tlabel_mask |= 1ULL << tlabel;
return tlabel;
}
/**
* __fw_send_request() - submit a request packet for transmission and generate a callback for the
* response subaction, with or without time stamp.
* @card: card interface on which to send the request
* @t: transaction instance to which the request belongs
* @tcode: transaction code
* @destination_id: destination node ID, consisting of bus_ID and phy_ID
* @generation: bus generation in which request and response are valid
* @speed: transmission speed
* @offset: 48bit wide offset into destination's address space
* @payload: data payload for the request subaction
* @length: length of the payload, in bytes
* @callback: union of two callback functions, with or without time stamp for the response
* subaction; the variant used is selected by @with_tstamp.
* @with_tstamp: Whether to receive the time stamp for the response subaction.
* @callback_data: data to be passed to the transaction completion callback
*
* Submit a request packet into the asynchronous request transmission queue.
* Can be called from atomic context. If you prefer a blocking API, use
* fw_run_transaction() in a context that can sleep.
*
* In case of lock requests, specify one of the firewire-core specific %TCODE_
* constants instead of %TCODE_LOCK_REQUEST in @tcode.
*
* Make sure that the value in @destination_id is not older than the one in
* @generation. Otherwise the request is at risk of being sent to the wrong node.
*
* In case of asynchronous stream packets i.e. %TCODE_STREAM_DATA, the caller
* needs to synthesize @destination_id with fw_stream_packet_destination_id().
* It will contain tag, channel, and sy data instead of a node ID then.
*
* The payload buffer at @payload is going to be DMA-mapped except in case of
* @length <= 8 or of local (loopback) requests. Hence make sure that the
* buffer complies with the restrictions of the streaming DMA mapping API.
* @payload must not be freed before the @callback is called.
*
* In case of request types without payload, @payload is NULL and @length is 0.
*
* After the transaction is completed successfully or unsuccessfully, the
* @callback will be called. Among its parameters is the response code which
* is either one of the rcodes per IEEE 1394 or, in case of internal errors,
* the firewire-core specific %RCODE_SEND_ERROR. The other firewire-core
* specific rcodes (%RCODE_CANCELLED, %RCODE_BUSY, %RCODE_GENERATION,
* %RCODE_NO_ACK) denote transaction timeout, busy responder, stale request
* generation, or missing ACK respectively.
*
* Note some timing corner cases: fw_send_request() may complete much earlier
* than when the request packet actually hits the wire. On the other hand,
* transaction completion and hence execution of @callback may happen even
* before fw_send_request() returns.
*/
void __fw_send_request(struct fw_card *card, struct fw_transaction *t, int tcode,
int destination_id, int generation, int speed, unsigned long long offset,
void *payload, size_t length, union fw_transaction_callback callback,
bool with_tstamp, void *callback_data)
{
unsigned long flags;
int tlabel;
/*
* Allocate tlabel from the bitmap and put the transaction on
* the list while holding the card spinlock.
*/
spin_lock_irqsave(&card->lock, flags);
tlabel = allocate_tlabel(card);
if (tlabel < 0) {
spin_unlock_irqrestore(&card->lock, flags);
if (!with_tstamp) {
callback.without_tstamp(card, RCODE_SEND_ERROR, NULL, 0, callback_data);
} else {
// Timestamping on behalf of hardware.
u32 curr_cycle_time = 0;
u32 tstamp;
(void)fw_card_read_cycle_time(card, &curr_cycle_time);
tstamp = cycle_time_to_ohci_tstamp(curr_cycle_time);
callback.with_tstamp(card, RCODE_SEND_ERROR, tstamp, tstamp, NULL, 0,
callback_data);
}
return;
}
t->node_id = destination_id;
t->tlabel = tlabel;
t->card = card;
t->is_split_transaction = false;
timer_setup(&t->split_timeout_timer, split_transaction_timeout_callback, 0);
t->callback = callback;
t->with_tstamp = with_tstamp;
t->callback_data = callback_data;
fw_fill_request(&t->packet, tcode, t->tlabel, destination_id, card->node_id, generation,
speed, offset, payload, length);
t->packet.callback = transmit_complete_callback;
list_add_tail(&t->link, &card->transaction_list);
spin_unlock_irqrestore(&card->lock, flags);
card->driver->send_request(card, &t->packet);
}
EXPORT_SYMBOL_GPL(__fw_send_request);
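/*
 * A minimal usage sketch for the non-timestamp wrapper fw_send_request()
 * (illustrative only, not part of the driver; my_complete(), my_data and the
 * surrounding variables are hypothetical caller code):
 *
 *	static void my_complete(struct fw_card *card, int rcode,
 *				void *payload, size_t length, void *data)
 *	{
 *		// inspect rcode/payload and wake up the waiter passed in 'data'
 *	}
 *
 *	fw_send_request(device->card, &t, TCODE_WRITE_BLOCK_REQUEST,
 *			device->node_id, device->generation, device->max_speed,
 *			offset, buffer, length, my_complete, my_data);
 *
 * The struct fw_transaction 't' and 'buffer' must stay valid until
 * my_complete() has been called.
 */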
struct transaction_callback_data {
struct completion done;
void *payload;
int rcode;
};
static void transaction_callback(struct fw_card *card, int rcode,
void *payload, size_t length, void *data)
{
struct transaction_callback_data *d = data;
if (rcode == RCODE_COMPLETE)
memcpy(d->payload, payload, length);
d->rcode = rcode;
complete(&d->done);
}
/**
* fw_run_transaction() - send request and sleep until transaction is completed
* @card: card interface for this request
* @tcode: transaction code
* @destination_id: destination node ID, consisting of bus_ID and phy_ID
* @generation: bus generation in which request and response are valid
* @speed: transmission speed
* @offset: 48bit wide offset into destination's address space
* @payload: data payload for the request subaction
* @length: length of the payload, in bytes
*
* Returns the RCODE. See fw_send_request() for parameter documentation.
* Unlike fw_send_request(), @payload points to the payload of the request and/or
* to the payload of the response. DMA mapping restrictions apply to outbound
* request payloads of >= 8 bytes but not to inbound response payloads.
*/
int fw_run_transaction(struct fw_card *card, int tcode, int destination_id,
int generation, int speed, unsigned long long offset,
void *payload, size_t length)
{
struct transaction_callback_data d;
struct fw_transaction t;
timer_setup_on_stack(&t.split_timeout_timer, NULL, 0);
init_completion(&d.done);
d.payload = payload;
fw_send_request(card, &t, tcode, destination_id, generation, speed,
offset, payload, length, transaction_callback, &d);
wait_for_completion(&d.done);
destroy_timer_on_stack(&t.split_timeout_timer);
return d.rcode;
}
EXPORT_SYMBOL(fw_run_transaction);
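/*
 * A minimal blocking usage sketch (illustrative only; 'device' is assumed to
 * be a struct fw_device held by the caller): read the first quadlet of the
 * device's Config ROM.
 *
 *	__be32 quadlet;
 *	int rcode;
 *
 *	rcode = fw_run_transaction(device->card, TCODE_READ_QUADLET_REQUEST,
 *				   device->node_id, device->generation,
 *				   device->max_speed,
 *				   CSR_REGISTER_BASE | CSR_CONFIG_ROM,
 *				   &quadlet, 4);
 *	if (rcode != RCODE_COMPLETE)
 *		return -EIO;
 */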
static DEFINE_MUTEX(phy_config_mutex);
static DECLARE_COMPLETION(phy_config_done);
static void transmit_phy_packet_callback(struct fw_packet *packet,
struct fw_card *card, int status)
{
complete(&phy_config_done);
}
static struct fw_packet phy_config_packet = {
.header_length = 12,
.header[0] = TCODE_LINK_INTERNAL << 4,
.payload_length = 0,
.speed = SCODE_100,
.callback = transmit_phy_packet_callback,
};
void fw_send_phy_config(struct fw_card *card,
int node_id, int generation, int gap_count)
{
long timeout = DIV_ROUND_UP(HZ, 10);
u32 data = PHY_IDENTIFIER(PHY_PACKET_CONFIG);
if (node_id != FW_PHY_CONFIG_NO_NODE_ID)
data |= PHY_CONFIG_ROOT_ID(node_id);
if (gap_count == FW_PHY_CONFIG_CURRENT_GAP_COUNT) {
gap_count = card->driver->read_phy_reg(card, 1);
if (gap_count < 0)
return;
gap_count &= 63;
if (gap_count == 63)
return;
}
data |= PHY_CONFIG_GAP_COUNT(gap_count);
mutex_lock(&phy_config_mutex);
phy_config_packet.header[1] = data;
phy_config_packet.header[2] = ~data;
phy_config_packet.generation = generation;
reinit_completion(&phy_config_done);
card->driver->send_request(card, &phy_config_packet);
wait_for_completion_timeout(&phy_config_done, timeout);
mutex_unlock(&phy_config_mutex);
}
static struct fw_address_handler *lookup_overlapping_address_handler(
struct list_head *list, unsigned long long offset, size_t length)
{
struct fw_address_handler *handler;
list_for_each_entry_rcu(handler, list, link) {
if (handler->offset < offset + length &&
offset < handler->offset + handler->length)
return handler;
}
return NULL;
}
static bool is_enclosing_handler(struct fw_address_handler *handler,
unsigned long long offset, size_t length)
{
return handler->offset <= offset &&
offset + length <= handler->offset + handler->length;
}
static struct fw_address_handler *lookup_enclosing_address_handler(
struct list_head *list, unsigned long long offset, size_t length)
{
struct fw_address_handler *handler;
list_for_each_entry_rcu(handler, list, link) {
if (is_enclosing_handler(handler, offset, length))
return handler;
}
return NULL;
}
static DEFINE_SPINLOCK(address_handler_list_lock);
static LIST_HEAD(address_handler_list);
const struct fw_address_region fw_high_memory_region =
{ .start = FW_MAX_PHYSICAL_RANGE, .end = 0xffffe0000000ULL, };
EXPORT_SYMBOL(fw_high_memory_region);
static const struct fw_address_region low_memory_region =
{ .start = 0x000000000000ULL, .end = FW_MAX_PHYSICAL_RANGE, };
#if 0
const struct fw_address_region fw_private_region =
{ .start = 0xffffe0000000ULL, .end = 0xfffff0000000ULL, };
const struct fw_address_region fw_csr_region =
{ .start = CSR_REGISTER_BASE,
.end = CSR_REGISTER_BASE | CSR_CONFIG_ROM_END, };
const struct fw_address_region fw_unit_space_region =
{ .start = 0xfffff0000900ULL, .end = 0x1000000000000ULL, };
#endif /* 0 */
/**
* fw_core_add_address_handler() - register for incoming requests
* @handler: callback
* @region: region in the IEEE 1212 node space address range
*
* region->start, ->end, and handler->length have to be quadlet-aligned.
*
* When a request is received that falls within the specified address range,
* the specified callback is invoked. The parameters passed to the callback
* give the details of the particular request.
*
* To be called in process context.
* Return value: 0 on success, non-zero otherwise.
*
* The start offset of the handler's address region is determined by
* fw_core_add_address_handler() and is returned in handler->offset.
*
* Address allocations are exclusive, except for the FCP registers.
*/
int fw_core_add_address_handler(struct fw_address_handler *handler,
const struct fw_address_region *region)
{
struct fw_address_handler *other;
int ret = -EBUSY;
if (region->start & 0xffff000000000003ULL ||
region->start >= region->end ||
region->end > 0x0001000000000000ULL ||
handler->length & 3 ||
handler->length == 0)
return -EINVAL;
spin_lock(&address_handler_list_lock);
handler->offset = region->start;
while (handler->offset + handler->length <= region->end) {
if (is_in_fcp_region(handler->offset, handler->length))
other = NULL;
else
other = lookup_overlapping_address_handler
(&address_handler_list,
handler->offset, handler->length);
if (other != NULL) {
handler->offset += other->length;
} else {
list_add_tail_rcu(&handler->link, &address_handler_list);
ret = 0;
break;
}
}
spin_unlock(&address_handler_list_lock);
return ret;
}
EXPORT_SYMBOL(fw_core_add_address_handler);
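/*
 * A minimal registration sketch (illustrative only; my_handler and
 * my_address_callback are hypothetical client code):
 *
 *	static struct fw_address_handler my_handler = {
 *		.length           = 0x100,
 *		.address_callback = my_address_callback,
 *	};
 *
 *	err = fw_core_add_address_handler(&my_handler, &fw_high_memory_region);
 *	// on success the allocated start address is in my_handler.offset
 */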
/**
* fw_core_remove_address_handler() - unregister an address handler
* @handler: callback
*
* To be called in process context.
*
* When fw_core_remove_address_handler() returns, @handler->callback() is
* guaranteed to not run on any CPU anymore.
*/
void fw_core_remove_address_handler(struct fw_address_handler *handler)
{
spin_lock(&address_handler_list_lock);
list_del_rcu(&handler->link);
spin_unlock(&address_handler_list_lock);
synchronize_rcu();
}
EXPORT_SYMBOL(fw_core_remove_address_handler);
struct fw_request {
struct kref kref;
struct fw_packet response;
u32 request_header[4];
int ack;
u32 timestamp;
u32 length;
u32 data[];
};
void fw_request_get(struct fw_request *request)
{
kref_get(&request->kref);
}
static void release_request(struct kref *kref)
{
struct fw_request *request = container_of(kref, struct fw_request, kref);
kfree(request);
}
void fw_request_put(struct fw_request *request)
{
kref_put(&request->kref, release_request);
}
static void free_response_callback(struct fw_packet *packet,
struct fw_card *card, int status)
{
struct fw_request *request = container_of(packet, struct fw_request, response);
// Decrease the reference count since the response is no longer in flight.
fw_request_put(request);
// Decrease the reference count to release the object.
fw_request_put(request);
}
int fw_get_response_length(struct fw_request *r)
{
int tcode, ext_tcode, data_length;
tcode = HEADER_GET_TCODE(r->request_header[0]);
switch (tcode) {
case TCODE_WRITE_QUADLET_REQUEST:
case TCODE_WRITE_BLOCK_REQUEST:
return 0;
case TCODE_READ_QUADLET_REQUEST:
return 4;
case TCODE_READ_BLOCK_REQUEST:
data_length = HEADER_GET_DATA_LENGTH(r->request_header[3]);
return data_length;
case TCODE_LOCK_REQUEST:
ext_tcode = HEADER_GET_EXTENDED_TCODE(r->request_header[3]);
data_length = HEADER_GET_DATA_LENGTH(r->request_header[3]);
switch (ext_tcode) {
case EXTCODE_FETCH_ADD:
case EXTCODE_LITTLE_ADD:
return data_length;
default:
return data_length / 2;
}
default:
WARN(1, "wrong tcode %d\n", tcode);
return 0;
}
}
void fw_fill_response(struct fw_packet *response, u32 *request_header,
int rcode, void *payload, size_t length)
{
int tcode, tlabel, extended_tcode, source, destination;
tcode = HEADER_GET_TCODE(request_header[0]);
tlabel = HEADER_GET_TLABEL(request_header[0]);
source = HEADER_GET_DESTINATION(request_header[0]);
destination = HEADER_GET_SOURCE(request_header[1]);
extended_tcode = HEADER_GET_EXTENDED_TCODE(request_header[3]);
response->header[0] =
HEADER_RETRY(RETRY_1) |
HEADER_TLABEL(tlabel) |
HEADER_DESTINATION(destination);
response->header[1] =
HEADER_SOURCE(source) |
HEADER_RCODE(rcode);
response->header[2] = 0;
switch (tcode) {
case TCODE_WRITE_QUADLET_REQUEST:
case TCODE_WRITE_BLOCK_REQUEST:
response->header[0] |= HEADER_TCODE(TCODE_WRITE_RESPONSE);
response->header_length = 12;
response->payload_length = 0;
break;
case TCODE_READ_QUADLET_REQUEST:
response->header[0] |=
HEADER_TCODE(TCODE_READ_QUADLET_RESPONSE);
if (payload != NULL)
response->header[3] = *(u32 *)payload;
else
response->header[3] = 0;
response->header_length = 16;
response->payload_length = 0;
break;
case TCODE_READ_BLOCK_REQUEST:
case TCODE_LOCK_REQUEST:
response->header[0] |= HEADER_TCODE(tcode + 2);
response->header[3] =
HEADER_DATA_LENGTH(length) |
HEADER_EXTENDED_TCODE(extended_tcode);
response->header_length = 16;
response->payload = payload;
response->payload_length = length;
break;
default:
WARN(1, "wrong tcode %d\n", tcode);
}
response->payload_mapped = false;
}
EXPORT_SYMBOL(fw_fill_response);
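/*
 * Compute the cycle timestamp at which the split timeout expires for a
 * request received at request_timestamp.  Timestamps are 16 bits wide: three
 * bits of seconds in bits 15..13 and a 13-bit cycle count (0..7999) in bits
 * 12..0, so adding split_timeout_cycles has to carry from the cycle count
 * into the seconds field modulo 8000.
 */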
static u32 compute_split_timeout_timestamp(struct fw_card *card,
u32 request_timestamp)
{
unsigned int cycles;
u32 timestamp;
cycles = card->split_timeout_cycles;
cycles += request_timestamp & 0x1fff;
timestamp = request_timestamp & ~0x1fff;
timestamp += (cycles / 8000) << 13;
timestamp |= cycles % 8000;
return timestamp;
}
static struct fw_request *allocate_request(struct fw_card *card,
struct fw_packet *p)
{
struct fw_request *request;
u32 *data, length;
int request_tcode;
request_tcode = HEADER_GET_TCODE(p->header[0]);
switch (request_tcode) {
case TCODE_WRITE_QUADLET_REQUEST:
data = &p->header[3];
length = 4;
break;
case TCODE_WRITE_BLOCK_REQUEST:
case TCODE_LOCK_REQUEST:
data = p->payload;
length = HEADER_GET_DATA_LENGTH(p->header[3]);
break;
case TCODE_READ_QUADLET_REQUEST:
data = NULL;
length = 4;
break;
case TCODE_READ_BLOCK_REQUEST:
data = NULL;
length = HEADER_GET_DATA_LENGTH(p->header[3]);
break;
default:
fw_notice(card, "ERROR - corrupt request received - %08x %08x %08x\n",
p->header[0], p->header[1], p->header[2]);
return NULL;
}
request = kmalloc(sizeof(*request) + length, GFP_ATOMIC);
if (request == NULL)
return NULL;
kref_init(&request->kref);
request->response.speed = p->speed;
request->response.timestamp =
compute_split_timeout_timestamp(card, p->timestamp);
request->response.generation = p->generation;
request->response.ack = 0;
request->response.callback = free_response_callback;
request->ack = p->ack;
request->timestamp = p->timestamp;
request->length = length;
if (data)
memcpy(request->data, data, length);
memcpy(request->request_header, p->header, sizeof(p->header));
return request;
}
/**
* fw_send_response() - send response packet for asynchronous transaction.
* @card: card interface on which to send the response.
* @request: firewire request data for the transaction.
* @rcode: response code to send.
*
* Submit a response packet into the asynchronous response transmission queue. The @request
* is going to be released when the transmission successfully finishes later.
*/
void fw_send_response(struct fw_card *card,
struct fw_request *request, int rcode)
{
/* unified transaction or broadcast transaction: don't respond */
if (request->ack != ACK_PENDING ||
HEADER_DESTINATION_IS_BROADCAST(request->request_header[0])) {
fw_request_put(request);
return;
}
if (rcode == RCODE_COMPLETE)
fw_fill_response(&request->response, request->request_header,
rcode, request->data,
fw_get_response_length(request));
else
fw_fill_response(&request->response, request->request_header,
rcode, NULL, 0);
// Increase the reference count so that the object is kept alive while the response is in flight.
fw_request_get(request);
card->driver->send_response(card, &request->response);
}
EXPORT_SYMBOL(fw_send_response);
/**
* fw_get_request_speed() - returns speed at which the @request was received
* @request: firewire request data
*/
int fw_get_request_speed(struct fw_request *request)
{
return request->response.speed;
}
EXPORT_SYMBOL(fw_get_request_speed);
/**
* fw_request_get_timestamp() - Get timestamp of the request.
* @request: The opaque pointer to request structure.
*
* Get the timestamp recorded when the 1394 OHCI controller receives the asynchronous request
* subaction. The timestamp consists of the low-order 3 bits of the second field and the full
* 13 bits of the count field of the isochronous cycle time register.
*
* Returns: timestamp of the request.
*/
u32 fw_request_get_timestamp(const struct fw_request *request)
{
return request->timestamp;
}
EXPORT_SYMBOL_GPL(fw_request_get_timestamp);
static void handle_exclusive_region_request(struct fw_card *card,
struct fw_packet *p,
struct fw_request *request,
unsigned long long offset)
{
struct fw_address_handler *handler;
int tcode, destination, source;
destination = HEADER_GET_DESTINATION(p->header[0]);
source = HEADER_GET_SOURCE(p->header[1]);
tcode = HEADER_GET_TCODE(p->header[0]);
if (tcode == TCODE_LOCK_REQUEST)
tcode = 0x10 + HEADER_GET_EXTENDED_TCODE(p->header[3]);
rcu_read_lock();
handler = lookup_enclosing_address_handler(&address_handler_list,
offset, request->length);
if (handler)
handler->address_callback(card, request,
tcode, destination, source,
p->generation, offset,
request->data, request->length,
handler->callback_data);
rcu_read_unlock();
if (!handler)
fw_send_response(card, request, RCODE_ADDRESS_ERROR);
}
static void handle_fcp_region_request(struct fw_card *card,
struct fw_packet *p,
struct fw_request *request,
unsigned long long offset)
{
struct fw_address_handler *handler;
int tcode, destination, source;
if ((offset != (CSR_REGISTER_BASE | CSR_FCP_COMMAND) &&
offset != (CSR_REGISTER_BASE | CSR_FCP_RESPONSE)) ||
request->length > 0x200) {
fw_send_response(card, request, RCODE_ADDRESS_ERROR);
return;
}
tcode = HEADER_GET_TCODE(p->header[0]);
destination = HEADER_GET_DESTINATION(p->header[0]);
source = HEADER_GET_SOURCE(p->header[1]);
if (tcode != TCODE_WRITE_QUADLET_REQUEST &&
tcode != TCODE_WRITE_BLOCK_REQUEST) {
fw_send_response(card, request, RCODE_TYPE_ERROR);
return;
}
rcu_read_lock();
list_for_each_entry_rcu(handler, &address_handler_list, link) {
if (is_enclosing_handler(handler, offset, request->length))
handler->address_callback(card, request, tcode,
destination, source,
p->generation, offset,
request->data,
request->length,
handler->callback_data);
}
rcu_read_unlock();
fw_send_response(card, request, RCODE_COMPLETE);
}
void fw_core_handle_request(struct fw_card *card, struct fw_packet *p)
{
struct fw_request *request;
unsigned long long offset;
if (p->ack != ACK_PENDING && p->ack != ACK_COMPLETE)
return;
if (TCODE_IS_LINK_INTERNAL(HEADER_GET_TCODE(p->header[0]))) {
fw_cdev_handle_phy_packet(card, p);
return;
}
request = allocate_request(card, p);
if (request == NULL) {
/* FIXME: send statically allocated busy packet. */
return;
}
offset = ((u64)HEADER_GET_OFFSET_HIGH(p->header[1]) << 32) |
p->header[2];
if (!is_in_fcp_region(offset, request->length))
handle_exclusive_region_request(card, p, request, offset);
else
handle_fcp_region_request(card, p, request, offset);
}
EXPORT_SYMBOL(fw_core_handle_request);
void fw_core_handle_response(struct fw_card *card, struct fw_packet *p)
{
struct fw_transaction *t = NULL, *iter;
unsigned long flags;
u32 *data;
size_t data_length;
int tcode, tlabel, source, rcode;
tcode = HEADER_GET_TCODE(p->header[0]);
tlabel = HEADER_GET_TLABEL(p->header[0]);
source = HEADER_GET_SOURCE(p->header[1]);
rcode = HEADER_GET_RCODE(p->header[1]);
spin_lock_irqsave(&card->lock, flags);
list_for_each_entry(iter, &card->transaction_list, link) {
if (iter->node_id == source && iter->tlabel == tlabel) {
if (!try_cancel_split_timeout(iter)) {
spin_unlock_irqrestore(&card->lock, flags);
goto timed_out;
}
list_del_init(&iter->link);
card->tlabel_mask &= ~(1ULL << iter->tlabel);
t = iter;
break;
}
}
spin_unlock_irqrestore(&card->lock, flags);
if (!t) {
timed_out:
fw_notice(card, "unsolicited response (source %x, tlabel %x)\n",
source, tlabel);
return;
}
/*
	 * FIXME: sanity check packet: is the length correct, do the tcodes
	 * and addresses match?
*/
switch (tcode) {
case TCODE_READ_QUADLET_RESPONSE:
data = (u32 *) &p->header[3];
data_length = 4;
break;
case TCODE_WRITE_RESPONSE:
data = NULL;
data_length = 0;
break;
case TCODE_READ_BLOCK_RESPONSE:
case TCODE_LOCK_RESPONSE:
data = p->payload;
data_length = HEADER_GET_DATA_LENGTH(p->header[3]);
break;
default:
/* Should never happen, this is just to shut up gcc. */
data = NULL;
data_length = 0;
break;
}
/*
* The response handler may be executed while the request handler
* is still pending. Cancel the request handler.
*/
card->driver->cancel_packet(card, &t->packet);
if (!t->with_tstamp) {
t->callback.without_tstamp(card, rcode, data, data_length, t->callback_data);
} else {
t->callback.with_tstamp(card, rcode, t->packet.timestamp, p->timestamp, data,
data_length, t->callback_data);
}
}
EXPORT_SYMBOL(fw_core_handle_response);
/**
* fw_rcode_string - convert a firewire result code to an error description
* @rcode: the result code
*/
const char *fw_rcode_string(int rcode)
{
static const char *const names[] = {
[RCODE_COMPLETE] = "no error",
[RCODE_CONFLICT_ERROR] = "conflict error",
[RCODE_DATA_ERROR] = "data error",
[RCODE_TYPE_ERROR] = "type error",
[RCODE_ADDRESS_ERROR] = "address error",
[RCODE_SEND_ERROR] = "send error",
[RCODE_CANCELLED] = "timeout",
[RCODE_BUSY] = "busy",
[RCODE_GENERATION] = "bus reset",
[RCODE_NO_ACK] = "no ack",
};
if ((unsigned int)rcode < ARRAY_SIZE(names) && names[rcode])
return names[rcode];
else
return "unknown";
}
EXPORT_SYMBOL(fw_rcode_string);
static const struct fw_address_region topology_map_region =
{ .start = CSR_REGISTER_BASE | CSR_TOPOLOGY_MAP,
.end = CSR_REGISTER_BASE | CSR_TOPOLOGY_MAP_END, };
static void handle_topology_map(struct fw_card *card, struct fw_request *request,
int tcode, int destination, int source, int generation,
unsigned long long offset, void *payload, size_t length,
void *callback_data)
{
int start;
if (!TCODE_IS_READ_REQUEST(tcode)) {
fw_send_response(card, request, RCODE_TYPE_ERROR);
return;
}
if ((offset & 3) > 0 || (length & 3) > 0) {
fw_send_response(card, request, RCODE_ADDRESS_ERROR);
return;
}
start = (offset - topology_map_region.start) / 4;
memcpy(payload, &card->topology_map[start], length);
fw_send_response(card, request, RCODE_COMPLETE);
}
static struct fw_address_handler topology_map = {
.length = 0x400,
.address_callback = handle_topology_map,
};
static const struct fw_address_region registers_region =
{ .start = CSR_REGISTER_BASE,
.end = CSR_REGISTER_BASE | CSR_CONFIG_ROM, };
static void update_split_timeout(struct fw_card *card)
{
unsigned int cycles;
cycles = card->split_timeout_hi * 8000 + (card->split_timeout_lo >> 19);
/* minimum per IEEE 1394, maximum which doesn't overflow OHCI */
cycles = clamp(cycles, 800u, 3u * 8000u);
card->split_timeout_cycles = cycles;
card->split_timeout_jiffies = DIV_ROUND_UP(cycles * HZ, 8000);
}
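/*
 * Worked example for the conversion above (illustrative): with
 * split_timeout_hi = 0 and split_timeout_lo = 0x19000000, the top 13 bits
 * of _lo give 800 cycles, the IEEE 1394 minimum, i.e. 800 / 8000 s = 100 ms;
 * split_timeout_jiffies then becomes DIV_ROUND_UP(800 * HZ, 8000).
 */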
static void handle_registers(struct fw_card *card, struct fw_request *request,
int tcode, int destination, int source, int generation,
unsigned long long offset, void *payload, size_t length,
void *callback_data)
{
int reg = offset & ~CSR_REGISTER_BASE;
__be32 *data = payload;
int rcode = RCODE_COMPLETE;
unsigned long flags;
switch (reg) {
case CSR_PRIORITY_BUDGET:
if (!card->priority_budget_implemented) {
rcode = RCODE_ADDRESS_ERROR;
break;
}
fallthrough;
case CSR_NODE_IDS:
/*
* per IEEE 1394-2008 8.3.22.3, not IEEE 1394.1-2004 3.2.8
* and 9.6, but interoperable with IEEE 1394.1-2004 bridges
*/
fallthrough;
case CSR_STATE_CLEAR:
case CSR_STATE_SET:
case CSR_CYCLE_TIME:
case CSR_BUS_TIME:
case CSR_BUSY_TIMEOUT:
if (tcode == TCODE_READ_QUADLET_REQUEST)
*data = cpu_to_be32(card->driver->read_csr(card, reg));
else if (tcode == TCODE_WRITE_QUADLET_REQUEST)
card->driver->write_csr(card, reg, be32_to_cpu(*data));
else
rcode = RCODE_TYPE_ERROR;
break;
case CSR_RESET_START:
if (tcode == TCODE_WRITE_QUADLET_REQUEST)
card->driver->write_csr(card, CSR_STATE_CLEAR,
CSR_STATE_BIT_ABDICATE);
else
rcode = RCODE_TYPE_ERROR;
break;
case CSR_SPLIT_TIMEOUT_HI:
if (tcode == TCODE_READ_QUADLET_REQUEST) {
*data = cpu_to_be32(card->split_timeout_hi);
} else if (tcode == TCODE_WRITE_QUADLET_REQUEST) {
spin_lock_irqsave(&card->lock, flags);
card->split_timeout_hi = be32_to_cpu(*data) & 7;
update_split_timeout(card);
spin_unlock_irqrestore(&card->lock, flags);
} else {
rcode = RCODE_TYPE_ERROR;
}
break;
case CSR_SPLIT_TIMEOUT_LO:
if (tcode == TCODE_READ_QUADLET_REQUEST) {
*data = cpu_to_be32(card->split_timeout_lo);
} else if (tcode == TCODE_WRITE_QUADLET_REQUEST) {
spin_lock_irqsave(&card->lock, flags);
card->split_timeout_lo =
be32_to_cpu(*data) & 0xfff80000;
update_split_timeout(card);
spin_unlock_irqrestore(&card->lock, flags);
} else {
rcode = RCODE_TYPE_ERROR;
}
break;
case CSR_MAINT_UTILITY:
if (tcode == TCODE_READ_QUADLET_REQUEST)
*data = card->maint_utility_register;
else if (tcode == TCODE_WRITE_QUADLET_REQUEST)
card->maint_utility_register = *data;
else
rcode = RCODE_TYPE_ERROR;
break;
case CSR_BROADCAST_CHANNEL:
if (tcode == TCODE_READ_QUADLET_REQUEST)
*data = cpu_to_be32(card->broadcast_channel);
else if (tcode == TCODE_WRITE_QUADLET_REQUEST)
card->broadcast_channel =
(be32_to_cpu(*data) & BROADCAST_CHANNEL_VALID) |
BROADCAST_CHANNEL_INITIAL;
else
rcode = RCODE_TYPE_ERROR;
break;
case CSR_BUS_MANAGER_ID:
case CSR_BANDWIDTH_AVAILABLE:
case CSR_CHANNELS_AVAILABLE_HI:
case CSR_CHANNELS_AVAILABLE_LO:
/*
* FIXME: these are handled by the OHCI hardware and
		 * the stack never sees these requests. If we add
* support for a new type of controller that doesn't
* handle this in hardware we need to deal with these
* transactions.
*/
BUG();
break;
default:
rcode = RCODE_ADDRESS_ERROR;
break;
}
fw_send_response(card, request, rcode);
}
static struct fw_address_handler registers = {
.length = 0x400,
.address_callback = handle_registers,
};
static void handle_low_memory(struct fw_card *card, struct fw_request *request,
int tcode, int destination, int source, int generation,
unsigned long long offset, void *payload, size_t length,
void *callback_data)
{
/*
* This catches requests not handled by the physical DMA unit,
* i.e., wrong transaction types or unauthorized source nodes.
*/
fw_send_response(card, request, RCODE_TYPE_ERROR);
}
static struct fw_address_handler low_memory = {
.length = FW_MAX_PHYSICAL_RANGE,
.address_callback = handle_low_memory,
};
MODULE_AUTHOR("Kristian Hoegsberg <[email protected]>");
MODULE_DESCRIPTION("Core IEEE1394 transaction logic");
MODULE_LICENSE("GPL");
static const u32 vendor_textual_descriptor[] = {
/* textual descriptor leaf () */
0x00060000,
0x00000000,
0x00000000,
0x4c696e75, /* L i n u */
0x78204669, /* x F i */
0x72657769, /* r e w i */
0x72650000, /* r e */
};
static const u32 model_textual_descriptor[] = {
/* model descriptor leaf () */
0x00030000,
0x00000000,
0x00000000,
0x4a756a75, /* J u j u */
};
static struct fw_descriptor vendor_id_descriptor = {
.length = ARRAY_SIZE(vendor_textual_descriptor),
.immediate = 0x03001f11,
.key = 0x81000000,
.data = vendor_textual_descriptor,
};
static struct fw_descriptor model_id_descriptor = {
.length = ARRAY_SIZE(model_textual_descriptor),
.immediate = 0x17023901,
.key = 0x81000000,
.data = model_textual_descriptor,
};
static int __init fw_core_init(void)
{
int ret;
fw_workqueue = alloc_workqueue("firewire", WQ_MEM_RECLAIM, 0);
if (!fw_workqueue)
return -ENOMEM;
ret = bus_register(&fw_bus_type);
if (ret < 0) {
destroy_workqueue(fw_workqueue);
return ret;
}
fw_cdev_major = register_chrdev(0, "firewire", &fw_device_ops);
if (fw_cdev_major < 0) {
bus_unregister(&fw_bus_type);
destroy_workqueue(fw_workqueue);
return fw_cdev_major;
}
fw_core_add_address_handler(&topology_map, &topology_map_region);
	fw_core_add_address_handler(&registers, &registers_region);
fw_core_add_address_handler(&low_memory, &low_memory_region);
fw_core_add_descriptor(&vendor_id_descriptor);
fw_core_add_descriptor(&model_id_descriptor);
return 0;
}
static void __exit fw_core_cleanup(void)
{
unregister_chrdev(fw_cdev_major, "firewire");
bus_unregister(&fw_bus_type);
destroy_workqueue(fw_workqueue);
idr_destroy(&fw_device_idr);
}
module_init(fw_core_init);
module_exit(fw_core_cleanup);
| linux-master | drivers/firewire/core-transaction.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* nosy - Snoop mode driver for TI PCILynx 1394 controllers
* Copyright (C) 2002-2007 Kristian Høgsberg
*/
#include <linux/device.h>
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/kref.h>
#include <linux/miscdevice.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/pci.h>
#include <linux/poll.h>
#include <linux/sched.h> /* required for linux/wait.h */
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/time64.h>
#include <linux/timex.h>
#include <linux/uaccess.h>
#include <linux/wait.h>
#include <linux/dma-mapping.h>
#include <linux/atomic.h>
#include <asm/byteorder.h>
#include "nosy.h"
#include "nosy-user.h"
#define TCODE_PHY_PACKET 0x10
#define PCI_DEVICE_ID_TI_PCILYNX 0x8000
static char driver_name[] = KBUILD_MODNAME;
/* this is the physical layout of a PCL, its size is 128 bytes */
struct pcl {
__le32 next;
__le32 async_error_next;
u32 user_data;
__le32 pcl_status;
__le32 remaining_transfer_count;
__le32 next_data_buffer;
struct {
__le32 control;
__le32 pointer;
} buffer[13];
};
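/*
 * Size check for the layout comment above: 6 leading quadlets plus
 * 13 buffer descriptors of 2 quadlets each = 32 quadlets = 128 bytes.
 */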
struct packet {
unsigned int length;
char data[];
};
struct packet_buffer {
char *data;
size_t capacity;
long total_packet_count, lost_packet_count;
atomic_t size;
struct packet *head, *tail;
wait_queue_head_t wait;
};
struct pcilynx {
struct pci_dev *pci_device;
__iomem char *registers;
struct pcl *rcv_start_pcl, *rcv_pcl;
__le32 *rcv_buffer;
dma_addr_t rcv_start_pcl_bus, rcv_pcl_bus, rcv_buffer_bus;
spinlock_t client_list_lock;
struct list_head client_list;
struct miscdevice misc;
struct list_head link;
struct kref kref;
};
static inline struct pcilynx *
lynx_get(struct pcilynx *lynx)
{
kref_get(&lynx->kref);
return lynx;
}
static void
lynx_release(struct kref *kref)
{
kfree(container_of(kref, struct pcilynx, kref));
}
static inline void
lynx_put(struct pcilynx *lynx)
{
kref_put(&lynx->kref, lynx_release);
}
struct client {
struct pcilynx *lynx;
u32 tcode_mask;
struct packet_buffer buffer;
struct list_head link;
};
static DEFINE_MUTEX(card_mutex);
static LIST_HEAD(card_list);
static int
packet_buffer_init(struct packet_buffer *buffer, size_t capacity)
{
buffer->data = kmalloc(capacity, GFP_KERNEL);
if (buffer->data == NULL)
return -ENOMEM;
buffer->head = (struct packet *) buffer->data;
buffer->tail = (struct packet *) buffer->data;
buffer->capacity = capacity;
buffer->lost_packet_count = 0;
atomic_set(&buffer->size, 0);
init_waitqueue_head(&buffer->wait);
return 0;
}
static void
packet_buffer_destroy(struct packet_buffer *buffer)
{
kfree(buffer->data);
}
static int
packet_buffer_get(struct client *client, char __user *data, size_t user_length)
{
struct packet_buffer *buffer = &client->buffer;
size_t length;
char *end;
if (wait_event_interruptible(buffer->wait,
atomic_read(&buffer->size) > 0) ||
list_empty(&client->lynx->link))
return -ERESTARTSYS;
if (atomic_read(&buffer->size) == 0)
return -ENODEV;
/* FIXME: Check length <= user_length. */
end = buffer->data + buffer->capacity;
length = buffer->head->length;
if (&buffer->head->data[length] < end) {
if (copy_to_user(data, buffer->head->data, length))
return -EFAULT;
buffer->head = (struct packet *) &buffer->head->data[length];
} else {
size_t split = end - buffer->head->data;
if (copy_to_user(data, buffer->head->data, split))
return -EFAULT;
if (copy_to_user(data + split, buffer->data, length - split))
return -EFAULT;
buffer->head = (struct packet *) &buffer->data[length - split];
}
/*
* Decrease buffer->size as the last thing, since this is what
* keeps the interrupt from overwriting the packet we are
* retrieving from the buffer.
*/
atomic_sub(sizeof(struct packet) + length, &buffer->size);
return length;
}
static void
packet_buffer_put(struct packet_buffer *buffer, void *data, size_t length)
{
char *end;
buffer->total_packet_count++;
if (buffer->capacity <
atomic_read(&buffer->size) + sizeof(struct packet) + length) {
buffer->lost_packet_count++;
return;
}
end = buffer->data + buffer->capacity;
buffer->tail->length = length;
if (&buffer->tail->data[length] < end) {
memcpy(buffer->tail->data, data, length);
buffer->tail = (struct packet *) &buffer->tail->data[length];
} else {
size_t split = end - buffer->tail->data;
memcpy(buffer->tail->data, data, split);
memcpy(buffer->data, data + split, length - split);
buffer->tail = (struct packet *) &buffer->data[length - split];
}
/* Finally, adjust buffer size and wake up userspace reader. */
atomic_add(sizeof(struct packet) + length, &buffer->size);
wake_up_interruptible(&buffer->wait);
}
static inline void
reg_write(struct pcilynx *lynx, int offset, u32 data)
{
writel(data, lynx->registers + offset);
}
static inline u32
reg_read(struct pcilynx *lynx, int offset)
{
return readl(lynx->registers + offset);
}
static inline void
reg_set_bits(struct pcilynx *lynx, int offset, u32 mask)
{
reg_write(lynx, offset, (reg_read(lynx, offset) | mask));
}
/*
* Maybe the pcl programs could be set up to just append data instead
* of using a whole packet.
*/
static inline void
run_pcl(struct pcilynx *lynx, dma_addr_t pcl_bus,
int dmachan)
{
reg_write(lynx, DMA0_CURRENT_PCL + dmachan * 0x20, pcl_bus);
reg_write(lynx, DMA0_CHAN_CTRL + dmachan * 0x20,
DMA_CHAN_CTRL_ENABLE | DMA_CHAN_CTRL_LINK);
}
static int
set_phy_reg(struct pcilynx *lynx, int addr, int val)
{
if (addr > 15) {
dev_err(&lynx->pci_device->dev,
"PHY register address %d out of range\n", addr);
return -1;
}
if (val > 0xff) {
dev_err(&lynx->pci_device->dev,
"PHY register value %d out of range\n", val);
return -1;
}
reg_write(lynx, LINK_PHY, LINK_PHY_WRITE |
LINK_PHY_ADDR(addr) | LINK_PHY_WDATA(val));
return 0;
}
static int
nosy_open(struct inode *inode, struct file *file)
{
int minor = iminor(inode);
struct client *client;
struct pcilynx *tmp, *lynx = NULL;
mutex_lock(&card_mutex);
list_for_each_entry(tmp, &card_list, link)
if (tmp->misc.minor == minor) {
lynx = lynx_get(tmp);
break;
}
mutex_unlock(&card_mutex);
if (lynx == NULL)
return -ENODEV;
client = kmalloc(sizeof *client, GFP_KERNEL);
if (client == NULL)
goto fail;
client->tcode_mask = ~0;
client->lynx = lynx;
INIT_LIST_HEAD(&client->link);
if (packet_buffer_init(&client->buffer, 128 * 1024) < 0)
goto fail;
file->private_data = client;
return stream_open(inode, file);
fail:
kfree(client);
lynx_put(lynx);
return -ENOMEM;
}
static int
nosy_release(struct inode *inode, struct file *file)
{
struct client *client = file->private_data;
struct pcilynx *lynx = client->lynx;
spin_lock_irq(&lynx->client_list_lock);
list_del_init(&client->link);
spin_unlock_irq(&lynx->client_list_lock);
packet_buffer_destroy(&client->buffer);
kfree(client);
lynx_put(lynx);
return 0;
}
static __poll_t
nosy_poll(struct file *file, poll_table *pt)
{
struct client *client = file->private_data;
__poll_t ret = 0;
poll_wait(file, &client->buffer.wait, pt);
if (atomic_read(&client->buffer.size) > 0)
ret = EPOLLIN | EPOLLRDNORM;
if (list_empty(&client->lynx->link))
ret |= EPOLLHUP;
return ret;
}
static ssize_t
nosy_read(struct file *file, char __user *buffer, size_t count, loff_t *offset)
{
struct client *client = file->private_data;
return packet_buffer_get(client, buffer, count);
}
static long
nosy_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
struct client *client = file->private_data;
spinlock_t *client_list_lock = &client->lynx->client_list_lock;
struct nosy_stats stats;
int ret;
switch (cmd) {
case NOSY_IOC_GET_STATS:
spin_lock_irq(client_list_lock);
stats.total_packet_count = client->buffer.total_packet_count;
stats.lost_packet_count = client->buffer.lost_packet_count;
spin_unlock_irq(client_list_lock);
if (copy_to_user((void __user *) arg, &stats, sizeof stats))
return -EFAULT;
else
return 0;
case NOSY_IOC_START:
ret = -EBUSY;
spin_lock_irq(client_list_lock);
if (list_empty(&client->link)) {
list_add_tail(&client->link, &client->lynx->client_list);
ret = 0;
}
spin_unlock_irq(client_list_lock);
return ret;
case NOSY_IOC_STOP:
spin_lock_irq(client_list_lock);
list_del_init(&client->link);
spin_unlock_irq(client_list_lock);
return 0;
case NOSY_IOC_FILTER:
spin_lock_irq(client_list_lock);
client->tcode_mask = arg;
spin_unlock_irq(client_list_lock);
return 0;
default:
return -EINVAL;
/* Flush buffer, configure filter. */
}
}
static const struct file_operations nosy_ops = {
.owner = THIS_MODULE,
.read = nosy_read,
.unlocked_ioctl = nosy_ioctl,
.poll = nosy_poll,
.open = nosy_open,
.release = nosy_release,
};
#define PHY_PACKET_SIZE 12 /* 1 payload, 1 inverse, 1 ack = 3 quadlets */
static void
packet_irq_handler(struct pcilynx *lynx)
{
struct client *client;
u32 tcode_mask, tcode, timestamp;
size_t length;
struct timespec64 ts64;
/* FIXME: Also report rcv_speed. */
length = __le32_to_cpu(lynx->rcv_pcl->pcl_status) & 0x00001fff;
tcode = __le32_to_cpu(lynx->rcv_buffer[1]) >> 4 & 0xf;
ktime_get_real_ts64(&ts64);
timestamp = ts64.tv_nsec / NSEC_PER_USEC;
lynx->rcv_buffer[0] = (__force __le32)timestamp;
if (length == PHY_PACKET_SIZE)
tcode_mask = 1 << TCODE_PHY_PACKET;
else
tcode_mask = 1 << tcode;
spin_lock(&lynx->client_list_lock);
list_for_each_entry(client, &lynx->client_list, link)
if (client->tcode_mask & tcode_mask)
packet_buffer_put(&client->buffer,
lynx->rcv_buffer, length + 4);
spin_unlock(&lynx->client_list_lock);
}
static void
bus_reset_irq_handler(struct pcilynx *lynx)
{
struct client *client;
struct timespec64 ts64;
u32 timestamp;
ktime_get_real_ts64(&ts64);
timestamp = ts64.tv_nsec / NSEC_PER_USEC;
spin_lock(&lynx->client_list_lock);
list_for_each_entry(client, &lynx->client_list, link)
		packet_buffer_put(&client->buffer, &timestamp, 4);
spin_unlock(&lynx->client_list_lock);
}
static irqreturn_t
irq_handler(int irq, void *device)
{
struct pcilynx *lynx = device;
u32 pci_int_status;
pci_int_status = reg_read(lynx, PCI_INT_STATUS);
if (pci_int_status == ~0)
/* Card was ejected. */
return IRQ_NONE;
if ((pci_int_status & PCI_INT_INT_PEND) == 0)
/* Not our interrupt, bail out quickly. */
return IRQ_NONE;
if ((pci_int_status & PCI_INT_P1394_INT) != 0) {
u32 link_int_status;
link_int_status = reg_read(lynx, LINK_INT_STATUS);
reg_write(lynx, LINK_INT_STATUS, link_int_status);
if ((link_int_status & LINK_INT_PHY_BUSRESET) > 0)
bus_reset_irq_handler(lynx);
}
/* Clear the PCI_INT_STATUS register only after clearing the
* LINK_INT_STATUS register; otherwise the PCI_INT_P1394 will
* be set again immediately. */
reg_write(lynx, PCI_INT_STATUS, pci_int_status);
if ((pci_int_status & PCI_INT_DMA0_HLT) > 0) {
packet_irq_handler(lynx);
run_pcl(lynx, lynx->rcv_start_pcl_bus, 0);
}
return IRQ_HANDLED;
}
static void
remove_card(struct pci_dev *dev)
{
struct pcilynx *lynx = pci_get_drvdata(dev);
struct client *client;
mutex_lock(&card_mutex);
list_del_init(&lynx->link);
misc_deregister(&lynx->misc);
mutex_unlock(&card_mutex);
reg_write(lynx, PCI_INT_ENABLE, 0);
free_irq(lynx->pci_device->irq, lynx);
spin_lock_irq(&lynx->client_list_lock);
list_for_each_entry(client, &lynx->client_list, link)
wake_up_interruptible(&client->buffer.wait);
spin_unlock_irq(&lynx->client_list_lock);
dma_free_coherent(&lynx->pci_device->dev, sizeof(struct pcl),
lynx->rcv_start_pcl, lynx->rcv_start_pcl_bus);
dma_free_coherent(&lynx->pci_device->dev, sizeof(struct pcl),
lynx->rcv_pcl, lynx->rcv_pcl_bus);
dma_free_coherent(&lynx->pci_device->dev, PAGE_SIZE, lynx->rcv_buffer,
lynx->rcv_buffer_bus);
iounmap(lynx->registers);
pci_disable_device(dev);
lynx_put(lynx);
}
#define RCV_BUFFER_SIZE (16 * 1024)
static int
add_card(struct pci_dev *dev, const struct pci_device_id *unused)
{
struct pcilynx *lynx;
u32 p, end;
int ret, i;
if (dma_set_mask(&dev->dev, DMA_BIT_MASK(32))) {
dev_err(&dev->dev,
"DMA address limits not supported for PCILynx hardware\n");
return -ENXIO;
}
if (pci_enable_device(dev)) {
dev_err(&dev->dev, "Failed to enable PCILynx hardware\n");
return -ENXIO;
}
pci_set_master(dev);
lynx = kzalloc(sizeof *lynx, GFP_KERNEL);
if (lynx == NULL) {
dev_err(&dev->dev, "Failed to allocate control structure\n");
ret = -ENOMEM;
goto fail_disable;
}
lynx->pci_device = dev;
pci_set_drvdata(dev, lynx);
spin_lock_init(&lynx->client_list_lock);
INIT_LIST_HEAD(&lynx->client_list);
kref_init(&lynx->kref);
lynx->registers = ioremap(pci_resource_start(dev, 0),
PCILYNX_MAX_REGISTER);
if (lynx->registers == NULL) {
dev_err(&dev->dev, "Failed to map registers\n");
ret = -ENOMEM;
goto fail_deallocate_lynx;
}
lynx->rcv_start_pcl = dma_alloc_coherent(&lynx->pci_device->dev,
sizeof(struct pcl),
&lynx->rcv_start_pcl_bus,
GFP_KERNEL);
lynx->rcv_pcl = dma_alloc_coherent(&lynx->pci_device->dev,
sizeof(struct pcl),
&lynx->rcv_pcl_bus, GFP_KERNEL);
lynx->rcv_buffer = dma_alloc_coherent(&lynx->pci_device->dev,
RCV_BUFFER_SIZE,
&lynx->rcv_buffer_bus, GFP_KERNEL);
if (lynx->rcv_start_pcl == NULL ||
lynx->rcv_pcl == NULL ||
lynx->rcv_buffer == NULL) {
dev_err(&dev->dev, "Failed to allocate receive buffer\n");
ret = -ENOMEM;
goto fail_deallocate_buffers;
}
lynx->rcv_start_pcl->next = cpu_to_le32(lynx->rcv_pcl_bus);
lynx->rcv_pcl->next = cpu_to_le32(PCL_NEXT_INVALID);
lynx->rcv_pcl->async_error_next = cpu_to_le32(PCL_NEXT_INVALID);
lynx->rcv_pcl->buffer[0].control =
cpu_to_le32(PCL_CMD_RCV | PCL_BIGENDIAN | 2044);
lynx->rcv_pcl->buffer[0].pointer =
cpu_to_le32(lynx->rcv_buffer_bus + 4);
p = lynx->rcv_buffer_bus + 2048;
end = lynx->rcv_buffer_bus + RCV_BUFFER_SIZE;
for (i = 1; p < end; i++, p += 2048) {
lynx->rcv_pcl->buffer[i].control =
cpu_to_le32(PCL_CMD_RCV | PCL_BIGENDIAN | 2048);
lynx->rcv_pcl->buffer[i].pointer = cpu_to_le32(p);
}
lynx->rcv_pcl->buffer[i - 1].control |= cpu_to_le32(PCL_LAST_BUFF);
reg_set_bits(lynx, MISC_CONTROL, MISC_CONTROL_SWRESET);
/* Fix buggy cards with autoboot pin not tied low: */
reg_write(lynx, DMA0_CHAN_CTRL, 0);
reg_write(lynx, DMA_GLOBAL_REGISTER, 0x00 << 24);
#if 0
/* now, looking for PHY register set */
if ((get_phy_reg(lynx, 2) & 0xe0) == 0xe0) {
lynx->phyic.reg_1394a = 1;
PRINT(KERN_INFO, lynx->id,
"found 1394a conform PHY (using extended register set)");
lynx->phyic.vendor = get_phy_vendorid(lynx);
lynx->phyic.product = get_phy_productid(lynx);
} else {
lynx->phyic.reg_1394a = 0;
PRINT(KERN_INFO, lynx->id, "found old 1394 PHY");
}
#endif
	/* Set up the general receive FIFO max size. */
reg_write(lynx, FIFO_SIZES, 255);
reg_set_bits(lynx, PCI_INT_ENABLE, PCI_INT_DMA_ALL);
reg_write(lynx, LINK_INT_ENABLE,
LINK_INT_PHY_TIME_OUT | LINK_INT_PHY_REG_RCVD |
LINK_INT_PHY_BUSRESET | LINK_INT_IT_STUCK |
LINK_INT_AT_STUCK | LINK_INT_SNTRJ |
LINK_INT_TC_ERR | LINK_INT_GRF_OVER_FLOW |
LINK_INT_ITF_UNDER_FLOW | LINK_INT_ATF_UNDER_FLOW);
/* Disable the L flag in self ID packets. */
set_phy_reg(lynx, 4, 0);
/* Put this baby into snoop mode */
reg_set_bits(lynx, LINK_CONTROL, LINK_CONTROL_SNOOP_ENABLE);
run_pcl(lynx, lynx->rcv_start_pcl_bus, 0);
if (request_irq(dev->irq, irq_handler, IRQF_SHARED,
driver_name, lynx)) {
dev_err(&dev->dev,
"Failed to allocate shared interrupt %d\n", dev->irq);
ret = -EIO;
goto fail_deallocate_buffers;
}
lynx->misc.parent = &dev->dev;
lynx->misc.minor = MISC_DYNAMIC_MINOR;
lynx->misc.name = "nosy";
lynx->misc.fops = &nosy_ops;
mutex_lock(&card_mutex);
ret = misc_register(&lynx->misc);
if (ret) {
dev_err(&dev->dev, "Failed to register misc char device\n");
mutex_unlock(&card_mutex);
goto fail_free_irq;
}
list_add_tail(&lynx->link, &card_list);
mutex_unlock(&card_mutex);
dev_info(&dev->dev,
"Initialized PCILynx IEEE1394 card, irq=%d\n", dev->irq);
return 0;
fail_free_irq:
reg_write(lynx, PCI_INT_ENABLE, 0);
free_irq(lynx->pci_device->irq, lynx);
fail_deallocate_buffers:
if (lynx->rcv_start_pcl)
dma_free_coherent(&lynx->pci_device->dev, sizeof(struct pcl),
lynx->rcv_start_pcl,
lynx->rcv_start_pcl_bus);
if (lynx->rcv_pcl)
dma_free_coherent(&lynx->pci_device->dev, sizeof(struct pcl),
lynx->rcv_pcl, lynx->rcv_pcl_bus);
if (lynx->rcv_buffer)
dma_free_coherent(&lynx->pci_device->dev, PAGE_SIZE,
lynx->rcv_buffer, lynx->rcv_buffer_bus);
iounmap(lynx->registers);
fail_deallocate_lynx:
kfree(lynx);
fail_disable:
pci_disable_device(dev);
return ret;
}
static struct pci_device_id pci_table[] = {
{
.vendor = PCI_VENDOR_ID_TI,
.device = PCI_DEVICE_ID_TI_PCILYNX,
.subvendor = PCI_ANY_ID,
.subdevice = PCI_ANY_ID,
},
{ } /* Terminating entry */
};
MODULE_DEVICE_TABLE(pci, pci_table);
static struct pci_driver lynx_pci_driver = {
.name = driver_name,
.id_table = pci_table,
.probe = add_card,
.remove = remove_card,
};
module_pci_driver(lynx_pci_driver);
MODULE_AUTHOR("Kristian Hoegsberg");
MODULE_DESCRIPTION("Snoop mode driver for TI pcilynx 1394 controllers");
MODULE_LICENSE("GPL");
| linux-master | drivers/firewire/nosy.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* IPv4 over IEEE 1394, per RFC 2734
* IPv6 over IEEE 1394, per RFC 3146
*
* Copyright (C) 2009 Jay Fenlason <[email protected]>
*
* based on eth1394 by Ben Collins et al
*/
#include <linux/bug.h>
#include <linux/compiler.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/ethtool.h>
#include <linux/firewire.h>
#include <linux/firewire-constants.h>
#include <linux/highmem.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/jiffies.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/mutex.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <asm/unaligned.h>
#include <net/arp.h>
#include <net/firewire.h>
/* rx limits */
#define FWNET_MAX_FRAGMENTS 30 /* arbitrary, > TX queue depth */
#define FWNET_ISO_PAGE_COUNT (PAGE_SIZE < 16*1024 ? 4 : 2)
/* tx limits */
#define FWNET_MAX_QUEUED_DATAGRAMS 20 /* < 64 = number of tlabels */
#define FWNET_MIN_QUEUED_DATAGRAMS 10 /* should keep AT DMA busy enough */
#define FWNET_TX_QUEUE_LEN FWNET_MAX_QUEUED_DATAGRAMS /* ? */
#define IEEE1394_BROADCAST_CHANNEL 31
#define IEEE1394_ALL_NODES (0xffc0 | 0x003f)
#define IEEE1394_MAX_PAYLOAD_S100 512
#define FWNET_NO_FIFO_ADDR (~0ULL)
#define IANA_SPECIFIER_ID 0x00005eU
#define RFC2734_SW_VERSION 0x000001U
#define RFC3146_SW_VERSION 0x000002U
#define IEEE1394_GASP_HDR_SIZE 8
#define RFC2374_UNFRAG_HDR_SIZE 4
#define RFC2374_FRAG_HDR_SIZE 8
#define RFC2374_FRAG_OVERHEAD 4
#define RFC2374_HDR_UNFRAG 0 /* unfragmented */
#define RFC2374_HDR_FIRSTFRAG 1 /* first fragment */
#define RFC2374_HDR_LASTFRAG 2 /* last fragment */
#define RFC2374_HDR_INTFRAG 3 /* interior fragment */
static bool fwnet_hwaddr_is_multicast(u8 *ha)
{
return !!(*ha & 1);
}
/* IPv4 and IPv6 encapsulation header */
struct rfc2734_header {
u32 w0;
u32 w1;
};
#define fwnet_get_hdr_lf(h) (((h)->w0 & 0xc0000000) >> 30)
#define fwnet_get_hdr_ether_type(h) (((h)->w0 & 0x0000ffff))
#define fwnet_get_hdr_dg_size(h) ((((h)->w0 & 0x0fff0000) >> 16) + 1)
#define fwnet_get_hdr_fg_off(h) (((h)->w0 & 0x00000fff))
#define fwnet_get_hdr_dgl(h) (((h)->w1 & 0xffff0000) >> 16)
#define fwnet_set_hdr_lf(lf) ((lf) << 30)
#define fwnet_set_hdr_ether_type(et) (et)
#define fwnet_set_hdr_dg_size(dgs) (((dgs) - 1) << 16)
#define fwnet_set_hdr_fg_off(fgo) (fgo)
#define fwnet_set_hdr_dgl(dgl) ((dgl) << 16)
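/*
 * Bit layout implied by the accessors above (summary derived from the macros):
 *
 *	w0: [31:30] lf; [27:16] dg_size - 1 (fragments only); [15:0]
 *	    ether_type (unfragmented/first fragment) or [11:0] fg_off
 *	    (interior/last fragment)
 *	w1: [31:16] dgl (datagram label), fragment headers only
 */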
static inline void fwnet_make_uf_hdr(struct rfc2734_header *hdr,
unsigned ether_type)
{
hdr->w0 = fwnet_set_hdr_lf(RFC2374_HDR_UNFRAG)
| fwnet_set_hdr_ether_type(ether_type);
}
static inline void fwnet_make_ff_hdr(struct rfc2734_header *hdr,
unsigned ether_type, unsigned dg_size, unsigned dgl)
{
hdr->w0 = fwnet_set_hdr_lf(RFC2374_HDR_FIRSTFRAG)
| fwnet_set_hdr_dg_size(dg_size)
| fwnet_set_hdr_ether_type(ether_type);
hdr->w1 = fwnet_set_hdr_dgl(dgl);
}
static inline void fwnet_make_sf_hdr(struct rfc2734_header *hdr,
unsigned lf, unsigned dg_size, unsigned fg_off, unsigned dgl)
{
hdr->w0 = fwnet_set_hdr_lf(lf)
| fwnet_set_hdr_dg_size(dg_size)
| fwnet_set_hdr_fg_off(fg_off);
hdr->w1 = fwnet_set_hdr_dgl(dgl);
}
/* This list keeps track of what parts of the datagram have been filled in */
struct fwnet_fragment_info {
struct list_head fi_link;
u16 offset;
u16 len;
};
struct fwnet_partial_datagram {
struct list_head pd_link;
struct list_head fi_list;
struct sk_buff *skb;
/* FIXME Why not use skb->data? */
char *pbuf;
u16 datagram_label;
u16 ether_type;
u16 datagram_size;
};
static DEFINE_MUTEX(fwnet_device_mutex);
static LIST_HEAD(fwnet_device_list);
struct fwnet_device {
struct list_head dev_link;
spinlock_t lock;
enum {
FWNET_BROADCAST_ERROR,
FWNET_BROADCAST_RUNNING,
FWNET_BROADCAST_STOPPED,
} broadcast_state;
struct fw_iso_context *broadcast_rcv_context;
struct fw_iso_buffer broadcast_rcv_buffer;
void **broadcast_rcv_buffer_ptrs;
unsigned broadcast_rcv_next_ptr;
unsigned num_broadcast_rcv_ptrs;
unsigned rcv_buffer_size;
/*
* This value is the maximum unfragmented datagram size that can be
* sent by the hardware. It already has the GASP overhead and the
* unfragmented datagram header overhead calculated into it.
*/
unsigned broadcast_xmt_max_payload;
u16 broadcast_xmt_datagramlabel;
/*
* The CSR address that remote nodes must send datagrams to for us to
* receive them.
*/
struct fw_address_handler handler;
u64 local_fifo;
/* Number of tx datagrams that have been queued but not yet acked */
int queued_datagrams;
int peer_count;
struct list_head peer_list;
struct fw_card *card;
struct net_device *netdev;
};
struct fwnet_peer {
struct list_head peer_link;
struct fwnet_device *dev;
u64 guid;
/* guarded by dev->lock */
struct list_head pd_list; /* received partial datagrams */
unsigned pdg_size; /* pd_list size */
u16 datagram_label; /* outgoing datagram label */
u16 max_payload; /* includes RFC2374_FRAG_HDR_SIZE overhead */
int node_id;
int generation;
unsigned speed;
};
/* This is our task struct. It's used for the packet complete callback. */
struct fwnet_packet_task {
struct fw_transaction transaction;
struct rfc2734_header hdr;
struct sk_buff *skb;
struct fwnet_device *dev;
int outstanding_pkts;
u64 fifo_addr;
u16 dest_node;
u16 max_payload;
u8 generation;
u8 speed;
u8 enqueued;
};
/*
* saddr == NULL means use device source address.
 * daddr == NULL means leave the destination address (e.g. unresolved ARP).
*/
static int fwnet_header_create(struct sk_buff *skb, struct net_device *net,
unsigned short type, const void *daddr,
const void *saddr, unsigned len)
{
struct fwnet_header *h;
h = skb_push(skb, sizeof(*h));
put_unaligned_be16(type, &h->h_proto);
if (net->flags & (IFF_LOOPBACK | IFF_NOARP)) {
memset(h->h_dest, 0, net->addr_len);
return net->hard_header_len;
}
if (daddr) {
memcpy(h->h_dest, daddr, net->addr_len);
return net->hard_header_len;
}
return -net->hard_header_len;
}
static int fwnet_header_cache(const struct neighbour *neigh,
struct hh_cache *hh, __be16 type)
{
struct net_device *net;
struct fwnet_header *h;
if (type == cpu_to_be16(ETH_P_802_3))
return -1;
net = neigh->dev;
h = (struct fwnet_header *)((u8 *)hh->hh_data + HH_DATA_OFF(sizeof(*h)));
h->h_proto = type;
memcpy(h->h_dest, neigh->ha, net->addr_len);
/* Pairs with the READ_ONCE() in neigh_resolve_output(),
* neigh_hh_output() and neigh_update_hhs().
*/
smp_store_release(&hh->hh_len, FWNET_HLEN);
return 0;
}
/* Called by Address Resolution module to notify changes in address. */
static void fwnet_header_cache_update(struct hh_cache *hh,
const struct net_device *net, const unsigned char *haddr)
{
memcpy((u8 *)hh->hh_data + HH_DATA_OFF(FWNET_HLEN), haddr, net->addr_len);
}
static int fwnet_header_parse(const struct sk_buff *skb, unsigned char *haddr)
{
memcpy(haddr, skb->dev->dev_addr, FWNET_ALEN);
return FWNET_ALEN;
}
static const struct header_ops fwnet_header_ops = {
.create = fwnet_header_create,
.cache = fwnet_header_cache,
.cache_update = fwnet_header_cache_update,
.parse = fwnet_header_parse,
};
/* FIXME: is this correct for all cases? */
static bool fwnet_frag_overlap(struct fwnet_partial_datagram *pd,
unsigned offset, unsigned len)
{
struct fwnet_fragment_info *fi;
unsigned end = offset + len;
list_for_each_entry(fi, &pd->fi_list, fi_link)
if (offset < fi->offset + fi->len && end > fi->offset)
return true;
return false;
}
/* Assumes that new fragment does not overlap any existing fragments */
static struct fwnet_fragment_info *fwnet_frag_new(
struct fwnet_partial_datagram *pd, unsigned offset, unsigned len)
{
struct fwnet_fragment_info *fi, *fi2, *new;
struct list_head *list;
list = &pd->fi_list;
list_for_each_entry(fi, &pd->fi_list, fi_link) {
if (fi->offset + fi->len == offset) {
/* The new fragment can be tacked on to the end */
/* Did the new fragment plug a hole? */
fi2 = list_entry(fi->fi_link.next,
struct fwnet_fragment_info, fi_link);
if (fi->offset + fi->len == fi2->offset) {
/* glue fragments together */
fi->len += len + fi2->len;
list_del(&fi2->fi_link);
kfree(fi2);
} else {
fi->len += len;
}
return fi;
}
if (offset + len == fi->offset) {
/* The new fragment can be tacked on to the beginning */
/* Did the new fragment plug a hole? */
fi2 = list_entry(fi->fi_link.prev,
struct fwnet_fragment_info, fi_link);
if (fi2->offset + fi2->len == fi->offset) {
/* glue fragments together */
fi2->len += fi->len + len;
list_del(&fi->fi_link);
kfree(fi);
return fi2;
}
fi->offset = offset;
fi->len += len;
return fi;
}
if (offset > fi->offset + fi->len) {
list = &fi->fi_link;
break;
}
if (offset + len < fi->offset) {
list = fi->fi_link.prev;
break;
}
}
new = kmalloc(sizeof(*new), GFP_ATOMIC);
if (!new)
return NULL;
new->offset = offset;
new->len = len;
list_add(&new->fi_link, list);
return new;
}
static struct fwnet_partial_datagram *fwnet_pd_new(struct net_device *net,
struct fwnet_peer *peer, u16 datagram_label, unsigned dg_size,
void *frag_buf, unsigned frag_off, unsigned frag_len)
{
struct fwnet_partial_datagram *new;
struct fwnet_fragment_info *fi;
new = kmalloc(sizeof(*new), GFP_ATOMIC);
if (!new)
goto fail;
INIT_LIST_HEAD(&new->fi_list);
fi = fwnet_frag_new(new, frag_off, frag_len);
if (fi == NULL)
goto fail_w_new;
new->datagram_label = datagram_label;
new->datagram_size = dg_size;
new->skb = dev_alloc_skb(dg_size + LL_RESERVED_SPACE(net));
if (new->skb == NULL)
goto fail_w_fi;
skb_reserve(new->skb, LL_RESERVED_SPACE(net));
new->pbuf = skb_put(new->skb, dg_size);
memcpy(new->pbuf + frag_off, frag_buf, frag_len);
list_add_tail(&new->pd_link, &peer->pd_list);
return new;
fail_w_fi:
kfree(fi);
fail_w_new:
kfree(new);
fail:
return NULL;
}
static struct fwnet_partial_datagram *fwnet_pd_find(struct fwnet_peer *peer,
u16 datagram_label)
{
struct fwnet_partial_datagram *pd;
list_for_each_entry(pd, &peer->pd_list, pd_link)
if (pd->datagram_label == datagram_label)
return pd;
return NULL;
}
static void fwnet_pd_delete(struct fwnet_partial_datagram *old)
{
struct fwnet_fragment_info *fi, *n;
list_for_each_entry_safe(fi, n, &old->fi_list, fi_link)
kfree(fi);
list_del(&old->pd_link);
dev_kfree_skb_any(old->skb);
kfree(old);
}
static bool fwnet_pd_update(struct fwnet_peer *peer,
struct fwnet_partial_datagram *pd, void *frag_buf,
unsigned frag_off, unsigned frag_len)
{
if (fwnet_frag_new(pd, frag_off, frag_len) == NULL)
return false;
memcpy(pd->pbuf + frag_off, frag_buf, frag_len);
/*
	 * Move the list entry to the end of the list so that the oldest
	 * partial datagrams percolate to the head, where they are evicted first
*/
list_move_tail(&pd->pd_link, &peer->pd_list);
return true;
}
static bool fwnet_pd_is_complete(struct fwnet_partial_datagram *pd)
{
struct fwnet_fragment_info *fi;
fi = list_entry(pd->fi_list.next, struct fwnet_fragment_info, fi_link);
return fi->len == pd->datagram_size;
}
/* caller must hold dev->lock */
static struct fwnet_peer *fwnet_peer_find_by_guid(struct fwnet_device *dev,
u64 guid)
{
struct fwnet_peer *peer;
list_for_each_entry(peer, &dev->peer_list, peer_link)
if (peer->guid == guid)
return peer;
return NULL;
}
/* caller must hold dev->lock */
static struct fwnet_peer *fwnet_peer_find_by_node_id(struct fwnet_device *dev,
int node_id, int generation)
{
struct fwnet_peer *peer;
list_for_each_entry(peer, &dev->peer_list, peer_link)
if (peer->node_id == node_id &&
peer->generation == generation)
return peer;
return NULL;
}
/* See IEEE 1394-2008 table 6-4, table 8-8, table 16-18. */
static unsigned fwnet_max_payload(unsigned max_rec, unsigned speed)
{
max_rec = min(max_rec, speed + 8);
max_rec = clamp(max_rec, 8U, 11U); /* 512...4096 */
return (1 << (max_rec + 1)) - RFC2374_FRAG_HDR_SIZE;
}
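/*
 * Illustrative example of the calculation above: a peer with max_rec = 8
 * at S400 (speed code 2) gives min(8, 2 + 8) = 8, clamped to 8, hence
 * (1 << 9) - RFC2374_FRAG_HDR_SIZE = 512 - 8 = 504 bytes per fragment.
 */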
static int fwnet_finish_incoming_packet(struct net_device *net,
struct sk_buff *skb, u16 source_node_id,
bool is_broadcast, u16 ether_type)
{
int status, len;
switch (ether_type) {
case ETH_P_ARP:
case ETH_P_IP:
#if IS_ENABLED(CONFIG_IPV6)
case ETH_P_IPV6:
#endif
break;
default:
goto err;
}
/* Write metadata, and then pass to the receive level */
skb->dev = net;
skb->ip_summed = CHECKSUM_NONE;
/*
* Parse the encapsulation header. This actually does the job of
* converting to an ethernet-like pseudo frame header.
*/
if (dev_hard_header(skb, net, ether_type,
is_broadcast ? net->broadcast : net->dev_addr,
NULL, skb->len) >= 0) {
struct fwnet_header *eth;
u16 *rawp;
__be16 protocol;
skb_reset_mac_header(skb);
skb_pull(skb, sizeof(*eth));
eth = (struct fwnet_header *)skb_mac_header(skb);
if (fwnet_hwaddr_is_multicast(eth->h_dest)) {
if (memcmp(eth->h_dest, net->broadcast,
net->addr_len) == 0)
skb->pkt_type = PACKET_BROADCAST;
#if 0
else
skb->pkt_type = PACKET_MULTICAST;
#endif
} else {
if (memcmp(eth->h_dest, net->dev_addr, net->addr_len))
skb->pkt_type = PACKET_OTHERHOST;
}
if (ntohs(eth->h_proto) >= ETH_P_802_3_MIN) {
protocol = eth->h_proto;
} else {
rawp = (u16 *)skb->data;
if (*rawp == 0xffff)
protocol = htons(ETH_P_802_3);
else
protocol = htons(ETH_P_802_2);
}
skb->protocol = protocol;
}
len = skb->len;
status = netif_rx(skb);
if (status == NET_RX_DROP) {
net->stats.rx_errors++;
net->stats.rx_dropped++;
} else {
net->stats.rx_packets++;
net->stats.rx_bytes += len;
}
return 0;
err:
net->stats.rx_errors++;
net->stats.rx_dropped++;
dev_kfree_skb_any(skb);
return -ENOENT;
}
static int fwnet_incoming_packet(struct fwnet_device *dev, __be32 *buf, int len,
int source_node_id, int generation,
bool is_broadcast)
{
struct sk_buff *skb;
struct net_device *net = dev->netdev;
struct rfc2734_header hdr;
unsigned lf;
unsigned long flags;
struct fwnet_peer *peer;
struct fwnet_partial_datagram *pd;
int fg_off;
int dg_size;
u16 datagram_label;
int retval;
u16 ether_type;
if (len <= RFC2374_UNFRAG_HDR_SIZE)
return 0;
hdr.w0 = be32_to_cpu(buf[0]);
lf = fwnet_get_hdr_lf(&hdr);
if (lf == RFC2374_HDR_UNFRAG) {
/*
* An unfragmented datagram has been received by the ieee1394
* bus. Build an skbuff around it so we can pass it to the
* high level network layer.
*/
ether_type = fwnet_get_hdr_ether_type(&hdr);
buf++;
len -= RFC2374_UNFRAG_HDR_SIZE;
skb = dev_alloc_skb(len + LL_RESERVED_SPACE(net));
if (unlikely(!skb)) {
net->stats.rx_dropped++;
return -ENOMEM;
}
skb_reserve(skb, LL_RESERVED_SPACE(net));
skb_put_data(skb, buf, len);
return fwnet_finish_incoming_packet(net, skb, source_node_id,
is_broadcast, ether_type);
}
/* A datagram fragment has been received, now the fun begins. */
if (len <= RFC2374_FRAG_HDR_SIZE)
return 0;
hdr.w1 = ntohl(buf[1]);
buf += 2;
len -= RFC2374_FRAG_HDR_SIZE;
if (lf == RFC2374_HDR_FIRSTFRAG) {
ether_type = fwnet_get_hdr_ether_type(&hdr);
fg_off = 0;
} else {
ether_type = 0;
fg_off = fwnet_get_hdr_fg_off(&hdr);
}
datagram_label = fwnet_get_hdr_dgl(&hdr);
dg_size = fwnet_get_hdr_dg_size(&hdr);
if (fg_off + len > dg_size)
return 0;
spin_lock_irqsave(&dev->lock, flags);
peer = fwnet_peer_find_by_node_id(dev, source_node_id, generation);
if (!peer) {
retval = -ENOENT;
goto fail;
}
pd = fwnet_pd_find(peer, datagram_label);
if (pd == NULL) {
while (peer->pdg_size >= FWNET_MAX_FRAGMENTS) {
/* remove the oldest */
fwnet_pd_delete(list_first_entry(&peer->pd_list,
struct fwnet_partial_datagram, pd_link));
peer->pdg_size--;
}
pd = fwnet_pd_new(net, peer, datagram_label,
dg_size, buf, fg_off, len);
if (pd == NULL) {
retval = -ENOMEM;
goto fail;
}
peer->pdg_size++;
} else {
if (fwnet_frag_overlap(pd, fg_off, len) ||
pd->datagram_size != dg_size) {
/*
* Differing datagram sizes or overlapping fragments,
* discard old datagram and start a new one.
*/
fwnet_pd_delete(pd);
pd = fwnet_pd_new(net, peer, datagram_label,
dg_size, buf, fg_off, len);
if (pd == NULL) {
peer->pdg_size--;
retval = -ENOMEM;
goto fail;
}
} else {
if (!fwnet_pd_update(peer, pd, buf, fg_off, len)) {
/*
* Couldn't save off fragment anyway
* so might as well obliterate the
* datagram now.
*/
fwnet_pd_delete(pd);
peer->pdg_size--;
retval = -ENOMEM;
goto fail;
}
}
} /* new datagram or add to existing one */
if (lf == RFC2374_HDR_FIRSTFRAG)
pd->ether_type = ether_type;
if (fwnet_pd_is_complete(pd)) {
ether_type = pd->ether_type;
peer->pdg_size--;
skb = skb_get(pd->skb);
fwnet_pd_delete(pd);
spin_unlock_irqrestore(&dev->lock, flags);
return fwnet_finish_incoming_packet(net, skb, source_node_id,
false, ether_type);
}
/*
* Datagram is not complete, we're done for the
* moment.
*/
retval = 0;
fail:
spin_unlock_irqrestore(&dev->lock, flags);
return retval;
}
static void fwnet_receive_packet(struct fw_card *card, struct fw_request *r,
int tcode, int destination, int source, int generation,
unsigned long long offset, void *payload, size_t length,
void *callback_data)
{
struct fwnet_device *dev = callback_data;
int rcode;
if (destination == IEEE1394_ALL_NODES) {
// Although the response to the broadcast packet is not necessarily required, the
// fw_send_response() function should still be called to maintain the reference
		// counting of the object. In this case, the call simply releases the object
		// by decrementing its reference count.
rcode = RCODE_COMPLETE;
} else if (offset != dev->handler.offset) {
rcode = RCODE_ADDRESS_ERROR;
} else if (tcode != TCODE_WRITE_BLOCK_REQUEST) {
rcode = RCODE_TYPE_ERROR;
} else if (fwnet_incoming_packet(dev, payload, length,
source, generation, false) != 0) {
dev_err(&dev->netdev->dev, "incoming packet failure\n");
rcode = RCODE_CONFLICT_ERROR;
} else {
rcode = RCODE_COMPLETE;
}
fw_send_response(card, r, rcode);
}
static int gasp_source_id(__be32 *p)
{
return be32_to_cpu(p[0]) >> 16;
}
static u32 gasp_specifier_id(__be32 *p)
{
return (be32_to_cpu(p[0]) & 0xffff) << 8 |
(be32_to_cpu(p[1]) & 0xff000000) >> 24;
}
static u32 gasp_version(__be32 *p)
{
return be32_to_cpu(p[1]) & 0xffffff;
}
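/*
 * GASP header layout assumed by the three accessors above (two big-endian
 * quadlets): p[0] = source_id:16 | specifier_id[23:8], and
 * p[1] = specifier_id[7:0] | version:24.
 */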
static void fwnet_receive_broadcast(struct fw_iso_context *context,
u32 cycle, size_t header_length, void *header, void *data)
{
struct fwnet_device *dev;
struct fw_iso_packet packet;
__be16 *hdr_ptr;
__be32 *buf_ptr;
int retval;
u32 length;
unsigned long offset;
unsigned long flags;
dev = data;
hdr_ptr = header;
length = be16_to_cpup(hdr_ptr);
spin_lock_irqsave(&dev->lock, flags);
offset = dev->rcv_buffer_size * dev->broadcast_rcv_next_ptr;
buf_ptr = dev->broadcast_rcv_buffer_ptrs[dev->broadcast_rcv_next_ptr++];
if (dev->broadcast_rcv_next_ptr == dev->num_broadcast_rcv_ptrs)
dev->broadcast_rcv_next_ptr = 0;
spin_unlock_irqrestore(&dev->lock, flags);
if (length > IEEE1394_GASP_HDR_SIZE &&
gasp_specifier_id(buf_ptr) == IANA_SPECIFIER_ID &&
(gasp_version(buf_ptr) == RFC2734_SW_VERSION
#if IS_ENABLED(CONFIG_IPV6)
|| gasp_version(buf_ptr) == RFC3146_SW_VERSION
#endif
))
fwnet_incoming_packet(dev, buf_ptr + 2,
length - IEEE1394_GASP_HDR_SIZE,
gasp_source_id(buf_ptr),
context->card->generation, true);
packet.payload_length = dev->rcv_buffer_size;
packet.interrupt = 1;
packet.skip = 0;
packet.tag = 3;
packet.sy = 0;
packet.header_length = IEEE1394_GASP_HDR_SIZE;
spin_lock_irqsave(&dev->lock, flags);
retval = fw_iso_context_queue(dev->broadcast_rcv_context, &packet,
&dev->broadcast_rcv_buffer, offset);
spin_unlock_irqrestore(&dev->lock, flags);
if (retval >= 0)
fw_iso_context_queue_flush(dev->broadcast_rcv_context);
else
dev_err(&dev->netdev->dev, "requeue failed\n");
}
static struct kmem_cache *fwnet_packet_task_cache;
static void fwnet_free_ptask(struct fwnet_packet_task *ptask)
{
dev_kfree_skb_any(ptask->skb);
kmem_cache_free(fwnet_packet_task_cache, ptask);
}
/* Caller must hold dev->lock. */
static void dec_queued_datagrams(struct fwnet_device *dev)
{
if (--dev->queued_datagrams == FWNET_MIN_QUEUED_DATAGRAMS)
netif_wake_queue(dev->netdev);
}
static int fwnet_send_packet(struct fwnet_packet_task *ptask);
static void fwnet_transmit_packet_done(struct fwnet_packet_task *ptask)
{
struct fwnet_device *dev = ptask->dev;
struct sk_buff *skb = ptask->skb;
unsigned long flags;
bool free;
spin_lock_irqsave(&dev->lock, flags);
ptask->outstanding_pkts--;
/* Check whether we or the networking TX soft-IRQ is last user. */
free = (ptask->outstanding_pkts == 0 && ptask->enqueued);
if (free)
dec_queued_datagrams(dev);
if (ptask->outstanding_pkts == 0) {
dev->netdev->stats.tx_packets++;
dev->netdev->stats.tx_bytes += skb->len;
}
spin_unlock_irqrestore(&dev->lock, flags);
if (ptask->outstanding_pkts > 0) {
u16 dg_size;
u16 fg_off;
u16 datagram_label;
u16 lf;
/* Update the ptask to point to the next fragment and send it */
lf = fwnet_get_hdr_lf(&ptask->hdr);
switch (lf) {
case RFC2374_HDR_LASTFRAG:
case RFC2374_HDR_UNFRAG:
default:
dev_err(&dev->netdev->dev,
"outstanding packet %x lf %x, header %x,%x\n",
ptask->outstanding_pkts, lf, ptask->hdr.w0,
ptask->hdr.w1);
BUG();
case RFC2374_HDR_FIRSTFRAG:
/* Set frag type here for future interior fragments */
dg_size = fwnet_get_hdr_dg_size(&ptask->hdr);
fg_off = ptask->max_payload - RFC2374_FRAG_HDR_SIZE;
datagram_label = fwnet_get_hdr_dgl(&ptask->hdr);
break;
case RFC2374_HDR_INTFRAG:
dg_size = fwnet_get_hdr_dg_size(&ptask->hdr);
fg_off = fwnet_get_hdr_fg_off(&ptask->hdr)
+ ptask->max_payload - RFC2374_FRAG_HDR_SIZE;
datagram_label = fwnet_get_hdr_dgl(&ptask->hdr);
break;
}
if (ptask->dest_node == IEEE1394_ALL_NODES) {
skb_pull(skb,
ptask->max_payload + IEEE1394_GASP_HDR_SIZE);
} else {
skb_pull(skb, ptask->max_payload);
}
if (ptask->outstanding_pkts > 1) {
fwnet_make_sf_hdr(&ptask->hdr, RFC2374_HDR_INTFRAG,
dg_size, fg_off, datagram_label);
} else {
fwnet_make_sf_hdr(&ptask->hdr, RFC2374_HDR_LASTFRAG,
dg_size, fg_off, datagram_label);
ptask->max_payload = skb->len + RFC2374_FRAG_HDR_SIZE;
}
fwnet_send_packet(ptask);
}
if (free)
fwnet_free_ptask(ptask);
}
static void fwnet_transmit_packet_failed(struct fwnet_packet_task *ptask)
{
struct fwnet_device *dev = ptask->dev;
unsigned long flags;
bool free;
spin_lock_irqsave(&dev->lock, flags);
/* One fragment failed; don't try to send remaining fragments. */
ptask->outstanding_pkts = 0;
/* Check whether we or the networking TX soft-IRQ is last user. */
free = ptask->enqueued;
if (free)
dec_queued_datagrams(dev);
dev->netdev->stats.tx_dropped++;
dev->netdev->stats.tx_errors++;
spin_unlock_irqrestore(&dev->lock, flags);
if (free)
fwnet_free_ptask(ptask);
}
static void fwnet_write_complete(struct fw_card *card, int rcode,
void *payload, size_t length, void *data)
{
struct fwnet_packet_task *ptask = data;
static unsigned long j;
static int last_rcode, errors_skipped;
if (rcode == RCODE_COMPLETE) {
fwnet_transmit_packet_done(ptask);
} else {
if (printk_timed_ratelimit(&j, 1000) || rcode != last_rcode) {
dev_err(&ptask->dev->netdev->dev,
"fwnet_write_complete failed: %x (skipped %d)\n",
rcode, errors_skipped);
errors_skipped = 0;
last_rcode = rcode;
} else {
errors_skipped++;
}
fwnet_transmit_packet_failed(ptask);
}
}
static int fwnet_send_packet(struct fwnet_packet_task *ptask)
{
struct fwnet_device *dev;
unsigned tx_len;
struct rfc2734_header *bufhdr;
unsigned long flags;
bool free;
dev = ptask->dev;
tx_len = ptask->max_payload;
switch (fwnet_get_hdr_lf(&ptask->hdr)) {
case RFC2374_HDR_UNFRAG:
bufhdr = skb_push(ptask->skb, RFC2374_UNFRAG_HDR_SIZE);
put_unaligned_be32(ptask->hdr.w0, &bufhdr->w0);
break;
case RFC2374_HDR_FIRSTFRAG:
case RFC2374_HDR_INTFRAG:
case RFC2374_HDR_LASTFRAG:
bufhdr = skb_push(ptask->skb, RFC2374_FRAG_HDR_SIZE);
put_unaligned_be32(ptask->hdr.w0, &bufhdr->w0);
put_unaligned_be32(ptask->hdr.w1, &bufhdr->w1);
break;
default:
BUG();
}
if (ptask->dest_node == IEEE1394_ALL_NODES) {
u8 *p;
int generation;
int node_id;
unsigned int sw_version;
/* ptask->generation may not have been set yet */
generation = dev->card->generation;
smp_rmb();
node_id = dev->card->node_id;
switch (ptask->skb->protocol) {
default:
sw_version = RFC2734_SW_VERSION;
break;
#if IS_ENABLED(CONFIG_IPV6)
case htons(ETH_P_IPV6):
sw_version = RFC3146_SW_VERSION;
#endif
}
p = skb_push(ptask->skb, IEEE1394_GASP_HDR_SIZE);
put_unaligned_be32(node_id << 16 | IANA_SPECIFIER_ID >> 8, p);
put_unaligned_be32((IANA_SPECIFIER_ID & 0xff) << 24
| sw_version, &p[4]);
/* We should not transmit if broadcast_channel.valid == 0. */
fw_send_request(dev->card, &ptask->transaction,
TCODE_STREAM_DATA,
fw_stream_packet_destination_id(3,
IEEE1394_BROADCAST_CHANNEL, 0),
generation, SCODE_100, 0ULL, ptask->skb->data,
tx_len + 8, fwnet_write_complete, ptask);
spin_lock_irqsave(&dev->lock, flags);
/* If the AT tasklet already ran, we may be last user. */
free = (ptask->outstanding_pkts == 0 && !ptask->enqueued);
if (!free)
ptask->enqueued = true;
else
dec_queued_datagrams(dev);
spin_unlock_irqrestore(&dev->lock, flags);
goto out;
}
fw_send_request(dev->card, &ptask->transaction,
TCODE_WRITE_BLOCK_REQUEST, ptask->dest_node,
ptask->generation, ptask->speed, ptask->fifo_addr,
ptask->skb->data, tx_len, fwnet_write_complete, ptask);
spin_lock_irqsave(&dev->lock, flags);
/* If the AT tasklet already ran, we may be last user. */
free = (ptask->outstanding_pkts == 0 && !ptask->enqueued);
if (!free)
ptask->enqueued = true;
else
dec_queued_datagrams(dev);
spin_unlock_irqrestore(&dev->lock, flags);
netif_trans_update(dev->netdev);
out:
if (free)
fwnet_free_ptask(ptask);
return 0;
}
static void fwnet_fifo_stop(struct fwnet_device *dev)
{
if (dev->local_fifo == FWNET_NO_FIFO_ADDR)
return;
fw_core_remove_address_handler(&dev->handler);
dev->local_fifo = FWNET_NO_FIFO_ADDR;
}
static int fwnet_fifo_start(struct fwnet_device *dev)
{
int retval;
if (dev->local_fifo != FWNET_NO_FIFO_ADDR)
return 0;
dev->handler.length = 4096;
dev->handler.address_callback = fwnet_receive_packet;
dev->handler.callback_data = dev;
retval = fw_core_add_address_handler(&dev->handler,
&fw_high_memory_region);
if (retval < 0)
return retval;
dev->local_fifo = dev->handler.offset;
return 0;
}
static void __fwnet_broadcast_stop(struct fwnet_device *dev)
{
unsigned u;
if (dev->broadcast_state != FWNET_BROADCAST_ERROR) {
for (u = 0; u < FWNET_ISO_PAGE_COUNT; u++)
kunmap(dev->broadcast_rcv_buffer.pages[u]);
fw_iso_buffer_destroy(&dev->broadcast_rcv_buffer, dev->card);
}
if (dev->broadcast_rcv_context) {
fw_iso_context_destroy(dev->broadcast_rcv_context);
dev->broadcast_rcv_context = NULL;
}
kfree(dev->broadcast_rcv_buffer_ptrs);
dev->broadcast_rcv_buffer_ptrs = NULL;
dev->broadcast_state = FWNET_BROADCAST_ERROR;
}
static void fwnet_broadcast_stop(struct fwnet_device *dev)
{
if (dev->broadcast_state == FWNET_BROADCAST_ERROR)
return;
fw_iso_context_stop(dev->broadcast_rcv_context);
__fwnet_broadcast_stop(dev);
}
static int fwnet_broadcast_start(struct fwnet_device *dev)
{
struct fw_iso_context *context;
int retval;
unsigned num_packets;
unsigned max_receive;
struct fw_iso_packet packet;
unsigned long offset;
void **ptrptr;
unsigned u;
if (dev->broadcast_state != FWNET_BROADCAST_ERROR)
return 0;
max_receive = 1U << (dev->card->max_receive + 1);
num_packets = (FWNET_ISO_PAGE_COUNT * PAGE_SIZE) / max_receive;
ptrptr = kmalloc_array(num_packets, sizeof(void *), GFP_KERNEL);
if (!ptrptr) {
retval = -ENOMEM;
goto failed;
}
dev->broadcast_rcv_buffer_ptrs = ptrptr;
context = fw_iso_context_create(dev->card, FW_ISO_CONTEXT_RECEIVE,
IEEE1394_BROADCAST_CHANNEL,
dev->card->link_speed, 8,
fwnet_receive_broadcast, dev);
if (IS_ERR(context)) {
retval = PTR_ERR(context);
goto failed;
}
retval = fw_iso_buffer_init(&dev->broadcast_rcv_buffer, dev->card,
FWNET_ISO_PAGE_COUNT, DMA_FROM_DEVICE);
if (retval < 0)
goto failed;
dev->broadcast_state = FWNET_BROADCAST_STOPPED;
for (u = 0; u < FWNET_ISO_PAGE_COUNT; u++) {
void *ptr;
unsigned v;
ptr = kmap(dev->broadcast_rcv_buffer.pages[u]);
for (v = 0; v < num_packets / FWNET_ISO_PAGE_COUNT; v++)
*ptrptr++ = (void *) ((char *)ptr + v * max_receive);
}
dev->broadcast_rcv_context = context;
packet.payload_length = max_receive;
packet.interrupt = 1;
packet.skip = 0;
packet.tag = 3;
packet.sy = 0;
packet.header_length = IEEE1394_GASP_HDR_SIZE;
offset = 0;
for (u = 0; u < num_packets; u++) {
retval = fw_iso_context_queue(context, &packet,
&dev->broadcast_rcv_buffer, offset);
if (retval < 0)
goto failed;
offset += max_receive;
}
dev->num_broadcast_rcv_ptrs = num_packets;
dev->rcv_buffer_size = max_receive;
dev->broadcast_rcv_next_ptr = 0U;
retval = fw_iso_context_start(context, -1, 0,
FW_ISO_CONTEXT_MATCH_ALL_TAGS); /* ??? sync */
if (retval < 0)
goto failed;
/* FIXME: adjust it according to the min. speed of all known peers? */
dev->broadcast_xmt_max_payload = IEEE1394_MAX_PAYLOAD_S100
- IEEE1394_GASP_HDR_SIZE - RFC2374_UNFRAG_HDR_SIZE;
dev->broadcast_state = FWNET_BROADCAST_RUNNING;
return 0;
failed:
__fwnet_broadcast_stop(dev);
return retval;
}
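/*
 * Note on broadcast_xmt_max_payload computed above: the 512-byte S100
 * payload (IEEE1394_MAX_PAYLOAD_S100) minus the 8-byte GASP header and the
 * 4-byte unfragmented encapsulation header leaves 500 bytes per datagram.
 */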
static void set_carrier_state(struct fwnet_device *dev)
{
if (dev->peer_count > 1)
netif_carrier_on(dev->netdev);
else
netif_carrier_off(dev->netdev);
}
/* ifup */
static int fwnet_open(struct net_device *net)
{
struct fwnet_device *dev = netdev_priv(net);
int ret;
ret = fwnet_broadcast_start(dev);
if (ret)
return ret;
netif_start_queue(net);
spin_lock_irq(&dev->lock);
set_carrier_state(dev);
spin_unlock_irq(&dev->lock);
return 0;
}
/* ifdown */
static int fwnet_stop(struct net_device *net)
{
struct fwnet_device *dev = netdev_priv(net);
netif_stop_queue(net);
fwnet_broadcast_stop(dev);
return 0;
}
static netdev_tx_t fwnet_tx(struct sk_buff *skb, struct net_device *net)
{
struct fwnet_header hdr_buf;
struct fwnet_device *dev = netdev_priv(net);
__be16 proto;
u16 dest_node;
unsigned max_payload;
u16 dg_size;
u16 *datagram_label_ptr;
struct fwnet_packet_task *ptask;
struct fwnet_peer *peer;
unsigned long flags;
spin_lock_irqsave(&dev->lock, flags);
/* Can this happen? */
if (netif_queue_stopped(dev->netdev)) {
spin_unlock_irqrestore(&dev->lock, flags);
return NETDEV_TX_BUSY;
}
ptask = kmem_cache_alloc(fwnet_packet_task_cache, GFP_ATOMIC);
if (ptask == NULL)
goto fail;
skb = skb_share_check(skb, GFP_ATOMIC);
if (!skb)
goto fail;
/*
* Make a copy of the driver-specific header.
* We might need to rebuild the header on tx failure.
*/
memcpy(&hdr_buf, skb->data, sizeof(hdr_buf));
proto = hdr_buf.h_proto;
switch (proto) {
case htons(ETH_P_ARP):
case htons(ETH_P_IP):
#if IS_ENABLED(CONFIG_IPV6)
case htons(ETH_P_IPV6):
#endif
break;
default:
goto fail;
}
skb_pull(skb, sizeof(hdr_buf));
dg_size = skb->len;
/*
* Set the transmission type for the packet. ARP packets and IP
* broadcast packets are sent via GASP.
*/
if (fwnet_hwaddr_is_multicast(hdr_buf.h_dest)) {
max_payload = dev->broadcast_xmt_max_payload;
datagram_label_ptr = &dev->broadcast_xmt_datagramlabel;
ptask->fifo_addr = FWNET_NO_FIFO_ADDR;
ptask->generation = 0;
ptask->dest_node = IEEE1394_ALL_NODES;
ptask->speed = SCODE_100;
} else {
union fwnet_hwaddr *ha = (union fwnet_hwaddr *)hdr_buf.h_dest;
__be64 guid = get_unaligned(&ha->uc.uniq_id);
u8 generation;
peer = fwnet_peer_find_by_guid(dev, be64_to_cpu(guid));
if (!peer)
goto fail;
generation = peer->generation;
dest_node = peer->node_id;
max_payload = peer->max_payload;
datagram_label_ptr = &peer->datagram_label;
ptask->fifo_addr = get_unaligned_be48(ha->uc.fifo);
ptask->generation = generation;
ptask->dest_node = dest_node;
ptask->speed = peer->speed;
}
ptask->hdr.w0 = 0;
ptask->hdr.w1 = 0;
ptask->skb = skb;
ptask->dev = dev;
/* Does it all fit in one packet? */
if (dg_size <= max_payload) {
fwnet_make_uf_hdr(&ptask->hdr, ntohs(proto));
ptask->outstanding_pkts = 1;
max_payload = dg_size + RFC2374_UNFRAG_HDR_SIZE;
} else {
u16 datagram_label;
max_payload -= RFC2374_FRAG_OVERHEAD;
datagram_label = (*datagram_label_ptr)++;
fwnet_make_ff_hdr(&ptask->hdr, ntohs(proto), dg_size,
datagram_label);
ptask->outstanding_pkts = DIV_ROUND_UP(dg_size, max_payload);
max_payload += RFC2374_FRAG_HDR_SIZE;
}
if (++dev->queued_datagrams == FWNET_MAX_QUEUED_DATAGRAMS)
netif_stop_queue(dev->netdev);
spin_unlock_irqrestore(&dev->lock, flags);
ptask->max_payload = max_payload;
ptask->enqueued = 0;
fwnet_send_packet(ptask);
return NETDEV_TX_OK;
fail:
spin_unlock_irqrestore(&dev->lock, flags);
if (ptask)
kmem_cache_free(fwnet_packet_task_cache, ptask);
if (skb != NULL)
dev_kfree_skb(skb);
net->stats.tx_dropped++;
net->stats.tx_errors++;
/*
* FIXME: According to a patch from 2003-02-26, "returning non-zero
* causes serious problems" here, allegedly. Before that patch,
* -ERRNO was returned which is not appropriate under Linux 2.6.
* Perhaps more needs to be done? Stop the queue in serious
* conditions and restart it elsewhere?
*/
return NETDEV_TX_OK;
}
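/*
 * Worked example for the fragmentation path in fwnet_tx() above (sketch,
 * numbers are illustrative only): with a per-fragment payload of 1024 bytes
 * left after subtracting RFC2374_FRAG_OVERHEAD, a 3000-byte datagram is
 * split into DIV_ROUND_UP(3000, 1024) = 3 link fragments, all carrying the
 * same datagram_label so that the receiver can reassemble them.
 */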
static const struct ethtool_ops fwnet_ethtool_ops = {
.get_link = ethtool_op_get_link,
};
static const struct net_device_ops fwnet_netdev_ops = {
.ndo_open = fwnet_open,
.ndo_stop = fwnet_stop,
.ndo_start_xmit = fwnet_tx,
};
static void fwnet_init_dev(struct net_device *net)
{
net->header_ops = &fwnet_header_ops;
net->netdev_ops = &fwnet_netdev_ops;
net->watchdog_timeo = 2 * HZ;
net->flags = IFF_BROADCAST | IFF_MULTICAST;
net->features = NETIF_F_HIGHDMA;
net->addr_len = FWNET_ALEN;
net->hard_header_len = FWNET_HLEN;
net->type = ARPHRD_IEEE1394;
net->tx_queue_len = FWNET_TX_QUEUE_LEN;
net->ethtool_ops = &fwnet_ethtool_ops;
}
/* caller must hold fwnet_device_mutex */
static struct fwnet_device *fwnet_dev_find(struct fw_card *card)
{
struct fwnet_device *dev;
list_for_each_entry(dev, &fwnet_device_list, dev_link)
if (dev->card == card)
return dev;
return NULL;
}
static int fwnet_add_peer(struct fwnet_device *dev,
struct fw_unit *unit, struct fw_device *device)
{
struct fwnet_peer *peer;
peer = kmalloc(sizeof(*peer), GFP_KERNEL);
if (!peer)
return -ENOMEM;
dev_set_drvdata(&unit->device, peer);
peer->dev = dev;
peer->guid = (u64)device->config_rom[3] << 32 | device->config_rom[4];
INIT_LIST_HEAD(&peer->pd_list);
peer->pdg_size = 0;
peer->datagram_label = 0;
peer->speed = device->max_speed;
peer->max_payload = fwnet_max_payload(device->max_rec, peer->speed);
peer->generation = device->generation;
smp_rmb();
peer->node_id = device->node_id;
spin_lock_irq(&dev->lock);
list_add_tail(&peer->peer_link, &dev->peer_list);
dev->peer_count++;
set_carrier_state(dev);
spin_unlock_irq(&dev->lock);
return 0;
}
static int fwnet_probe(struct fw_unit *unit,
const struct ieee1394_device_id *id)
{
struct fw_device *device = fw_parent_device(unit);
struct fw_card *card = device->card;
struct net_device *net;
bool allocated_netdev = false;
struct fwnet_device *dev;
union fwnet_hwaddr ha;
int ret;
mutex_lock(&fwnet_device_mutex);
dev = fwnet_dev_find(card);
if (dev) {
net = dev->netdev;
goto have_dev;
}
net = alloc_netdev(sizeof(*dev), "firewire%d", NET_NAME_UNKNOWN,
fwnet_init_dev);
if (net == NULL) {
mutex_unlock(&fwnet_device_mutex);
return -ENOMEM;
}
allocated_netdev = true;
SET_NETDEV_DEV(net, card->device);
dev = netdev_priv(net);
spin_lock_init(&dev->lock);
dev->broadcast_state = FWNET_BROADCAST_ERROR;
dev->broadcast_rcv_context = NULL;
dev->broadcast_xmt_max_payload = 0;
dev->broadcast_xmt_datagramlabel = 0;
dev->local_fifo = FWNET_NO_FIFO_ADDR;
dev->queued_datagrams = 0;
INIT_LIST_HEAD(&dev->peer_list);
dev->card = card;
dev->netdev = net;
ret = fwnet_fifo_start(dev);
if (ret < 0)
goto out;
dev->local_fifo = dev->handler.offset;
/*
* default MTU: RFC 2734 cl. 4, RFC 3146 cl. 4
* maximum MTU: RFC 2734 cl. 4.2, fragment encapsulation header's
* maximum possible datagram_size + 1 = 0xfff + 1
*/
net->mtu = 1500U;
net->min_mtu = ETH_MIN_MTU;
net->max_mtu = 4096U;
/* Set our hardware address while we're at it */
ha.uc.uniq_id = cpu_to_be64(card->guid);
ha.uc.max_rec = dev->card->max_receive;
ha.uc.sspd = dev->card->link_speed;
put_unaligned_be48(dev->local_fifo, ha.uc.fifo);
dev_addr_set(net, ha.u);
memset(net->broadcast, -1, net->addr_len);
ret = register_netdev(net);
if (ret)
goto out;
list_add_tail(&dev->dev_link, &fwnet_device_list);
dev_notice(&net->dev, "IP over IEEE 1394 on card %s\n",
dev_name(card->device));
have_dev:
ret = fwnet_add_peer(dev, unit, device);
if (ret && allocated_netdev) {
unregister_netdev(net);
list_del(&dev->dev_link);
out:
fwnet_fifo_stop(dev);
free_netdev(net);
}
mutex_unlock(&fwnet_device_mutex);
return ret;
}
/*
* FIXME abort partially sent fragmented datagrams,
* discard partially received fragmented datagrams
*/
static void fwnet_update(struct fw_unit *unit)
{
struct fw_device *device = fw_parent_device(unit);
struct fwnet_peer *peer = dev_get_drvdata(&unit->device);
int generation;
generation = device->generation;
spin_lock_irq(&peer->dev->lock);
peer->node_id = device->node_id;
peer->generation = generation;
spin_unlock_irq(&peer->dev->lock);
}
static void fwnet_remove_peer(struct fwnet_peer *peer, struct fwnet_device *dev)
{
struct fwnet_partial_datagram *pd, *pd_next;
spin_lock_irq(&dev->lock);
list_del(&peer->peer_link);
dev->peer_count--;
set_carrier_state(dev);
spin_unlock_irq(&dev->lock);
list_for_each_entry_safe(pd, pd_next, &peer->pd_list, pd_link)
fwnet_pd_delete(pd);
kfree(peer);
}
static void fwnet_remove(struct fw_unit *unit)
{
struct fwnet_peer *peer = dev_get_drvdata(&unit->device);
struct fwnet_device *dev = peer->dev;
struct net_device *net;
int i;
mutex_lock(&fwnet_device_mutex);
net = dev->netdev;
fwnet_remove_peer(peer, dev);
if (list_empty(&dev->peer_list)) {
unregister_netdev(net);
fwnet_fifo_stop(dev);
for (i = 0; dev->queued_datagrams && i < 5; i++)
ssleep(1);
WARN_ON(dev->queued_datagrams);
list_del(&dev->dev_link);
free_netdev(net);
}
mutex_unlock(&fwnet_device_mutex);
}
static const struct ieee1394_device_id fwnet_id_table[] = {
{
.match_flags = IEEE1394_MATCH_SPECIFIER_ID |
IEEE1394_MATCH_VERSION,
.specifier_id = IANA_SPECIFIER_ID,
.version = RFC2734_SW_VERSION,
},
#if IS_ENABLED(CONFIG_IPV6)
{
.match_flags = IEEE1394_MATCH_SPECIFIER_ID |
IEEE1394_MATCH_VERSION,
.specifier_id = IANA_SPECIFIER_ID,
.version = RFC3146_SW_VERSION,
},
#endif
{ }
};
static struct fw_driver fwnet_driver = {
.driver = {
.owner = THIS_MODULE,
.name = KBUILD_MODNAME,
.bus = &fw_bus_type,
},
.probe = fwnet_probe,
.update = fwnet_update,
.remove = fwnet_remove,
.id_table = fwnet_id_table,
};
static const u32 rfc2374_unit_directory_data[] = {
0x00040000, /* directory_length */
0x1200005e, /* unit_specifier_id: IANA */
0x81000003, /* textual descriptor offset */
0x13000001, /* unit_sw_version: RFC 2734 */
0x81000005, /* textual descriptor offset */
0x00030000, /* descriptor_length */
0x00000000, /* text */
0x00000000, /* minimal ASCII, en */
0x49414e41, /* I A N A */
0x00030000, /* descriptor_length */
0x00000000, /* text */
0x00000000, /* minimal ASCII, en */
0x49507634, /* I P v 4 */
};
static struct fw_descriptor rfc2374_unit_directory = {
.length = ARRAY_SIZE(rfc2374_unit_directory_data),
.key = (CSR_DIRECTORY | CSR_UNIT) << 24,
.data = rfc2374_unit_directory_data
};
#if IS_ENABLED(CONFIG_IPV6)
static const u32 rfc3146_unit_directory_data[] = {
0x00040000, /* directory_length */
0x1200005e, /* unit_specifier_id: IANA */
0x81000003, /* textual descriptor offset */
0x13000002, /* unit_sw_version: RFC 3146 */
0x81000005, /* textual descriptor offset */
0x00030000, /* descriptor_length */
0x00000000, /* text */
0x00000000, /* minimal ASCII, en */
0x49414e41, /* I A N A */
0x00030000, /* descriptor_length */
0x00000000, /* text */
0x00000000, /* minimal ASCII, en */
0x49507636, /* I P v 6 */
};
static struct fw_descriptor rfc3146_unit_directory = {
.length = ARRAY_SIZE(rfc3146_unit_directory_data),
.key = (CSR_DIRECTORY | CSR_UNIT) << 24,
.data = rfc3146_unit_directory_data
};
#endif
static int __init fwnet_init(void)
{
int err;
err = fw_core_add_descriptor(&rfc2374_unit_directory);
if (err)
return err;
#if IS_ENABLED(CONFIG_IPV6)
err = fw_core_add_descriptor(&rfc3146_unit_directory);
if (err)
goto out;
#endif
fwnet_packet_task_cache = kmem_cache_create("packet_task",
sizeof(struct fwnet_packet_task), 0, 0, NULL);
if (!fwnet_packet_task_cache) {
err = -ENOMEM;
goto out2;
}
err = driver_register(&fwnet_driver.driver);
if (!err)
return 0;
kmem_cache_destroy(fwnet_packet_task_cache);
out2:
#if IS_ENABLED(CONFIG_IPV6)
fw_core_remove_descriptor(&rfc3146_unit_directory);
out:
#endif
fw_core_remove_descriptor(&rfc2374_unit_directory);
return err;
}
module_init(fwnet_init);
static void __exit fwnet_cleanup(void)
{
driver_unregister(&fwnet_driver.driver);
kmem_cache_destroy(fwnet_packet_task_cache);
#if IS_ENABLED(CONFIG_IPV6)
fw_core_remove_descriptor(&rfc3146_unit_directory);
#endif
fw_core_remove_descriptor(&rfc2374_unit_directory);
}
module_exit(fwnet_cleanup);
MODULE_AUTHOR("Jay Fenlason <[email protected]>");
MODULE_DESCRIPTION("IP over IEEE1394 as per RFC 2734/3146");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(ieee1394, fwnet_id_table);
| linux-master | drivers/firewire/net.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* init_ohci1394_dma.c - Initializes physical DMA on all OHCI 1394 controllers
*
* Copyright (C) 2006-2007 Bernhard Kaindl <[email protected]>
*
* Derived from drivers/ieee1394/ohci1394.c and arch/x86/kernel/early-quirks.c
* this file has functions to:
* - scan the PCI very early on boot for all OHCI 1394-compliant controllers
* - reset and initialize them and make them join the IEEE1394 bus and
* - enable physical DMA on them to allow remote debugging
*
 * All code and data is marked as __init and __initdata, respectively,
 * because during boot all OHCI1394 controllers may be claimed by the
 * firewire stack, and at that point this code must not touch them anymore.
 *
 * To use physical DMA after the initialization of the firewire stack,
 * make sure that the stack enables it, and (re-)attach after the bus reset
 * which may be caused by the firewire stack initialization.
*/
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/pci.h> /* for PCI defines */
#include <linux/string.h>
#include <asm/pci-direct.h> /* for direct PCI config space access */
#include <asm/fixmap.h>
#include <linux/init_ohci1394_dma.h>
#include "ohci.h"
int __initdata init_ohci1394_dma_early;
struct ohci {
void __iomem *registers;
};
static inline void reg_write(const struct ohci *ohci, int offset, u32 data)
{
writel(data, ohci->registers + offset);
}
static inline u32 reg_read(const struct ohci *ohci, int offset)
{
return readl(ohci->registers + offset);
}
#define OHCI_LOOP_COUNT 100 /* Number of loops for reg read waits */
/* Reads a PHY register of an OHCI-1394 controller */
static inline u8 __init get_phy_reg(struct ohci *ohci, u8 addr)
{
int i;
u32 r;
reg_write(ohci, OHCI1394_PhyControl, (addr << 8) | 0x00008000);
for (i = 0; i < OHCI_LOOP_COUNT; i++) {
if (reg_read(ohci, OHCI1394_PhyControl) & 0x80000000)
break;
mdelay(1);
}
r = reg_read(ohci, OHCI1394_PhyControl);
return (r & 0x00ff0000) >> 16;
}
/* Writes to a PHY register of an OHCI-1394 controller */
static inline void __init set_phy_reg(struct ohci *ohci, u8 addr, u8 data)
{
int i;
reg_write(ohci, OHCI1394_PhyControl, (addr << 8) | data | 0x00004000);
for (i = 0; i < OHCI_LOOP_COUNT; i++) {
if (!(reg_read(ohci, OHCI1394_PhyControl) & 0x00004000))
break;
mdelay(1);
}
}
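/*
 * Usage note (sketch, mirroring the two helpers above): a PHY read is
 * started by writing (addr << 8) | 0x00008000 to OHCI1394_PhyControl and
 * polling bit 31 for completion, the result being taken from bits 16..23;
 * a PHY write sets bit 14 (0x00004000) together with addr and data and
 * polls for that bit to clear. Both helpers give up silently after about
 * OHCI_LOOP_COUNT milliseconds.
 */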
/* Resets an OHCI-1394 controller (for sane state before initialization) */
static inline void __init init_ohci1394_soft_reset(struct ohci *ohci)
{
int i;
reg_write(ohci, OHCI1394_HCControlSet, OHCI1394_HCControl_softReset);
for (i = 0; i < OHCI_LOOP_COUNT; i++) {
if (!(reg_read(ohci, OHCI1394_HCControlSet)
& OHCI1394_HCControl_softReset))
break;
mdelay(1);
}
}
#define OHCI1394_MAX_AT_REQ_RETRIES 0xf
#define OHCI1394_MAX_AT_RESP_RETRIES 0x2
#define OHCI1394_MAX_PHYS_RESP_RETRIES 0x8
/* Basic OHCI-1394 register and port initialization */
static inline void __init init_ohci1394_initialize(struct ohci *ohci)
{
u32 bus_options;
int num_ports, i;
/* Put some defaults to these undefined bus options */
bus_options = reg_read(ohci, OHCI1394_BusOptions);
bus_options |= 0x60000000; /* Enable CMC and ISC */
bus_options &= ~0x00ff0000; /* XXX: Set cyc_clk_acc to zero for now */
bus_options &= ~0x18000000; /* Disable PMC and BMC */
reg_write(ohci, OHCI1394_BusOptions, bus_options);
/* Set the bus number */
reg_write(ohci, OHCI1394_NodeID, 0x0000ffc0);
/* Enable posted writes */
reg_write(ohci, OHCI1394_HCControlSet,
OHCI1394_HCControl_postedWriteEnable);
/* Clear link control register */
reg_write(ohci, OHCI1394_LinkControlClear, 0xffffffff);
/* enable phys */
reg_write(ohci, OHCI1394_LinkControlSet,
OHCI1394_LinkControl_rcvPhyPkt);
/* Don't accept phy packets into AR request context */
reg_write(ohci, OHCI1394_LinkControlClear, 0x00000400);
/* Clear the isochronous interrupt masks */
reg_write(ohci, OHCI1394_IsoRecvIntMaskClear, 0xffffffff);
reg_write(ohci, OHCI1394_IsoRecvIntEventClear, 0xffffffff);
reg_write(ohci, OHCI1394_IsoXmitIntMaskClear, 0xffffffff);
reg_write(ohci, OHCI1394_IsoXmitIntEventClear, 0xffffffff);
/* Accept asynchronous transfer requests from all nodes for now */
reg_write(ohci, OHCI1394_AsReqFilterHiSet, 0x80000000);
/* Specify asynchronous transfer retries */
reg_write(ohci, OHCI1394_ATRetries,
OHCI1394_MAX_AT_REQ_RETRIES |
(OHCI1394_MAX_AT_RESP_RETRIES<<4) |
(OHCI1394_MAX_PHYS_RESP_RETRIES<<8));
/* We don't want hardware swapping */
reg_write(ohci, OHCI1394_HCControlClear,
OHCI1394_HCControl_noByteSwapData);
/* Enable link */
reg_write(ohci, OHCI1394_HCControlSet, OHCI1394_HCControl_linkEnable);
/* If anything is connected to a port, make sure it is enabled */
num_ports = get_phy_reg(ohci, 2) & 0xf;
for (i = 0; i < num_ports; i++) {
unsigned int status;
set_phy_reg(ohci, 7, i);
status = get_phy_reg(ohci, 8);
if (status & 0x20)
set_phy_reg(ohci, 8, status & ~1);
}
}
/**
* init_ohci1394_wait_for_busresets - wait until bus resets are completed
*
* OHCI1394 initialization itself and any device going on- or offline
 * and any cable issue cause an IEEE1394 bus reset. The OHCI1394 spec
* specifies that physical DMA is disabled on each bus reset and it
* has to be enabled after each bus reset when needed. We resort
* to polling here because on early boot, we have no interrupts.
*/
static inline void __init init_ohci1394_wait_for_busresets(struct ohci *ohci)
{
int i, events;
for (i = 0; i < 9; i++) {
mdelay(200);
events = reg_read(ohci, OHCI1394_IntEventSet);
if (events & OHCI1394_busReset)
reg_write(ohci, OHCI1394_IntEventClear,
OHCI1394_busReset);
}
}
/**
* init_ohci1394_enable_physical_dma - Enable physical DMA for remote debugging
* This enables remote DMA access over IEEE1394 from every host for the low
* 4GB of address space. DMA accesses above 4GB are not available currently.
*/
static inline void __init init_ohci1394_enable_physical_dma(struct ohci *ohci)
{
reg_write(ohci, OHCI1394_PhyReqFilterHiSet, 0xffffffff);
reg_write(ohci, OHCI1394_PhyReqFilterLoSet, 0xffffffff);
reg_write(ohci, OHCI1394_PhyUpperBound, 0xffff0000);
}
/**
* init_ohci1394_reset_and_init_dma - init controller and enable DMA
* This initializes the given controller and enables physical DMA engine in it.
*/
static inline void __init init_ohci1394_reset_and_init_dma(struct ohci *ohci)
{
/* Start off with a soft reset, clears everything to a sane state. */
init_ohci1394_soft_reset(ohci);
/* Accessing some registers without LPS enabled may cause lock up */
reg_write(ohci, OHCI1394_HCControlSet, OHCI1394_HCControl_LPS);
/* Disable and clear interrupts */
reg_write(ohci, OHCI1394_IntEventClear, 0xffffffff);
reg_write(ohci, OHCI1394_IntMaskClear, 0xffffffff);
mdelay(50); /* Wait 50msec to make sure we have full link enabled */
init_ohci1394_initialize(ohci);
/*
* The initialization causes at least one IEEE1394 bus reset. Enabling
* physical DMA only works *after* *all* bus resets have calmed down:
*/
init_ohci1394_wait_for_busresets(ohci);
/* We had to wait and do this now if we want to debug early problems */
init_ohci1394_enable_physical_dma(ohci);
}
/**
* init_ohci1394_controller - Map the registers of the controller and init DMA
* This maps the registers of the specified controller and initializes it
*/
static inline void __init init_ohci1394_controller(int num, int slot, int func)
{
unsigned long ohci_base;
struct ohci ohci;
printk(KERN_INFO "init_ohci1394_dma: initializing OHCI-1394"
" at %02x:%02x.%x\n", num, slot, func);
ohci_base = read_pci_config(num, slot, func, PCI_BASE_ADDRESS_0+(0<<2))
& PCI_BASE_ADDRESS_MEM_MASK;
set_fixmap_nocache(FIX_OHCI1394_BASE, ohci_base);
ohci.registers = (void __iomem *)fix_to_virt(FIX_OHCI1394_BASE);
init_ohci1394_reset_and_init_dma(&ohci);
}
/**
* init_ohci1394_dma_on_all_controllers - scan for OHCI1394 controllers and init DMA on them
* Scans the whole PCI space for OHCI1394 controllers and inits DMA on them
*/
void __init init_ohci1394_dma_on_all_controllers(void)
{
int num, slot, func;
u32 class;
if (!early_pci_allowed())
return;
/* Poor man's PCI discovery, the only thing we can do at early boot */
for (num = 0; num < 32; num++) {
for (slot = 0; slot < 32; slot++) {
for (func = 0; func < 8; func++) {
class = read_pci_config(num, slot, func,
PCI_CLASS_REVISION);
if (class == 0xffffffff)
continue; /* No device at this func */
if (class>>8 != PCI_CLASS_SERIAL_FIREWIRE_OHCI)
continue; /* Not an OHCI-1394 device */
init_ohci1394_controller(num, slot, func);
break; /* Assume one controller per device */
}
}
}
printk(KERN_INFO "init_ohci1394_dma: finished initializing OHCI DMA\n");
}
/**
* setup_ohci1394_dma - enables early OHCI1394 DMA initialization
*/
static int __init setup_ohci1394_dma(char *opt)
{
if (!strcmp(opt, "early"))
init_ohci1394_dma_early = 1;
return 0;
}
/* passing ohci1394_dma=early on boot causes early OHCI1394 DMA initialization */
early_param("ohci1394_dma", setup_ohci1394_dma);
| linux-master | drivers/firewire/init_ohci1394_dma.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Device probing and sysfs code.
*
* Copyright (C) 2005-2006 Kristian Hoegsberg <[email protected]>
*/
#include <linux/bug.h>
#include <linux/ctype.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/errno.h>
#include <linux/firewire.h>
#include <linux/firewire-constants.h>
#include <linux/idr.h>
#include <linux/jiffies.h>
#include <linux/kobject.h>
#include <linux/list.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/random.h>
#include <linux/rwsem.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/workqueue.h>
#include <linux/atomic.h>
#include <asm/byteorder.h>
#include "core.h"
void fw_csr_iterator_init(struct fw_csr_iterator *ci, const u32 *p)
{
ci->p = p + 1;
ci->end = ci->p + (p[0] >> 16);
}
EXPORT_SYMBOL(fw_csr_iterator_init);
int fw_csr_iterator_next(struct fw_csr_iterator *ci, int *key, int *value)
{
*key = *ci->p >> 24;
*value = *ci->p & 0xffffff;
return ci->p++ < ci->end;
}
EXPORT_SYMBOL(fw_csr_iterator_next);
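/*
 * Minimal usage sketch (illustrative only, not part of the exported API;
 * the helper name is hypothetical): callers such as the helpers below walk
 * a directory by repeatedly pulling key/value pairs out of the iterator.
 */
static inline int fw_csr_find_immediate(const u32 *directory, int search_key)
{
	struct fw_csr_iterator ci;
	int key, value;

	fw_csr_iterator_init(&ci, directory);
	while (fw_csr_iterator_next(&ci, &key, &value))
		if (key == search_key)
			return value;	/* 24-bit immediate value */

	return -ENOENT;
}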
static const u32 *search_leaf(const u32 *directory, int search_key)
{
struct fw_csr_iterator ci;
int last_key = 0, key, value;
fw_csr_iterator_init(&ci, directory);
while (fw_csr_iterator_next(&ci, &key, &value)) {
if (last_key == search_key &&
key == (CSR_DESCRIPTOR | CSR_LEAF))
return ci.p - 1 + value;
last_key = key;
}
return NULL;
}
static int textual_leaf_to_string(const u32 *block, char *buf, size_t size)
{
unsigned int quadlets, i;
char c;
if (!size || !buf)
return -EINVAL;
quadlets = min(block[0] >> 16, 256U);
if (quadlets < 2)
return -ENODATA;
if (block[1] != 0 || block[2] != 0)
/* unknown language/character set */
return -ENODATA;
block += 3;
quadlets -= 2;
for (i = 0; i < quadlets * 4 && i < size - 1; i++) {
c = block[i / 4] >> (24 - 8 * (i % 4));
if (c == '\0')
break;
buf[i] = c;
}
buf[i] = '\0';
return i;
}
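/*
 * Leaf layout sketch (illustrative): a minimal ASCII text descriptor leaf
 * consists of a header quadlet whose upper 16 bits hold the quadlet count,
 * two quadlets of zero for character set and language, and then the
 * characters packed four per quadlet, most significant byte first. A
 * quadlet of 0x49507634 therefore decodes to "IPv4".
 */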
/**
* fw_csr_string() - reads a string from the configuration ROM
* @directory: e.g. root directory or unit directory
* @key: the key of the preceding directory entry
* @buf: where to put the string
* @size: size of @buf, in bytes
*
* The string is taken from a minimal ASCII text descriptor leaf after
* the immediate entry with @key. The string is zero-terminated.
* An overlong string is silently truncated such that it and the
* zero byte fit into @size.
*
* Returns strlen(buf) or a negative error code.
*/
int fw_csr_string(const u32 *directory, int key, char *buf, size_t size)
{
const u32 *leaf = search_leaf(directory, key);
if (!leaf)
return -ENOENT;
return textual_leaf_to_string(leaf, buf, size);
}
EXPORT_SYMBOL(fw_csr_string);
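/*
 * Usage sketch (hypothetical caller, illustrative only): reading a unit's
 * model name into a small on-stack buffer via fw_csr_string().
 */
static inline void fw_csr_print_model_name(const struct fw_unit *unit)
{
	char name[32];

	if (fw_csr_string(unit->directory, CSR_MODEL, name, sizeof(name)) >= 0)
		pr_notice("model name: %s\n", name);
}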
static void get_ids(const u32 *directory, int *id)
{
struct fw_csr_iterator ci;
int key, value;
fw_csr_iterator_init(&ci, directory);
while (fw_csr_iterator_next(&ci, &key, &value)) {
switch (key) {
case CSR_VENDOR: id[0] = value; break;
case CSR_MODEL: id[1] = value; break;
case CSR_SPECIFIER_ID: id[2] = value; break;
case CSR_VERSION: id[3] = value; break;
}
}
}
static void get_modalias_ids(const struct fw_unit *unit, int *id)
{
get_ids(&fw_parent_device(unit)->config_rom[5], id);
get_ids(unit->directory, id);
}
static bool match_ids(const struct ieee1394_device_id *id_table, int *id)
{
int match = 0;
if (id[0] == id_table->vendor_id)
match |= IEEE1394_MATCH_VENDOR_ID;
if (id[1] == id_table->model_id)
match |= IEEE1394_MATCH_MODEL_ID;
if (id[2] == id_table->specifier_id)
match |= IEEE1394_MATCH_SPECIFIER_ID;
if (id[3] == id_table->version)
match |= IEEE1394_MATCH_VERSION;
return (match & id_table->match_flags) == id_table->match_flags;
}
static const struct ieee1394_device_id *unit_match(struct device *dev,
struct device_driver *drv)
{
const struct ieee1394_device_id *id_table =
container_of(drv, struct fw_driver, driver)->id_table;
int id[] = {0, 0, 0, 0};
get_modalias_ids(fw_unit(dev), id);
for (; id_table->match_flags != 0; id_table++)
if (match_ids(id_table, id))
return id_table;
return NULL;
}
static bool is_fw_unit(struct device *dev);
static int fw_unit_match(struct device *dev, struct device_driver *drv)
{
/* We only allow binding to fw_units. */
return is_fw_unit(dev) && unit_match(dev, drv) != NULL;
}
static int fw_unit_probe(struct device *dev)
{
struct fw_driver *driver =
container_of(dev->driver, struct fw_driver, driver);
return driver->probe(fw_unit(dev), unit_match(dev, dev->driver));
}
static void fw_unit_remove(struct device *dev)
{
struct fw_driver *driver =
container_of(dev->driver, struct fw_driver, driver);
driver->remove(fw_unit(dev));
}
static int get_modalias(const struct fw_unit *unit, char *buffer, size_t buffer_size)
{
int id[] = {0, 0, 0, 0};
get_modalias_ids(unit, id);
return snprintf(buffer, buffer_size,
"ieee1394:ven%08Xmo%08Xsp%08Xver%08X",
id[0], id[1], id[2], id[3]);
}
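/*
 * Example output (illustrative values): a unit that only carries the IANA
 * specifier 0x00005e and RFC 2734 version 0x000001 yields
 * "ieee1394:ven00000000mo00000000sp0000005Ever00000001".
 */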
static int fw_unit_uevent(const struct device *dev, struct kobj_uevent_env *env)
{
const struct fw_unit *unit = fw_unit(dev);
char modalias[64];
get_modalias(unit, modalias, sizeof(modalias));
if (add_uevent_var(env, "MODALIAS=%s", modalias))
return -ENOMEM;
return 0;
}
struct bus_type fw_bus_type = {
.name = "firewire",
.match = fw_unit_match,
.probe = fw_unit_probe,
.remove = fw_unit_remove,
};
EXPORT_SYMBOL(fw_bus_type);
int fw_device_enable_phys_dma(struct fw_device *device)
{
int generation = device->generation;
/* device->node_id, accessed below, must not be older than generation */
smp_rmb();
return device->card->driver->enable_phys_dma(device->card,
device->node_id,
generation);
}
EXPORT_SYMBOL(fw_device_enable_phys_dma);
struct config_rom_attribute {
struct device_attribute attr;
u32 key;
};
static ssize_t show_immediate(struct device *dev,
struct device_attribute *dattr, char *buf)
{
struct config_rom_attribute *attr =
container_of(dattr, struct config_rom_attribute, attr);
struct fw_csr_iterator ci;
const u32 *dir;
int key, value, ret = -ENOENT;
down_read(&fw_device_rwsem);
if (is_fw_unit(dev))
dir = fw_unit(dev)->directory;
else
dir = fw_device(dev)->config_rom + 5;
fw_csr_iterator_init(&ci, dir);
while (fw_csr_iterator_next(&ci, &key, &value))
if (attr->key == key) {
ret = snprintf(buf, buf ? PAGE_SIZE : 0,
"0x%06x\n", value);
break;
}
up_read(&fw_device_rwsem);
return ret;
}
#define IMMEDIATE_ATTR(name, key) \
{ __ATTR(name, S_IRUGO, show_immediate, NULL), key }
static ssize_t show_text_leaf(struct device *dev,
struct device_attribute *dattr, char *buf)
{
struct config_rom_attribute *attr =
container_of(dattr, struct config_rom_attribute, attr);
const u32 *dir;
size_t bufsize;
char dummy_buf[2];
int ret;
down_read(&fw_device_rwsem);
if (is_fw_unit(dev))
dir = fw_unit(dev)->directory;
else
dir = fw_device(dev)->config_rom + 5;
if (buf) {
bufsize = PAGE_SIZE - 1;
} else {
buf = dummy_buf;
bufsize = 1;
}
ret = fw_csr_string(dir, attr->key, buf, bufsize);
if (ret >= 0) {
/* Strip trailing whitespace and add newline. */
while (ret > 0 && isspace(buf[ret - 1]))
ret--;
strcpy(buf + ret, "\n");
ret++;
}
up_read(&fw_device_rwsem);
return ret;
}
#define TEXT_LEAF_ATTR(name, key) \
{ __ATTR(name, S_IRUGO, show_text_leaf, NULL), key }
static struct config_rom_attribute config_rom_attributes[] = {
IMMEDIATE_ATTR(vendor, CSR_VENDOR),
IMMEDIATE_ATTR(hardware_version, CSR_HARDWARE_VERSION),
IMMEDIATE_ATTR(specifier_id, CSR_SPECIFIER_ID),
IMMEDIATE_ATTR(version, CSR_VERSION),
IMMEDIATE_ATTR(model, CSR_MODEL),
TEXT_LEAF_ATTR(vendor_name, CSR_VENDOR),
TEXT_LEAF_ATTR(model_name, CSR_MODEL),
TEXT_LEAF_ATTR(hardware_version_name, CSR_HARDWARE_VERSION),
};
static void init_fw_attribute_group(struct device *dev,
struct device_attribute *attrs,
struct fw_attribute_group *group)
{
struct device_attribute *attr;
int i, j;
for (j = 0; attrs[j].attr.name != NULL; j++)
group->attrs[j] = &attrs[j].attr;
for (i = 0; i < ARRAY_SIZE(config_rom_attributes); i++) {
attr = &config_rom_attributes[i].attr;
if (attr->show(dev, attr, NULL) < 0)
continue;
group->attrs[j++] = &attr->attr;
}
group->attrs[j] = NULL;
group->groups[0] = &group->group;
group->groups[1] = NULL;
group->group.attrs = group->attrs;
dev->groups = (const struct attribute_group **) group->groups;
}
static ssize_t modalias_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct fw_unit *unit = fw_unit(dev);
int length;
length = get_modalias(unit, buf, PAGE_SIZE);
strcpy(buf + length, "\n");
return length + 1;
}
static ssize_t rom_index_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct fw_device *device = fw_device(dev->parent);
struct fw_unit *unit = fw_unit(dev);
return sysfs_emit(buf, "%td\n", unit->directory - device->config_rom);
}
static struct device_attribute fw_unit_attributes[] = {
__ATTR_RO(modalias),
__ATTR_RO(rom_index),
__ATTR_NULL,
};
static ssize_t config_rom_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct fw_device *device = fw_device(dev);
size_t length;
down_read(&fw_device_rwsem);
length = device->config_rom_length * 4;
memcpy(buf, device->config_rom, length);
up_read(&fw_device_rwsem);
return length;
}
static ssize_t guid_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct fw_device *device = fw_device(dev);
int ret;
down_read(&fw_device_rwsem);
ret = sysfs_emit(buf, "0x%08x%08x\n", device->config_rom[3], device->config_rom[4]);
up_read(&fw_device_rwsem);
return ret;
}
static ssize_t is_local_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct fw_device *device = fw_device(dev);
return sprintf(buf, "%u\n", device->is_local);
}
static int units_sprintf(char *buf, const u32 *directory)
{
struct fw_csr_iterator ci;
int key, value;
int specifier_id = 0;
int version = 0;
fw_csr_iterator_init(&ci, directory);
while (fw_csr_iterator_next(&ci, &key, &value)) {
switch (key) {
case CSR_SPECIFIER_ID:
specifier_id = value;
break;
case CSR_VERSION:
version = value;
break;
}
}
return sprintf(buf, "0x%06x:0x%06x ", specifier_id, version);
}
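/*
 * Example (illustrative): an RFC 2734 IPv4 unit shows up in the "units"
 * sysfs attribute as "0x00005e:0x000001", one such pair per unit directory.
 */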
static ssize_t units_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct fw_device *device = fw_device(dev);
struct fw_csr_iterator ci;
int key, value, i = 0;
down_read(&fw_device_rwsem);
fw_csr_iterator_init(&ci, &device->config_rom[5]);
while (fw_csr_iterator_next(&ci, &key, &value)) {
if (key != (CSR_UNIT | CSR_DIRECTORY))
continue;
i += units_sprintf(&buf[i], ci.p + value - 1);
if (i >= PAGE_SIZE - (8 + 1 + 8 + 1))
break;
}
up_read(&fw_device_rwsem);
if (i)
buf[i - 1] = '\n';
return i;
}
static struct device_attribute fw_device_attributes[] = {
__ATTR_RO(config_rom),
__ATTR_RO(guid),
__ATTR_RO(is_local),
__ATTR_RO(units),
__ATTR_NULL,
};
static int read_rom(struct fw_device *device,
int generation, int index, u32 *data)
{
u64 offset = (CSR_REGISTER_BASE | CSR_CONFIG_ROM) + index * 4;
int i, rcode;
/* device->node_id, accessed below, must not be older than generation */
smp_rmb();
for (i = 10; i < 100; i += 10) {
rcode = fw_run_transaction(device->card,
TCODE_READ_QUADLET_REQUEST, device->node_id,
generation, device->max_speed, offset, data, 4);
if (rcode != RCODE_BUSY)
break;
msleep(i);
}
be32_to_cpus(data);
return rcode;
}
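/*
 * Retry behaviour of read_rom() above, spelled out (sketch): a node that
 * answers RCODE_BUSY is retried with a linearly growing back-off of
 * 10 ms, 20 ms, ..., 90 ms, i.e. at most 9 attempts and roughly 450 ms of
 * sleeping in total, before the busy code is returned to the caller.
 */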
#define MAX_CONFIG_ROM_SIZE 256
/*
* Read the bus info block, perform a speed probe, and read all of the rest of
* the config ROM. We do all this with a cached bus generation. If the bus
* generation changes under us, read_config_rom will fail and get retried.
* It's better to start all over in this case because the node from which we
* are reading the ROM may have changed the ROM during the reset.
* Returns either a result code or a negative error code.
*/
static int read_config_rom(struct fw_device *device, int generation)
{
struct fw_card *card = device->card;
const u32 *old_rom, *new_rom;
u32 *rom, *stack;
u32 sp, key;
int i, end, length, ret;
rom = kmalloc(sizeof(*rom) * MAX_CONFIG_ROM_SIZE +
sizeof(*stack) * MAX_CONFIG_ROM_SIZE, GFP_KERNEL);
if (rom == NULL)
return -ENOMEM;
stack = &rom[MAX_CONFIG_ROM_SIZE];
memset(rom, 0, sizeof(*rom) * MAX_CONFIG_ROM_SIZE);
device->max_speed = SCODE_100;
/* First read the bus info block. */
for (i = 0; i < 5; i++) {
ret = read_rom(device, generation, i, &rom[i]);
if (ret != RCODE_COMPLETE)
goto out;
/*
* As per IEEE1212 7.2, during initialization, devices can
* reply with a 0 for the first quadlet of the config
* rom to indicate that they are booting (for example,
	 * if the firmware is on the disk of an external
	 * hard disk). In that case we just fail, and the
* retry mechanism will try again later.
*/
if (i == 0 && rom[i] == 0) {
ret = RCODE_BUSY;
goto out;
}
}
device->max_speed = device->node->max_speed;
/*
* Determine the speed of
* - devices with link speed less than PHY speed,
* - devices with 1394b PHY (unless only connected to 1394a PHYs),
* - all devices if there are 1394b repeaters.
* Note, we cannot use the bus info block's link_spd as starting point
* because some buggy firmwares set it lower than necessary and because
* 1394-1995 nodes do not have the field.
*/
if ((rom[2] & 0x7) < device->max_speed ||
device->max_speed == SCODE_BETA ||
card->beta_repeaters_present) {
u32 dummy;
/* for S1600 and S3200 */
if (device->max_speed == SCODE_BETA)
device->max_speed = card->link_speed;
while (device->max_speed > SCODE_100) {
if (read_rom(device, generation, 0, &dummy) ==
RCODE_COMPLETE)
break;
device->max_speed--;
}
}
/*
* Now parse the config rom. The config rom is a recursive
* directory structure so we parse it using a stack of
* references to the blocks that make up the structure. We
* push a reference to the root directory on the stack to
* start things off.
*/
length = i;
sp = 0;
stack[sp++] = 0xc0000005;
while (sp > 0) {
/*
		 * Pop the next block reference off the stack. The
		 * lower 24 bits are the offset into the config ROM,
		 * the upper 8 bits are the type of the referenced
		 * block.
*/
key = stack[--sp];
i = key & 0xffffff;
if (WARN_ON(i >= MAX_CONFIG_ROM_SIZE)) {
ret = -ENXIO;
goto out;
}
/* Read header quadlet for the block to get the length. */
ret = read_rom(device, generation, i, &rom[i]);
if (ret != RCODE_COMPLETE)
goto out;
end = i + (rom[i] >> 16) + 1;
if (end > MAX_CONFIG_ROM_SIZE) {
/*
* This block extends outside the config ROM which is
* a firmware bug. Ignore this whole block, i.e.
* simply set a fake block length of 0.
*/
fw_err(card, "skipped invalid ROM block %x at %llx\n",
rom[i],
i * 4 | CSR_REGISTER_BASE | CSR_CONFIG_ROM);
rom[i] = 0;
end = i;
}
i++;
/*
		 * Now read in the block. If this is a directory
		 * block, check each entry as we read it to see if
		 * it references another block, and push that block in that case.
*/
for (; i < end; i++) {
ret = read_rom(device, generation, i, &rom[i]);
if (ret != RCODE_COMPLETE)
goto out;
if ((key >> 30) != 3 || (rom[i] >> 30) < 2)
continue;
/*
* Offset points outside the ROM. May be a firmware
* bug or an Extended ROM entry (IEEE 1212-2001 clause
* 7.7.18). Simply overwrite this pointer here by a
* fake immediate entry so that later iterators over
* the ROM don't have to check offsets all the time.
*/
if (i + (rom[i] & 0xffffff) >= MAX_CONFIG_ROM_SIZE) {
fw_err(card,
"skipped unsupported ROM entry %x at %llx\n",
rom[i],
i * 4 | CSR_REGISTER_BASE | CSR_CONFIG_ROM);
rom[i] = 0;
continue;
}
stack[sp++] = i + rom[i];
}
if (length < i)
length = i;
}
old_rom = device->config_rom;
new_rom = kmemdup(rom, length * 4, GFP_KERNEL);
if (new_rom == NULL) {
ret = -ENOMEM;
goto out;
}
down_write(&fw_device_rwsem);
device->config_rom = new_rom;
device->config_rom_length = length;
up_write(&fw_device_rwsem);
kfree(old_rom);
ret = RCODE_COMPLETE;
device->max_rec = rom[2] >> 12 & 0xf;
device->cmc = rom[2] >> 30 & 1;
device->irmc = rom[2] >> 31 & 1;
out:
kfree(rom);
return ret;
}
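/*
 * Sketch of the block-reference encoding used by read_config_rom() above:
 * each stack entry packs the 24-bit ROM offset into its low bits and the
 * entry type into bits 30..31, so the root directory is pushed as
 * 0xc0000005 (directory entry, offset 5). Entries are only followed while
 * parsing a directory block (type 3), and only leaf and directory
 * references (entry type >= 2) are pushed in turn.
 */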
static void fw_unit_release(struct device *dev)
{
struct fw_unit *unit = fw_unit(dev);
fw_device_put(fw_parent_device(unit));
kfree(unit);
}
static struct device_type fw_unit_type = {
.uevent = fw_unit_uevent,
.release = fw_unit_release,
};
static bool is_fw_unit(struct device *dev)
{
return dev->type == &fw_unit_type;
}
static void create_units(struct fw_device *device)
{
struct fw_csr_iterator ci;
struct fw_unit *unit;
int key, value, i;
i = 0;
fw_csr_iterator_init(&ci, &device->config_rom[5]);
while (fw_csr_iterator_next(&ci, &key, &value)) {
if (key != (CSR_UNIT | CSR_DIRECTORY))
continue;
/*
* Get the address of the unit directory and try to
		 * match the drivers' id_tables against it.
*/
unit = kzalloc(sizeof(*unit), GFP_KERNEL);
if (unit == NULL)
continue;
unit->directory = ci.p + value - 1;
unit->device.bus = &fw_bus_type;
unit->device.type = &fw_unit_type;
unit->device.parent = &device->device;
dev_set_name(&unit->device, "%s.%d", dev_name(&device->device), i++);
BUILD_BUG_ON(ARRAY_SIZE(unit->attribute_group.attrs) <
ARRAY_SIZE(fw_unit_attributes) +
ARRAY_SIZE(config_rom_attributes));
init_fw_attribute_group(&unit->device,
fw_unit_attributes,
&unit->attribute_group);
if (device_register(&unit->device) < 0)
goto skip_unit;
fw_device_get(device);
continue;
skip_unit:
kfree(unit);
}
}
static int shutdown_unit(struct device *device, void *data)
{
device_unregister(device);
return 0;
}
/*
* fw_device_rwsem acts as dual purpose mutex:
* - serializes accesses to fw_device_idr,
* - serializes accesses to fw_device.config_rom/.config_rom_length and
* fw_unit.directory, unless those accesses happen at safe occasions
*/
DECLARE_RWSEM(fw_device_rwsem);
DEFINE_IDR(fw_device_idr);
int fw_cdev_major;
struct fw_device *fw_device_get_by_devt(dev_t devt)
{
struct fw_device *device;
down_read(&fw_device_rwsem);
device = idr_find(&fw_device_idr, MINOR(devt));
if (device)
fw_device_get(device);
up_read(&fw_device_rwsem);
return device;
}
struct workqueue_struct *fw_workqueue;
EXPORT_SYMBOL(fw_workqueue);
static void fw_schedule_device_work(struct fw_device *device,
unsigned long delay)
{
queue_delayed_work(fw_workqueue, &device->work, delay);
}
/*
* These defines control the retry behavior for reading the config
* rom. It shouldn't be necessary to tweak these; if the device
* doesn't respond to a config rom read within 10 seconds, it's not
* going to respond at all. As for the initial delay, a lot of
* devices will be able to respond within half a second after bus
* reset. On the other hand, it's not really worth being more
* aggressive than that, since it scales pretty well; if 10 devices
* are plugged in, they're all getting read within one second.
*/
#define MAX_RETRIES 10
#define RETRY_DELAY (3 * HZ)
#define INITIAL_DELAY (HZ / 2)
#define SHUTDOWN_DELAY (2 * HZ)
static void fw_device_shutdown(struct work_struct *work)
{
struct fw_device *device =
container_of(work, struct fw_device, work.work);
int minor = MINOR(device->device.devt);
if (time_before64(get_jiffies_64(),
device->card->reset_jiffies + SHUTDOWN_DELAY)
&& !list_empty(&device->card->link)) {
fw_schedule_device_work(device, SHUTDOWN_DELAY);
return;
}
if (atomic_cmpxchg(&device->state,
FW_DEVICE_GONE,
FW_DEVICE_SHUTDOWN) != FW_DEVICE_GONE)
return;
fw_device_cdev_remove(device);
device_for_each_child(&device->device, NULL, shutdown_unit);
device_unregister(&device->device);
down_write(&fw_device_rwsem);
idr_remove(&fw_device_idr, minor);
up_write(&fw_device_rwsem);
fw_device_put(device);
}
static void fw_device_release(struct device *dev)
{
struct fw_device *device = fw_device(dev);
struct fw_card *card = device->card;
unsigned long flags;
/*
* Take the card lock so we don't set this to NULL while a
* FW_NODE_UPDATED callback is being handled or while the
* bus manager work looks at this node.
*/
spin_lock_irqsave(&card->lock, flags);
device->node->data = NULL;
spin_unlock_irqrestore(&card->lock, flags);
fw_node_put(device->node);
kfree(device->config_rom);
kfree(device);
fw_card_put(card);
}
static struct device_type fw_device_type = {
.release = fw_device_release,
};
static bool is_fw_device(struct device *dev)
{
return dev->type == &fw_device_type;
}
static int update_unit(struct device *dev, void *data)
{
struct fw_unit *unit = fw_unit(dev);
struct fw_driver *driver = (struct fw_driver *)dev->driver;
if (is_fw_unit(dev) && driver != NULL && driver->update != NULL) {
device_lock(dev);
driver->update(unit);
device_unlock(dev);
}
return 0;
}
static void fw_device_update(struct work_struct *work)
{
struct fw_device *device =
container_of(work, struct fw_device, work.work);
fw_device_cdev_update(device);
device_for_each_child(&device->device, NULL, update_unit);
}
/*
* If a device was pending for deletion because its node went away but its
* bus info block and root directory header matches that of a newly discovered
* device, revive the existing fw_device.
* The newly allocated fw_device becomes obsolete instead.
*/
static int lookup_existing_device(struct device *dev, void *data)
{
struct fw_device *old = fw_device(dev);
struct fw_device *new = data;
struct fw_card *card = new->card;
int match = 0;
if (!is_fw_device(dev))
return 0;
down_read(&fw_device_rwsem); /* serialize config_rom access */
spin_lock_irq(&card->lock); /* serialize node access */
if (memcmp(old->config_rom, new->config_rom, 6 * 4) == 0 &&
atomic_cmpxchg(&old->state,
FW_DEVICE_GONE,
FW_DEVICE_RUNNING) == FW_DEVICE_GONE) {
struct fw_node *current_node = new->node;
struct fw_node *obsolete_node = old->node;
new->node = obsolete_node;
new->node->data = new;
old->node = current_node;
old->node->data = old;
old->max_speed = new->max_speed;
old->node_id = current_node->node_id;
smp_wmb(); /* update node_id before generation */
old->generation = card->generation;
old->config_rom_retries = 0;
fw_notice(card, "rediscovered device %s\n", dev_name(dev));
old->workfn = fw_device_update;
fw_schedule_device_work(old, 0);
if (current_node == card->root_node)
fw_schedule_bm_work(card, 0);
match = 1;
}
spin_unlock_irq(&card->lock);
up_read(&fw_device_rwsem);
return match;
}
enum { BC_UNKNOWN = 0, BC_UNIMPLEMENTED, BC_IMPLEMENTED, };
static void set_broadcast_channel(struct fw_device *device, int generation)
{
struct fw_card *card = device->card;
__be32 data;
int rcode;
if (!card->broadcast_channel_allocated)
return;
/*
* The Broadcast_Channel Valid bit is required by nodes which want to
* transmit on this channel. Such transmissions are practically
* exclusive to IP over 1394 (RFC 2734). IP capable nodes are required
* to be IRM capable and have a max_rec of 8 or more. We use this fact
* to narrow down to which nodes we send Broadcast_Channel updates.
*/
if (!device->irmc || device->max_rec < 8)
return;
/*
* Some 1394-1995 nodes crash if this 1394a-2000 register is written.
* Perform a read test first.
*/
if (device->bc_implemented == BC_UNKNOWN) {
rcode = fw_run_transaction(card, TCODE_READ_QUADLET_REQUEST,
device->node_id, generation, device->max_speed,
CSR_REGISTER_BASE + CSR_BROADCAST_CHANNEL,
&data, 4);
switch (rcode) {
case RCODE_COMPLETE:
if (data & cpu_to_be32(1 << 31)) {
device->bc_implemented = BC_IMPLEMENTED;
break;
}
fallthrough; /* to case address error */
case RCODE_ADDRESS_ERROR:
device->bc_implemented = BC_UNIMPLEMENTED;
}
}
if (device->bc_implemented == BC_IMPLEMENTED) {
data = cpu_to_be32(BROADCAST_CHANNEL_INITIAL |
BROADCAST_CHANNEL_VALID);
fw_run_transaction(card, TCODE_WRITE_QUADLET_REQUEST,
device->node_id, generation, device->max_speed,
CSR_REGISTER_BASE + CSR_BROADCAST_CHANNEL,
&data, 4);
}
}
int fw_device_set_broadcast_channel(struct device *dev, void *gen)
{
if (is_fw_device(dev))
set_broadcast_channel(fw_device(dev), (long)gen);
return 0;
}
static void fw_device_init(struct work_struct *work)
{
struct fw_device *device =
container_of(work, struct fw_device, work.work);
struct fw_card *card = device->card;
struct device *revived_dev;
int minor, ret;
/*
* All failure paths here set node->data to NULL, so that we
* don't try to do device_for_each_child() on a kfree()'d
* device.
*/
ret = read_config_rom(device, device->generation);
if (ret != RCODE_COMPLETE) {
if (device->config_rom_retries < MAX_RETRIES &&
atomic_read(&device->state) == FW_DEVICE_INITIALIZING) {
device->config_rom_retries++;
fw_schedule_device_work(device, RETRY_DELAY);
} else {
if (device->node->link_on)
fw_notice(card, "giving up on node %x: reading config rom failed: %s\n",
device->node_id,
fw_rcode_string(ret));
if (device->node == card->root_node)
fw_schedule_bm_work(card, 0);
fw_device_release(&device->device);
}
return;
}
revived_dev = device_find_child(card->device,
device, lookup_existing_device);
if (revived_dev) {
put_device(revived_dev);
fw_device_release(&device->device);
return;
}
device_initialize(&device->device);
fw_device_get(device);
down_write(&fw_device_rwsem);
minor = idr_alloc(&fw_device_idr, device, 0, 1 << MINORBITS,
GFP_KERNEL);
up_write(&fw_device_rwsem);
if (minor < 0)
goto error;
device->device.bus = &fw_bus_type;
device->device.type = &fw_device_type;
device->device.parent = card->device;
device->device.devt = MKDEV(fw_cdev_major, minor);
dev_set_name(&device->device, "fw%d", minor);
BUILD_BUG_ON(ARRAY_SIZE(device->attribute_group.attrs) <
ARRAY_SIZE(fw_device_attributes) +
ARRAY_SIZE(config_rom_attributes));
init_fw_attribute_group(&device->device,
fw_device_attributes,
&device->attribute_group);
if (device_add(&device->device)) {
fw_err(card, "failed to add device\n");
goto error_with_cdev;
}
create_units(device);
/*
* Transition the device to running state. If it got pulled
* out from under us while we did the initialization work, we
* have to shut down the device again here. Normally, though,
* fw_node_event will be responsible for shutting it down when
* necessary. We have to use the atomic cmpxchg here to avoid
* racing with the FW_NODE_DESTROYED case in
* fw_node_event().
*/
if (atomic_cmpxchg(&device->state,
FW_DEVICE_INITIALIZING,
FW_DEVICE_RUNNING) == FW_DEVICE_GONE) {
device->workfn = fw_device_shutdown;
fw_schedule_device_work(device, SHUTDOWN_DELAY);
} else {
fw_notice(card, "created device %s: GUID %08x%08x, S%d00\n",
dev_name(&device->device),
device->config_rom[3], device->config_rom[4],
1 << device->max_speed);
device->config_rom_retries = 0;
set_broadcast_channel(device, device->generation);
add_device_randomness(&device->config_rom[3], 8);
}
/*
* Reschedule the IRM work if we just finished reading the
* root node config rom. If this races with a bus reset we
* just end up running the IRM work a couple of extra times -
* pretty harmless.
*/
if (device->node == card->root_node)
fw_schedule_bm_work(card, 0);
return;
error_with_cdev:
down_write(&fw_device_rwsem);
idr_remove(&fw_device_idr, minor);
up_write(&fw_device_rwsem);
error:
fw_device_put(device); /* fw_device_idr's reference */
put_device(&device->device); /* our reference */
}
/* Reread and compare bus info block and header of root directory */
static int reread_config_rom(struct fw_device *device, int generation,
bool *changed)
{
u32 q;
int i, rcode;
for (i = 0; i < 6; i++) {
rcode = read_rom(device, generation, i, &q);
if (rcode != RCODE_COMPLETE)
return rcode;
if (i == 0 && q == 0)
/* inaccessible (see read_config_rom); retry later */
return RCODE_BUSY;
if (q != device->config_rom[i]) {
*changed = true;
return RCODE_COMPLETE;
}
}
*changed = false;
return RCODE_COMPLETE;
}
static void fw_device_refresh(struct work_struct *work)
{
struct fw_device *device =
container_of(work, struct fw_device, work.work);
struct fw_card *card = device->card;
int ret, node_id = device->node_id;
bool changed;
ret = reread_config_rom(device, device->generation, &changed);
if (ret != RCODE_COMPLETE)
goto failed_config_rom;
if (!changed) {
if (atomic_cmpxchg(&device->state,
FW_DEVICE_INITIALIZING,
FW_DEVICE_RUNNING) == FW_DEVICE_GONE)
goto gone;
fw_device_update(work);
device->config_rom_retries = 0;
goto out;
}
/*
* Something changed. We keep things simple and don't investigate
* further. We just destroy all previous units and create new ones.
*/
device_for_each_child(&device->device, NULL, shutdown_unit);
ret = read_config_rom(device, device->generation);
if (ret != RCODE_COMPLETE)
goto failed_config_rom;
fw_device_cdev_update(device);
create_units(device);
/* Userspace may want to re-read attributes. */
kobject_uevent(&device->device.kobj, KOBJ_CHANGE);
if (atomic_cmpxchg(&device->state,
FW_DEVICE_INITIALIZING,
FW_DEVICE_RUNNING) == FW_DEVICE_GONE)
goto gone;
fw_notice(card, "refreshed device %s\n", dev_name(&device->device));
device->config_rom_retries = 0;
goto out;
failed_config_rom:
if (device->config_rom_retries < MAX_RETRIES &&
atomic_read(&device->state) == FW_DEVICE_INITIALIZING) {
device->config_rom_retries++;
fw_schedule_device_work(device, RETRY_DELAY);
return;
}
fw_notice(card, "giving up on refresh of device %s: %s\n",
dev_name(&device->device), fw_rcode_string(ret));
gone:
atomic_set(&device->state, FW_DEVICE_GONE);
device->workfn = fw_device_shutdown;
fw_schedule_device_work(device, SHUTDOWN_DELAY);
out:
if (node_id == card->root_node->node_id)
fw_schedule_bm_work(card, 0);
}
static void fw_device_workfn(struct work_struct *work)
{
struct fw_device *device = container_of(to_delayed_work(work),
struct fw_device, work);
device->workfn(work);
}
void fw_node_event(struct fw_card *card, struct fw_node *node, int event)
{
struct fw_device *device;
switch (event) {
case FW_NODE_CREATED:
/*
* Attempt to scan the node, regardless whether its self ID has
* the L (link active) flag set or not. Some broken devices
* send L=0 but have an up-and-running link; others send L=1
* without actually having a link.
*/
create:
device = kzalloc(sizeof(*device), GFP_ATOMIC);
if (device == NULL)
break;
/*
* Do minimal initialization of the device here, the
* rest will happen in fw_device_init().
*
* Attention: A lot of things, even fw_device_get(),
* cannot be done before fw_device_init() finished!
* You can basically just check device->state and
* schedule work until then, but only while holding
* card->lock.
*/
atomic_set(&device->state, FW_DEVICE_INITIALIZING);
device->card = fw_card_get(card);
device->node = fw_node_get(node);
device->node_id = node->node_id;
device->generation = card->generation;
device->is_local = node == card->local_node;
mutex_init(&device->client_list_mutex);
INIT_LIST_HEAD(&device->client_list);
/*
* Set the node data to point back to this device so
* FW_NODE_UPDATED callbacks can update the node_id
* and generation for the device.
*/
node->data = device;
/*
* Many devices are slow to respond after bus resets,
* especially if they are bus powered and go through
* power-up after getting plugged in. We schedule the
* first config rom scan half a second after bus reset.
*/
device->workfn = fw_device_init;
INIT_DELAYED_WORK(&device->work, fw_device_workfn);
fw_schedule_device_work(device, INITIAL_DELAY);
break;
case FW_NODE_INITIATED_RESET:
case FW_NODE_LINK_ON:
device = node->data;
if (device == NULL)
goto create;
device->node_id = node->node_id;
smp_wmb(); /* update node_id before generation */
device->generation = card->generation;
if (atomic_cmpxchg(&device->state,
FW_DEVICE_RUNNING,
FW_DEVICE_INITIALIZING) == FW_DEVICE_RUNNING) {
device->workfn = fw_device_refresh;
fw_schedule_device_work(device,
device->is_local ? 0 : INITIAL_DELAY);
}
break;
case FW_NODE_UPDATED:
device = node->data;
if (device == NULL)
break;
device->node_id = node->node_id;
smp_wmb(); /* update node_id before generation */
device->generation = card->generation;
if (atomic_read(&device->state) == FW_DEVICE_RUNNING) {
device->workfn = fw_device_update;
fw_schedule_device_work(device, 0);
}
break;
case FW_NODE_DESTROYED:
case FW_NODE_LINK_OFF:
if (!node->data)
break;
/*
* Destroy the device associated with the node. There
* are two cases here: either the device is fully
* initialized (FW_DEVICE_RUNNING) or we're in the
* process of reading its config rom
* (FW_DEVICE_INITIALIZING). If it is fully
* initialized we can reuse device->work to schedule a
* full fw_device_shutdown(). If not, there's work
		 * scheduled to read its config rom, and we just put
* the device in shutdown state to have that code fail
* to create the device.
*/
device = node->data;
if (atomic_xchg(&device->state,
FW_DEVICE_GONE) == FW_DEVICE_RUNNING) {
device->workfn = fw_device_shutdown;
fw_schedule_device_work(device,
list_empty(&card->link) ? 0 : SHUTDOWN_DELAY);
}
break;
}
}
| linux-master | drivers/firewire/core-device.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Thunderbolt Time Management Unit (TMU) support
*
* Copyright (C) 2019, Intel Corporation
* Authors: Mika Westerberg <[email protected]>
* Rajmohan Mani <[email protected]>
*/
#include <linux/delay.h>
#include "tb.h"
static const unsigned int tmu_rates[] = {
[TB_SWITCH_TMU_MODE_OFF] = 0,
[TB_SWITCH_TMU_MODE_LOWRES] = 1000,
[TB_SWITCH_TMU_MODE_HIFI_UNI] = 16,
[TB_SWITCH_TMU_MODE_HIFI_BI] = 16,
[TB_SWITCH_TMU_MODE_MEDRES_ENHANCED_UNI] = 16,
};
static const struct {
unsigned int freq_meas_window;
unsigned int avg_const;
unsigned int delta_avg_const;
unsigned int repl_timeout;
unsigned int repl_threshold;
unsigned int repl_n;
unsigned int dirswitch_n;
} tmu_params[] = {
[TB_SWITCH_TMU_MODE_OFF] = { },
[TB_SWITCH_TMU_MODE_LOWRES] = { 30, 4, },
[TB_SWITCH_TMU_MODE_HIFI_UNI] = { 800, 8, },
[TB_SWITCH_TMU_MODE_HIFI_BI] = { 800, 8, },
[TB_SWITCH_TMU_MODE_MEDRES_ENHANCED_UNI] = {
800, 4, 0, 3125, 25, 128, 255,
},
};
static const char *tmu_mode_name(enum tb_switch_tmu_mode mode)
{
switch (mode) {
case TB_SWITCH_TMU_MODE_OFF:
return "off";
case TB_SWITCH_TMU_MODE_LOWRES:
return "uni-directional, LowRes";
case TB_SWITCH_TMU_MODE_HIFI_UNI:
return "uni-directional, HiFi";
case TB_SWITCH_TMU_MODE_HIFI_BI:
return "bi-directional, HiFi";
case TB_SWITCH_TMU_MODE_MEDRES_ENHANCED_UNI:
return "enhanced uni-directional, MedRes";
default:
return "unknown";
}
}
static bool tb_switch_tmu_enhanced_is_supported(const struct tb_switch *sw)
{
return usb4_switch_version(sw) > 1;
}
static int tb_switch_set_tmu_mode_params(struct tb_switch *sw,
enum tb_switch_tmu_mode mode)
{
u32 freq, avg, val;
int ret;
freq = tmu_params[mode].freq_meas_window;
avg = tmu_params[mode].avg_const;
ret = tb_sw_read(sw, &val, TB_CFG_SWITCH,
sw->tmu.cap + TMU_RTR_CS_0, 1);
if (ret)
return ret;
val &= ~TMU_RTR_CS_0_FREQ_WIND_MASK;
val |= FIELD_PREP(TMU_RTR_CS_0_FREQ_WIND_MASK, freq);
ret = tb_sw_write(sw, &val, TB_CFG_SWITCH,
sw->tmu.cap + TMU_RTR_CS_0, 1);
if (ret)
return ret;
ret = tb_sw_read(sw, &val, TB_CFG_SWITCH,
sw->tmu.cap + TMU_RTR_CS_15, 1);
if (ret)
return ret;
val &= ~TMU_RTR_CS_15_FREQ_AVG_MASK &
~TMU_RTR_CS_15_DELAY_AVG_MASK &
~TMU_RTR_CS_15_OFFSET_AVG_MASK &
~TMU_RTR_CS_15_ERROR_AVG_MASK;
val |= FIELD_PREP(TMU_RTR_CS_15_FREQ_AVG_MASK, avg) |
FIELD_PREP(TMU_RTR_CS_15_DELAY_AVG_MASK, avg) |
FIELD_PREP(TMU_RTR_CS_15_OFFSET_AVG_MASK, avg) |
FIELD_PREP(TMU_RTR_CS_15_ERROR_AVG_MASK, avg);
ret = tb_sw_write(sw, &val, TB_CFG_SWITCH,
sw->tmu.cap + TMU_RTR_CS_15, 1);
if (ret)
return ret;
if (tb_switch_tmu_enhanced_is_supported(sw)) {
u32 delta_avg = tmu_params[mode].delta_avg_const;
ret = tb_sw_read(sw, &val, TB_CFG_SWITCH,
sw->tmu.cap + TMU_RTR_CS_18, 1);
if (ret)
return ret;
val &= ~TMU_RTR_CS_18_DELTA_AVG_CONST_MASK;
val |= FIELD_PREP(TMU_RTR_CS_18_DELTA_AVG_CONST_MASK, delta_avg);
ret = tb_sw_write(sw, &val, TB_CFG_SWITCH,
sw->tmu.cap + TMU_RTR_CS_18, 1);
}
return ret;
}
static bool tb_switch_tmu_ucap_is_supported(struct tb_switch *sw)
{
int ret;
u32 val;
ret = tb_sw_read(sw, &val, TB_CFG_SWITCH,
sw->tmu.cap + TMU_RTR_CS_0, 1);
if (ret)
return false;
return !!(val & TMU_RTR_CS_0_UCAP);
}
static int tb_switch_tmu_rate_read(struct tb_switch *sw)
{
int ret;
u32 val;
ret = tb_sw_read(sw, &val, TB_CFG_SWITCH,
sw->tmu.cap + TMU_RTR_CS_3, 1);
if (ret)
return ret;
val >>= TMU_RTR_CS_3_TS_PACKET_INTERVAL_SHIFT;
return val;
}
static int tb_switch_tmu_rate_write(struct tb_switch *sw, int rate)
{
int ret;
u32 val;
ret = tb_sw_read(sw, &val, TB_CFG_SWITCH,
sw->tmu.cap + TMU_RTR_CS_3, 1);
if (ret)
return ret;
val &= ~TMU_RTR_CS_3_TS_PACKET_INTERVAL_MASK;
val |= rate << TMU_RTR_CS_3_TS_PACKET_INTERVAL_SHIFT;
return tb_sw_write(sw, &val, TB_CFG_SWITCH,
sw->tmu.cap + TMU_RTR_CS_3, 1);
}
static int tb_port_tmu_write(struct tb_port *port, u8 offset, u32 mask,
u32 value)
{
u32 data;
int ret;
ret = tb_port_read(port, &data, TB_CFG_PORT, port->cap_tmu + offset, 1);
if (ret)
return ret;
data &= ~mask;
data |= value;
return tb_port_write(port, &data, TB_CFG_PORT,
port->cap_tmu + offset, 1);
}
static int tb_port_tmu_set_unidirectional(struct tb_port *port,
bool unidirectional)
{
u32 val;
if (!port->sw->tmu.has_ucap)
return 0;
val = unidirectional ? TMU_ADP_CS_3_UDM : 0;
return tb_port_tmu_write(port, TMU_ADP_CS_3, TMU_ADP_CS_3_UDM, val);
}
static inline int tb_port_tmu_unidirectional_disable(struct tb_port *port)
{
return tb_port_tmu_set_unidirectional(port, false);
}
static inline int tb_port_tmu_unidirectional_enable(struct tb_port *port)
{
return tb_port_tmu_set_unidirectional(port, true);
}
static bool tb_port_tmu_is_unidirectional(struct tb_port *port)
{
int ret;
u32 val;
ret = tb_port_read(port, &val, TB_CFG_PORT,
port->cap_tmu + TMU_ADP_CS_3, 1);
if (ret)
return false;
return val & TMU_ADP_CS_3_UDM;
}
static bool tb_port_tmu_is_enhanced(struct tb_port *port)
{
int ret;
u32 val;
ret = tb_port_read(port, &val, TB_CFG_PORT,
port->cap_tmu + TMU_ADP_CS_8, 1);
if (ret)
return false;
return val & TMU_ADP_CS_8_EUDM;
}
/* Can be called for non-v2 lane adapters too */
static int tb_port_tmu_enhanced_enable(struct tb_port *port, bool enable)
{
int ret;
u32 val;
if (!tb_switch_tmu_enhanced_is_supported(port->sw))
return 0;
ret = tb_port_read(port, &val, TB_CFG_PORT,
port->cap_tmu + TMU_ADP_CS_8, 1);
if (ret)
return ret;
if (enable)
val |= TMU_ADP_CS_8_EUDM;
else
val &= ~TMU_ADP_CS_8_EUDM;
return tb_port_write(port, &val, TB_CFG_PORT,
port->cap_tmu + TMU_ADP_CS_8, 1);
}
static int tb_port_set_tmu_mode_params(struct tb_port *port,
enum tb_switch_tmu_mode mode)
{
u32 repl_timeout, repl_threshold, repl_n, dirswitch_n, val;
int ret;
repl_timeout = tmu_params[mode].repl_timeout;
repl_threshold = tmu_params[mode].repl_threshold;
repl_n = tmu_params[mode].repl_n;
dirswitch_n = tmu_params[mode].dirswitch_n;
ret = tb_port_read(port, &val, TB_CFG_PORT,
port->cap_tmu + TMU_ADP_CS_8, 1);
if (ret)
return ret;
val &= ~TMU_ADP_CS_8_REPL_TIMEOUT_MASK;
val &= ~TMU_ADP_CS_8_REPL_THRESHOLD_MASK;
val |= FIELD_PREP(TMU_ADP_CS_8_REPL_TIMEOUT_MASK, repl_timeout);
val |= FIELD_PREP(TMU_ADP_CS_8_REPL_THRESHOLD_MASK, repl_threshold);
ret = tb_port_write(port, &val, TB_CFG_PORT,
port->cap_tmu + TMU_ADP_CS_8, 1);
if (ret)
return ret;
ret = tb_port_read(port, &val, TB_CFG_PORT,
port->cap_tmu + TMU_ADP_CS_9, 1);
if (ret)
return ret;
val &= ~TMU_ADP_CS_9_REPL_N_MASK;
val &= ~TMU_ADP_CS_9_DIRSWITCH_N_MASK;
val |= FIELD_PREP(TMU_ADP_CS_9_REPL_N_MASK, repl_n);
val |= FIELD_PREP(TMU_ADP_CS_9_DIRSWITCH_N_MASK, dirswitch_n);
return tb_port_write(port, &val, TB_CFG_PORT,
port->cap_tmu + TMU_ADP_CS_9, 1);
}
/* Can be called for non-v2 lane adapters too */
static int tb_port_tmu_rate_write(struct tb_port *port, int rate)
{
int ret;
u32 val;
if (!tb_switch_tmu_enhanced_is_supported(port->sw))
return 0;
ret = tb_port_read(port, &val, TB_CFG_PORT,
port->cap_tmu + TMU_ADP_CS_9, 1);
if (ret)
return ret;
val &= ~TMU_ADP_CS_9_ADP_TS_INTERVAL_MASK;
val |= FIELD_PREP(TMU_ADP_CS_9_ADP_TS_INTERVAL_MASK, rate);
return tb_port_write(port, &val, TB_CFG_PORT,
port->cap_tmu + TMU_ADP_CS_9, 1);
}
static int tb_port_tmu_time_sync(struct tb_port *port, bool time_sync)
{
u32 val = time_sync ? TMU_ADP_CS_6_DTS : 0;
return tb_port_tmu_write(port, TMU_ADP_CS_6, TMU_ADP_CS_6_DTS, val);
}
static int tb_port_tmu_time_sync_disable(struct tb_port *port)
{
return tb_port_tmu_time_sync(port, true);
}
static int tb_port_tmu_time_sync_enable(struct tb_port *port)
{
return tb_port_tmu_time_sync(port, false);
}
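/*
 * Set or clear the router Time Disruption bit. USB4 routers use the
 * standard TMU router capability whereas non-USB4 routers use the
 * vendor specific TMU capability for this.
 */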
static int tb_switch_tmu_set_time_disruption(struct tb_switch *sw, bool set)
{
u32 val, offset, bit;
int ret;
if (tb_switch_is_usb4(sw)) {
offset = sw->tmu.cap + TMU_RTR_CS_0;
bit = TMU_RTR_CS_0_TD;
} else {
offset = sw->cap_vsec_tmu + TB_TIME_VSEC_3_CS_26;
bit = TB_TIME_VSEC_3_CS_26_TD;
}
ret = tb_sw_read(sw, &val, TB_CFG_SWITCH, offset, 1);
if (ret)
return ret;
if (set)
val |= bit;
else
val &= ~bit;
return tb_sw_write(sw, &val, TB_CFG_SWITCH, offset, 1);
}
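/*
 * Deduce the currently programmed TMU mode from the hardware (the
 * upstream adapter unidirectional/enhanced bits and the router rate)
 * and record it as both the current mode and the initial mode request.
 */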
static int tmu_mode_init(struct tb_switch *sw)
{
bool enhanced, ucap;
int ret, rate;
ucap = tb_switch_tmu_ucap_is_supported(sw);
if (ucap)
tb_sw_dbg(sw, "TMU: supports uni-directional mode\n");
enhanced = tb_switch_tmu_enhanced_is_supported(sw);
if (enhanced)
tb_sw_dbg(sw, "TMU: supports enhanced uni-directional mode\n");
ret = tb_switch_tmu_rate_read(sw);
if (ret < 0)
return ret;
rate = ret;
/* Off by default */
sw->tmu.mode = TB_SWITCH_TMU_MODE_OFF;
if (tb_route(sw)) {
struct tb_port *up = tb_upstream_port(sw);
if (enhanced && tb_port_tmu_is_enhanced(up)) {
sw->tmu.mode = TB_SWITCH_TMU_MODE_MEDRES_ENHANCED_UNI;
} else if (ucap && tb_port_tmu_is_unidirectional(up)) {
if (tmu_rates[TB_SWITCH_TMU_MODE_LOWRES] == rate)
sw->tmu.mode = TB_SWITCH_TMU_MODE_LOWRES;
			else if (tmu_rates[TB_SWITCH_TMU_MODE_HIFI_UNI] == rate)
sw->tmu.mode = TB_SWITCH_TMU_MODE_HIFI_UNI;
} else if (rate) {
sw->tmu.mode = TB_SWITCH_TMU_MODE_HIFI_BI;
}
} else if (rate) {
sw->tmu.mode = TB_SWITCH_TMU_MODE_HIFI_BI;
}
/* Update the initial request to match the current mode */
sw->tmu.mode_request = sw->tmu.mode;
sw->tmu.has_ucap = ucap;
return 0;
}
/**
* tb_switch_tmu_init() - Initialize switch TMU structures
 * @sw: Switch to initialize
 *
 * This function must be called before other TMU related functions to
 * make sure the internal structures are filled in correctly. Does not
* change any hardware configuration.
*/
int tb_switch_tmu_init(struct tb_switch *sw)
{
struct tb_port *port;
int ret;
if (tb_switch_is_icm(sw))
return 0;
ret = tb_switch_find_cap(sw, TB_SWITCH_CAP_TMU);
if (ret > 0)
sw->tmu.cap = ret;
tb_switch_for_each_port(sw, port) {
int cap;
cap = tb_port_find_cap(port, TB_PORT_CAP_TIME1);
if (cap > 0)
port->cap_tmu = cap;
}
ret = tmu_mode_init(sw);
if (ret)
return ret;
tb_sw_dbg(sw, "TMU: current mode: %s\n", tmu_mode_name(sw->tmu.mode));
return 0;
}
/**
* tb_switch_tmu_post_time() - Update switch local time
* @sw: Switch whose time to update
*
* Updates switch local time using time posting procedure.
*/
int tb_switch_tmu_post_time(struct tb_switch *sw)
{
unsigned int post_time_high_offset, post_time_high = 0;
unsigned int post_local_time_offset, post_time_offset;
struct tb_switch *root_switch = sw->tb->root_switch;
u64 hi, mid, lo, local_time, post_time;
int i, ret, retries = 100;
u32 gm_local_time[3];
if (!tb_route(sw))
return 0;
if (!tb_switch_is_usb4(sw))
return 0;
/* Need to be able to read the grand master time */
if (!root_switch->tmu.cap)
return 0;
ret = tb_sw_read(root_switch, gm_local_time, TB_CFG_SWITCH,
root_switch->tmu.cap + TMU_RTR_CS_1,
ARRAY_SIZE(gm_local_time));
if (ret)
return ret;
for (i = 0; i < ARRAY_SIZE(gm_local_time); i++)
tb_sw_dbg(root_switch, "TMU: local_time[%d]=0x%08x\n", i,
gm_local_time[i]);
/* Convert to nanoseconds (drop fractional part) */
hi = gm_local_time[2] & TMU_RTR_CS_3_LOCAL_TIME_NS_MASK;
mid = gm_local_time[1];
lo = (gm_local_time[0] & TMU_RTR_CS_1_LOCAL_TIME_NS_MASK) >>
TMU_RTR_CS_1_LOCAL_TIME_NS_SHIFT;
local_time = hi << 48 | mid << 16 | lo;
/* Tell the switch that time sync is disrupted for a while */
ret = tb_switch_tmu_set_time_disruption(sw, true);
if (ret)
return ret;
post_local_time_offset = sw->tmu.cap + TMU_RTR_CS_22;
post_time_offset = sw->tmu.cap + TMU_RTR_CS_24;
post_time_high_offset = sw->tmu.cap + TMU_RTR_CS_25;
/*
* Write the Grandmaster time to the Post Local Time registers
* of the new switch.
*/
ret = tb_sw_write(sw, &local_time, TB_CFG_SWITCH,
post_local_time_offset, 2);
if (ret)
goto out;
/*
* Have the new switch update its local time by:
	 * 1) writing 0x1 to the Post Time Low register and 0xffffffff to
	 *    the Post Time High register.
	 * 2) writing 0 to the Post Time High register and then waiting
	 *    until the Post Time register reads back as 0, which means
	 *    the time has converged properly.
*/
post_time = 0xffffffff00000001ULL;
ret = tb_sw_write(sw, &post_time, TB_CFG_SWITCH, post_time_offset, 2);
if (ret)
goto out;
ret = tb_sw_write(sw, &post_time_high, TB_CFG_SWITCH,
post_time_high_offset, 1);
if (ret)
goto out;
do {
usleep_range(5, 10);
ret = tb_sw_read(sw, &post_time, TB_CFG_SWITCH,
post_time_offset, 2);
if (ret)
goto out;
} while (--retries && post_time);
if (!retries) {
ret = -ETIMEDOUT;
goto out;
}
tb_sw_dbg(sw, "TMU: updated local time to %#llx\n", local_time);
out:
tb_switch_tmu_set_time_disruption(sw, false);
return ret;
}
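/* Disable enhanced uni-directional mode on both sides of the link */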
static int disable_enhanced(struct tb_port *up, struct tb_port *down)
{
int ret;
/*
	 * The router may already have been disconnected so ignore errors
	 * on the upstream port.
*/
tb_port_tmu_rate_write(up, 0);
tb_port_tmu_enhanced_enable(up, false);
ret = tb_port_tmu_rate_write(down, 0);
if (ret)
return ret;
return tb_port_tmu_enhanced_enable(down, false);
}
/**
* tb_switch_tmu_disable() - Disable TMU of a switch
* @sw: Switch whose TMU to disable
*
* Turns off TMU of @sw if it is enabled. If not enabled does nothing.
*/
int tb_switch_tmu_disable(struct tb_switch *sw)
{
/* Already disabled? */
if (sw->tmu.mode == TB_SWITCH_TMU_MODE_OFF)
return 0;
if (tb_route(sw)) {
struct tb_port *down, *up;
int ret;
down = tb_switch_downstream_port(sw);
up = tb_upstream_port(sw);
/*
* In case of uni-directional time sync, TMU handshake is
* initiated by upstream router. In case of bi-directional
* time sync, TMU handshake is initiated by downstream router.
* We change downstream router's rate to off for both uni/bidir
* cases although it is needed only for the bi-directional mode.
	 * We avoid changing the upstream router's mode since it might
	 * have another downstream router plugged in that is set to
	 * uni-directional mode and we don't want to change its TMU
	 * mode.
*/
ret = tb_switch_tmu_rate_write(sw, tmu_rates[TB_SWITCH_TMU_MODE_OFF]);
if (ret)
return ret;
tb_port_tmu_time_sync_disable(up);
ret = tb_port_tmu_time_sync_disable(down);
if (ret)
return ret;
switch (sw->tmu.mode) {
case TB_SWITCH_TMU_MODE_LOWRES:
case TB_SWITCH_TMU_MODE_HIFI_UNI:
/* The switch may be unplugged so ignore any errors */
tb_port_tmu_unidirectional_disable(up);
ret = tb_port_tmu_unidirectional_disable(down);
if (ret)
return ret;
break;
case TB_SWITCH_TMU_MODE_MEDRES_ENHANCED_UNI:
ret = disable_enhanced(up, down);
if (ret)
return ret;
break;
default:
break;
}
} else {
tb_switch_tmu_rate_write(sw, tmu_rates[TB_SWITCH_TMU_MODE_OFF]);
}
sw->tmu.mode = TB_SWITCH_TMU_MODE_OFF;
tb_sw_dbg(sw, "TMU: disabled\n");
return 0;
}
/* Called only when there is a failure enabling the requested mode */
static void tb_switch_tmu_off(struct tb_switch *sw)
{
unsigned int rate = tmu_rates[TB_SWITCH_TMU_MODE_OFF];
struct tb_port *down, *up;
down = tb_switch_downstream_port(sw);
up = tb_upstream_port(sw);
/*
* In case of any failure in one of the steps when setting
* bi-directional or uni-directional TMU mode, get back to the TMU
* configurations in off mode. In case of additional failures in
* the functions below, ignore them since the caller shall already
* report a failure.
*/
tb_port_tmu_time_sync_disable(down);
tb_port_tmu_time_sync_disable(up);
switch (sw->tmu.mode_request) {
case TB_SWITCH_TMU_MODE_LOWRES:
case TB_SWITCH_TMU_MODE_HIFI_UNI:
tb_switch_tmu_rate_write(tb_switch_parent(sw), rate);
break;
case TB_SWITCH_TMU_MODE_MEDRES_ENHANCED_UNI:
disable_enhanced(up, down);
break;
default:
break;
}
/* Always set the rate to 0 */
tb_switch_tmu_rate_write(sw, rate);
tb_switch_set_tmu_mode_params(sw, sw->tmu.mode);
tb_port_tmu_unidirectional_disable(down);
tb_port_tmu_unidirectional_disable(up);
}
/*
* This function is called when the previous TMU mode was
* TB_SWITCH_TMU_MODE_OFF.
*/
static int tb_switch_tmu_enable_bidirectional(struct tb_switch *sw)
{
struct tb_port *up, *down;
int ret;
up = tb_upstream_port(sw);
down = tb_switch_downstream_port(sw);
ret = tb_port_tmu_unidirectional_disable(up);
if (ret)
return ret;
ret = tb_port_tmu_unidirectional_disable(down);
if (ret)
goto out;
ret = tb_switch_tmu_rate_write(sw, tmu_rates[TB_SWITCH_TMU_MODE_HIFI_BI]);
if (ret)
goto out;
ret = tb_port_tmu_time_sync_enable(up);
if (ret)
goto out;
ret = tb_port_tmu_time_sync_enable(down);
if (ret)
goto out;
return 0;
out:
tb_switch_tmu_off(sw);
return ret;
}
/* Only needed for Titan Ridge */
static int tb_switch_tmu_disable_objections(struct tb_switch *sw)
{
struct tb_port *up = tb_upstream_port(sw);
u32 val;
int ret;
ret = tb_sw_read(sw, &val, TB_CFG_SWITCH,
sw->cap_vsec_tmu + TB_TIME_VSEC_3_CS_9, 1);
if (ret)
return ret;
val &= ~TB_TIME_VSEC_3_CS_9_TMU_OBJ_MASK;
ret = tb_sw_write(sw, &val, TB_CFG_SWITCH,
sw->cap_vsec_tmu + TB_TIME_VSEC_3_CS_9, 1);
if (ret)
return ret;
return tb_port_tmu_write(up, TMU_ADP_CS_6,
TMU_ADP_CS_6_DISABLE_TMU_OBJ_MASK,
TMU_ADP_CS_6_DISABLE_TMU_OBJ_CL1 |
TMU_ADP_CS_6_DISABLE_TMU_OBJ_CL2);
}
/*
* This function is called when the previous TMU mode was
* TB_SWITCH_TMU_MODE_OFF.
*/
static int tb_switch_tmu_enable_unidirectional(struct tb_switch *sw)
{
struct tb_port *up, *down;
int ret;
up = tb_upstream_port(sw);
down = tb_switch_downstream_port(sw);
ret = tb_switch_tmu_rate_write(tb_switch_parent(sw),
tmu_rates[sw->tmu.mode_request]);
if (ret)
return ret;
ret = tb_switch_set_tmu_mode_params(sw, sw->tmu.mode_request);
if (ret)
return ret;
ret = tb_port_tmu_unidirectional_enable(up);
if (ret)
goto out;
ret = tb_port_tmu_time_sync_enable(up);
if (ret)
goto out;
ret = tb_port_tmu_unidirectional_enable(down);
if (ret)
goto out;
ret = tb_port_tmu_time_sync_enable(down);
if (ret)
goto out;
return 0;
out:
tb_switch_tmu_off(sw);
return ret;
}
/*
* This function is called when the previous TMU mode was
 * TB_SWITCH_TMU_MODE_OFF.
*/
static int tb_switch_tmu_enable_enhanced(struct tb_switch *sw)
{
unsigned int rate = tmu_rates[sw->tmu.mode_request];
struct tb_port *up, *down;
int ret;
/* Router specific parameters first */
ret = tb_switch_set_tmu_mode_params(sw, sw->tmu.mode_request);
if (ret)
return ret;
up = tb_upstream_port(sw);
down = tb_switch_downstream_port(sw);
ret = tb_port_set_tmu_mode_params(up, sw->tmu.mode_request);
if (ret)
goto out;
ret = tb_port_tmu_rate_write(up, rate);
if (ret)
goto out;
ret = tb_port_tmu_enhanced_enable(up, true);
if (ret)
goto out;
ret = tb_port_set_tmu_mode_params(down, sw->tmu.mode_request);
if (ret)
goto out;
ret = tb_port_tmu_rate_write(down, rate);
if (ret)
goto out;
ret = tb_port_tmu_enhanced_enable(down, true);
if (ret)
goto out;
return 0;
out:
tb_switch_tmu_off(sw);
return ret;
}
static void tb_switch_tmu_change_mode_prev(struct tb_switch *sw)
{
unsigned int rate = tmu_rates[sw->tmu.mode];
struct tb_port *down, *up;
down = tb_switch_downstream_port(sw);
up = tb_upstream_port(sw);
/*
	 * In case of any failure in one of the steps when changing the mode,
* get back to the TMU configurations in previous mode.
* In case of additional failures in the functions below,
* ignore them since the caller shall already report a failure.
*/
switch (sw->tmu.mode) {
case TB_SWITCH_TMU_MODE_LOWRES:
case TB_SWITCH_TMU_MODE_HIFI_UNI:
tb_port_tmu_set_unidirectional(down, true);
tb_switch_tmu_rate_write(tb_switch_parent(sw), rate);
break;
case TB_SWITCH_TMU_MODE_HIFI_BI:
tb_port_tmu_set_unidirectional(down, false);
tb_switch_tmu_rate_write(sw, rate);
break;
default:
break;
}
tb_switch_set_tmu_mode_params(sw, sw->tmu.mode);
switch (sw->tmu.mode) {
case TB_SWITCH_TMU_MODE_LOWRES:
case TB_SWITCH_TMU_MODE_HIFI_UNI:
tb_port_tmu_set_unidirectional(up, true);
break;
case TB_SWITCH_TMU_MODE_HIFI_BI:
tb_port_tmu_set_unidirectional(up, false);
break;
default:
break;
}
}
static int tb_switch_tmu_change_mode(struct tb_switch *sw)
{
unsigned int rate = tmu_rates[sw->tmu.mode_request];
struct tb_port *up, *down;
int ret;
up = tb_upstream_port(sw);
down = tb_switch_downstream_port(sw);
/* Program the upstream router downstream facing lane adapter */
switch (sw->tmu.mode_request) {
case TB_SWITCH_TMU_MODE_LOWRES:
case TB_SWITCH_TMU_MODE_HIFI_UNI:
ret = tb_port_tmu_set_unidirectional(down, true);
if (ret)
goto out;
ret = tb_switch_tmu_rate_write(tb_switch_parent(sw), rate);
if (ret)
goto out;
break;
case TB_SWITCH_TMU_MODE_HIFI_BI:
ret = tb_port_tmu_set_unidirectional(down, false);
if (ret)
goto out;
ret = tb_switch_tmu_rate_write(sw, rate);
if (ret)
goto out;
break;
default:
/* Not allowed to change modes from other than above */
return -EINVAL;
}
ret = tb_switch_set_tmu_mode_params(sw, sw->tmu.mode_request);
if (ret)
return ret;
/* Program the new mode and the downstream router lane adapter */
switch (sw->tmu.mode_request) {
case TB_SWITCH_TMU_MODE_LOWRES:
case TB_SWITCH_TMU_MODE_HIFI_UNI:
ret = tb_port_tmu_set_unidirectional(up, true);
if (ret)
goto out;
break;
case TB_SWITCH_TMU_MODE_HIFI_BI:
ret = tb_port_tmu_set_unidirectional(up, false);
if (ret)
goto out;
break;
default:
/* Not allowed to change modes from other than above */
return -EINVAL;
}
ret = tb_port_tmu_time_sync_enable(down);
if (ret)
goto out;
ret = tb_port_tmu_time_sync_enable(up);
if (ret)
goto out;
return 0;
out:
tb_switch_tmu_change_mode_prev(sw);
return ret;
}
/**
* tb_switch_tmu_enable() - Enable TMU on a router
* @sw: Router whose TMU to enable
*
 * Enables TMU of a router in uni-directional Normal/HiFi, bi-directional
 * HiFi or enhanced uni-directional mode. Calling tb_switch_tmu_configure()
 * is required before calling this function.
*/
int tb_switch_tmu_enable(struct tb_switch *sw)
{
int ret;
if (tb_switch_tmu_is_enabled(sw))
return 0;
if (tb_switch_is_titan_ridge(sw) &&
(sw->tmu.mode_request == TB_SWITCH_TMU_MODE_LOWRES ||
sw->tmu.mode_request == TB_SWITCH_TMU_MODE_HIFI_UNI)) {
ret = tb_switch_tmu_disable_objections(sw);
if (ret)
return ret;
}
ret = tb_switch_tmu_set_time_disruption(sw, true);
if (ret)
return ret;
if (tb_route(sw)) {
/*
		 * The supported mode changes are from OFF to any of the
		 * supported modes, or between the non-enhanced modes
		 * (Normal-Uni, HiFi-Uni and HiFi-BiDir).
*/
if (sw->tmu.mode == TB_SWITCH_TMU_MODE_OFF) {
switch (sw->tmu.mode_request) {
case TB_SWITCH_TMU_MODE_LOWRES:
case TB_SWITCH_TMU_MODE_HIFI_UNI:
ret = tb_switch_tmu_enable_unidirectional(sw);
break;
case TB_SWITCH_TMU_MODE_HIFI_BI:
ret = tb_switch_tmu_enable_bidirectional(sw);
break;
case TB_SWITCH_TMU_MODE_MEDRES_ENHANCED_UNI:
ret = tb_switch_tmu_enable_enhanced(sw);
break;
default:
ret = -EINVAL;
break;
}
} else if (sw->tmu.mode == TB_SWITCH_TMU_MODE_LOWRES ||
sw->tmu.mode == TB_SWITCH_TMU_MODE_HIFI_UNI ||
sw->tmu.mode == TB_SWITCH_TMU_MODE_HIFI_BI) {
ret = tb_switch_tmu_change_mode(sw);
} else {
ret = -EINVAL;
}
} else {
/*
* Host router port configurations are written as
* part of configurations for downstream port of the parent
* of the child node - see above.
		 * Here only the host router's rate configuration is written.
*/
ret = tb_switch_tmu_rate_write(sw, tmu_rates[sw->tmu.mode_request]);
}
if (ret) {
tb_sw_warn(sw, "TMU: failed to enable mode %s: %d\n",
tmu_mode_name(sw->tmu.mode_request), ret);
} else {
sw->tmu.mode = sw->tmu.mode_request;
tb_sw_dbg(sw, "TMU: mode set to: %s\n", tmu_mode_name(sw->tmu.mode));
}
return tb_switch_tmu_set_time_disruption(sw, false);
}
/**
* tb_switch_tmu_configure() - Configure the TMU mode
* @sw: Router whose mode to change
* @mode: Mode to configure
*
* Selects the TMU mode that is enabled when tb_switch_tmu_enable() is
* next called.
*
 * Returns %0 on success and negative errno otherwise. Specifically
* returns %-EOPNOTSUPP if the requested mode is not possible (not
* supported by the router and/or topology).
*/
int tb_switch_tmu_configure(struct tb_switch *sw, enum tb_switch_tmu_mode mode)
{
switch (mode) {
case TB_SWITCH_TMU_MODE_OFF:
break;
case TB_SWITCH_TMU_MODE_LOWRES:
case TB_SWITCH_TMU_MODE_HIFI_UNI:
if (!sw->tmu.has_ucap)
return -EOPNOTSUPP;
break;
case TB_SWITCH_TMU_MODE_HIFI_BI:
break;
case TB_SWITCH_TMU_MODE_MEDRES_ENHANCED_UNI: {
const struct tb_switch *parent_sw = tb_switch_parent(sw);
if (!parent_sw || !tb_switch_tmu_enhanced_is_supported(parent_sw))
return -EOPNOTSUPP;
if (!tb_switch_tmu_enhanced_is_supported(sw))
return -EOPNOTSUPP;
break;
}
default:
tb_sw_warn(sw, "TMU: unsupported mode %u\n", mode);
return -EINVAL;
}
if (sw->tmu.mode_request != mode) {
tb_sw_dbg(sw, "TMU: mode change %s -> %s requested\n",
tmu_mode_name(sw->tmu.mode), tmu_mode_name(mode));
sw->tmu.mode_request = mode;
}
return 0;
}
| linux-master | drivers/thunderbolt/tmu.c |
// SPDX-License-Identifier: GPL-2.0
/*
* NVM helpers
*
* Copyright (C) 2020, Intel Corporation
* Author: Mika Westerberg <[email protected]>
*/
#include <linux/idr.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include "tb.h"
#define NVM_MIN_SIZE SZ_32K
#define NVM_MAX_SIZE SZ_1M
#define NVM_DATA_DWORDS 16
/* Intel specific NVM offsets */
#define INTEL_NVM_DEVID 0x05
#define INTEL_NVM_VERSION 0x08
#define INTEL_NVM_CSS 0x10
#define INTEL_NVM_FLASH_SIZE 0x45
/* ASMedia specific NVM offsets */
#define ASMEDIA_NVM_DATE 0x1c
#define ASMEDIA_NVM_VERSION 0x28
static DEFINE_IDA(nvm_ida);
/**
* struct tb_nvm_vendor_ops - Vendor specific NVM operations
* @read_version: Reads out NVM version from the flash
* @validate: Validates the NVM image before update (optional)
* @write_headers: Writes headers before the rest of the image (optional)
*/
struct tb_nvm_vendor_ops {
int (*read_version)(struct tb_nvm *nvm);
int (*validate)(struct tb_nvm *nvm);
int (*write_headers)(struct tb_nvm *nvm);
};
/**
* struct tb_nvm_vendor - Vendor to &struct tb_nvm_vendor_ops mapping
* @vendor: Vendor ID
* @vops: Vendor specific NVM operations
*
* Maps vendor ID to NVM vendor operations. If there is no mapping then
* NVM firmware upgrade is disabled for the device.
*/
struct tb_nvm_vendor {
u16 vendor;
const struct tb_nvm_vendor_ops *vops;
};
static int intel_switch_nvm_version(struct tb_nvm *nvm)
{
struct tb_switch *sw = tb_to_switch(nvm->dev);
u32 val, nvm_size, hdr_size;
int ret;
/*
* If the switch is in safe-mode the only accessible portion of
* the NVM is the non-active one where userspace is expected to
* write new functional NVM.
*/
if (sw->safe_mode)
return 0;
ret = tb_switch_nvm_read(sw, INTEL_NVM_FLASH_SIZE, &val, sizeof(val));
if (ret)
return ret;
hdr_size = sw->generation < 3 ? SZ_8K : SZ_16K;
nvm_size = (SZ_1M << (val & 7)) / 8;
nvm_size = (nvm_size - hdr_size) / 2;
ret = tb_switch_nvm_read(sw, INTEL_NVM_VERSION, &val, sizeof(val));
if (ret)
return ret;
nvm->major = (val >> 16) & 0xff;
nvm->minor = (val >> 8) & 0xff;
nvm->active_size = nvm_size;
return 0;
}
static int intel_switch_nvm_validate(struct tb_nvm *nvm)
{
struct tb_switch *sw = tb_to_switch(nvm->dev);
unsigned int image_size, hdr_size;
u16 ds_size, device_id;
u8 *buf = nvm->buf;
image_size = nvm->buf_data_size;
/*
* FARB pointer must point inside the image and must at least
* contain parts of the digital section we will be reading here.
*/
hdr_size = (*(u32 *)buf) & 0xffffff;
if (hdr_size + INTEL_NVM_DEVID + 2 >= image_size)
return -EINVAL;
/* Digital section start should be aligned to 4k page */
if (!IS_ALIGNED(hdr_size, SZ_4K))
return -EINVAL;
/*
* Read digital section size and check that it also fits inside
* the image.
*/
ds_size = *(u16 *)(buf + hdr_size);
if (ds_size >= image_size)
return -EINVAL;
if (sw->safe_mode)
return 0;
/*
* Make sure the device ID in the image matches the one
* we read from the switch config space.
*/
device_id = *(u16 *)(buf + hdr_size + INTEL_NVM_DEVID);
if (device_id != sw->config.device_id)
return -EINVAL;
/* Skip headers in the image */
nvm->buf_data_start = buf + hdr_size;
nvm->buf_data_size = image_size - hdr_size;
return 0;
}
static int intel_switch_nvm_write_headers(struct tb_nvm *nvm)
{
struct tb_switch *sw = tb_to_switch(nvm->dev);
if (sw->generation < 3) {
int ret;
/* Write CSS headers first */
ret = dma_port_flash_write(sw->dma_port,
DMA_PORT_CSS_ADDRESS, nvm->buf + INTEL_NVM_CSS,
DMA_PORT_CSS_MAX_SIZE);
if (ret)
return ret;
}
return 0;
}
static const struct tb_nvm_vendor_ops intel_switch_nvm_ops = {
.read_version = intel_switch_nvm_version,
.validate = intel_switch_nvm_validate,
.write_headers = intel_switch_nvm_write_headers,
};
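/*
 * The ASMedia version and date are read as 32-bit values whose low
 * three bytes are reassembled in reverse order below.
 */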
static int asmedia_switch_nvm_version(struct tb_nvm *nvm)
{
struct tb_switch *sw = tb_to_switch(nvm->dev);
u32 val;
int ret;
ret = tb_switch_nvm_read(sw, ASMEDIA_NVM_VERSION, &val, sizeof(val));
if (ret)
return ret;
nvm->major = (val << 16) & 0xff0000;
nvm->major |= val & 0x00ff00;
nvm->major |= (val >> 16) & 0x0000ff;
ret = tb_switch_nvm_read(sw, ASMEDIA_NVM_DATE, &val, sizeof(val));
if (ret)
return ret;
nvm->minor = (val << 16) & 0xff0000;
nvm->minor |= val & 0x00ff00;
nvm->minor |= (val >> 16) & 0x0000ff;
/* ASMedia NVM size is fixed to 512k */
nvm->active_size = SZ_512K;
return 0;
}
static const struct tb_nvm_vendor_ops asmedia_switch_nvm_ops = {
.read_version = asmedia_switch_nvm_version,
};
/* Router vendor NVM support table */
static const struct tb_nvm_vendor switch_nvm_vendors[] = {
{ 0x174c, &asmedia_switch_nvm_ops },
{ PCI_VENDOR_ID_INTEL, &intel_switch_nvm_ops },
{ 0x8087, &intel_switch_nvm_ops },
};
static int intel_retimer_nvm_version(struct tb_nvm *nvm)
{
struct tb_retimer *rt = tb_to_retimer(nvm->dev);
u32 val, nvm_size;
int ret;
ret = tb_retimer_nvm_read(rt, INTEL_NVM_VERSION, &val, sizeof(val));
if (ret)
return ret;
nvm->major = (val >> 16) & 0xff;
nvm->minor = (val >> 8) & 0xff;
ret = tb_retimer_nvm_read(rt, INTEL_NVM_FLASH_SIZE, &val, sizeof(val));
if (ret)
return ret;
nvm_size = (SZ_1M << (val & 7)) / 8;
nvm_size = (nvm_size - SZ_16K) / 2;
nvm->active_size = nvm_size;
return 0;
}
static int intel_retimer_nvm_validate(struct tb_nvm *nvm)
{
struct tb_retimer *rt = tb_to_retimer(nvm->dev);
unsigned int image_size, hdr_size;
u8 *buf = nvm->buf;
u16 ds_size, device;
image_size = nvm->buf_data_size;
/*
* FARB pointer must point inside the image and must at least
* contain parts of the digital section we will be reading here.
*/
hdr_size = (*(u32 *)buf) & 0xffffff;
if (hdr_size + INTEL_NVM_DEVID + 2 >= image_size)
return -EINVAL;
/* Digital section start should be aligned to 4k page */
if (!IS_ALIGNED(hdr_size, SZ_4K))
return -EINVAL;
/*
* Read digital section size and check that it also fits inside
* the image.
*/
ds_size = *(u16 *)(buf + hdr_size);
if (ds_size >= image_size)
return -EINVAL;
/*
* Make sure the device ID in the image matches the retimer
* hardware.
*/
device = *(u16 *)(buf + hdr_size + INTEL_NVM_DEVID);
if (device != rt->device)
return -EINVAL;
/* Skip headers in the image */
nvm->buf_data_start = buf + hdr_size;
nvm->buf_data_size = image_size - hdr_size;
return 0;
}
static const struct tb_nvm_vendor_ops intel_retimer_nvm_ops = {
.read_version = intel_retimer_nvm_version,
.validate = intel_retimer_nvm_validate,
};
/* Retimer vendor NVM support table */
static const struct tb_nvm_vendor retimer_nvm_vendors[] = {
{ 0x8087, &intel_retimer_nvm_ops },
};
/**
* tb_nvm_alloc() - Allocate new NVM structure
* @dev: Device owning the NVM
*
* Allocates new NVM structure with unique @id and returns it. In case
* of error returns ERR_PTR(). Specifically returns %-EOPNOTSUPP if the
* NVM format of the @dev is not known by the kernel.
*/
struct tb_nvm *tb_nvm_alloc(struct device *dev)
{
const struct tb_nvm_vendor_ops *vops = NULL;
struct tb_nvm *nvm;
int ret, i;
if (tb_is_switch(dev)) {
const struct tb_switch *sw = tb_to_switch(dev);
for (i = 0; i < ARRAY_SIZE(switch_nvm_vendors); i++) {
const struct tb_nvm_vendor *v = &switch_nvm_vendors[i];
if (v->vendor == sw->config.vendor_id) {
vops = v->vops;
break;
}
}
if (!vops) {
tb_sw_dbg(sw, "router NVM format of vendor %#x unknown\n",
sw->config.vendor_id);
return ERR_PTR(-EOPNOTSUPP);
}
} else if (tb_is_retimer(dev)) {
const struct tb_retimer *rt = tb_to_retimer(dev);
for (i = 0; i < ARRAY_SIZE(retimer_nvm_vendors); i++) {
const struct tb_nvm_vendor *v = &retimer_nvm_vendors[i];
if (v->vendor == rt->vendor) {
vops = v->vops;
break;
}
}
if (!vops) {
dev_dbg(dev, "retimer NVM format of vendor %#x unknown\n",
rt->vendor);
return ERR_PTR(-EOPNOTSUPP);
}
} else {
return ERR_PTR(-EOPNOTSUPP);
}
nvm = kzalloc(sizeof(*nvm), GFP_KERNEL);
if (!nvm)
return ERR_PTR(-ENOMEM);
ret = ida_simple_get(&nvm_ida, 0, 0, GFP_KERNEL);
if (ret < 0) {
kfree(nvm);
return ERR_PTR(ret);
}
nvm->id = ret;
nvm->dev = dev;
nvm->vops = vops;
return nvm;
}
/**
* tb_nvm_read_version() - Read and populate NVM version
* @nvm: NVM structure
*
* Uses vendor specific means to read out and fill in the existing
* active NVM version. Returns %0 in case of success and negative errno
* otherwise.
*/
int tb_nvm_read_version(struct tb_nvm *nvm)
{
const struct tb_nvm_vendor_ops *vops = nvm->vops;
if (vops && vops->read_version)
return vops->read_version(nvm);
return -EOPNOTSUPP;
}
/**
* tb_nvm_validate() - Validate new NVM image
* @nvm: NVM structure
*
* Runs vendor specific validation over the new NVM image and if all
 * checks pass returns %0. As a side effect it updates @nvm->buf_data_start
* and @nvm->buf_data_size fields to match the actual data to be written
* to the NVM.
*
* If the validation does not pass then returns negative errno.
*/
int tb_nvm_validate(struct tb_nvm *nvm)
{
const struct tb_nvm_vendor_ops *vops = nvm->vops;
unsigned int image_size;
u8 *buf = nvm->buf;
if (!buf)
return -EINVAL;
if (!vops)
return -EOPNOTSUPP;
/* Just do basic image size checks */
image_size = nvm->buf_data_size;
if (image_size < NVM_MIN_SIZE || image_size > NVM_MAX_SIZE)
return -EINVAL;
/*
* Set the default data start in the buffer. The validate method
* below can change this if needed.
*/
nvm->buf_data_start = buf;
return vops->validate ? vops->validate(nvm) : 0;
}
/**
* tb_nvm_write_headers() - Write headers before the rest of the image
* @nvm: NVM structure
*
* If the vendor NVM format requires writing headers before the rest of
* the image, this function does that. Can be called even if the device
* does not need this.
*
* Returns %0 in case of success and negative errno otherwise.
*/
int tb_nvm_write_headers(struct tb_nvm *nvm)
{
const struct tb_nvm_vendor_ops *vops = nvm->vops;
return vops->write_headers ? vops->write_headers(nvm) : 0;
}
/**
* tb_nvm_add_active() - Adds active NVMem device to NVM
* @nvm: NVM structure
* @reg_read: Pointer to the function to read the NVM (passed directly to the
* NVMem device)
*
 * Registers new active NVMem device for @nvm. The @reg_read is called
 * directly from NVMem so it must handle possible concurrent access if
 * needed. The first parameter passed to @reg_read is @nvm structure.
 * Returns %0 on success and negative errno otherwise.
*/
int tb_nvm_add_active(struct tb_nvm *nvm, nvmem_reg_read_t reg_read)
{
struct nvmem_config config;
struct nvmem_device *nvmem;
memset(&config, 0, sizeof(config));
config.name = "nvm_active";
config.reg_read = reg_read;
config.read_only = true;
config.id = nvm->id;
config.stride = 4;
config.word_size = 4;
config.size = nvm->active_size;
config.dev = nvm->dev;
config.owner = THIS_MODULE;
config.priv = nvm;
nvmem = nvmem_register(&config);
if (IS_ERR(nvmem))
return PTR_ERR(nvmem);
nvm->active = nvmem;
return 0;
}
/**
* tb_nvm_write_buf() - Write data to @nvm buffer
* @nvm: NVM structure
* @offset: Offset where to write the data
* @val: Data buffer to write
* @bytes: Number of bytes to write
*
* Helper function to cache the new NVM image before it is actually
* written to the flash. Copies @bytes from @val to @nvm->buf starting
* from @offset.
*/
int tb_nvm_write_buf(struct tb_nvm *nvm, unsigned int offset, void *val,
size_t bytes)
{
if (!nvm->buf) {
nvm->buf = vmalloc(NVM_MAX_SIZE);
if (!nvm->buf)
return -ENOMEM;
}
nvm->flushed = false;
nvm->buf_data_size = offset + bytes;
memcpy(nvm->buf + offset, val, bytes);
return 0;
}
/**
* tb_nvm_add_non_active() - Adds non-active NVMem device to NVM
* @nvm: NVM structure
* @reg_write: Pointer to the function to write the NVM (passed directly
* to the NVMem device)
*
 * Registers new non-active NVMem device for @nvm. The @reg_write is called
 * directly from NVMem so it must handle possible concurrent access if
 * needed. The first parameter passed to @reg_write is @nvm structure.
 * The size of the NVMem device is set to %NVM_MAX_SIZE.
 *
 * Returns %0 on success and negative errno otherwise.
*/
int tb_nvm_add_non_active(struct tb_nvm *nvm, nvmem_reg_write_t reg_write)
{
struct nvmem_config config;
struct nvmem_device *nvmem;
memset(&config, 0, sizeof(config));
config.name = "nvm_non_active";
config.reg_write = reg_write;
config.root_only = true;
config.id = nvm->id;
config.stride = 4;
config.word_size = 4;
config.size = NVM_MAX_SIZE;
config.dev = nvm->dev;
config.owner = THIS_MODULE;
config.priv = nvm;
nvmem = nvmem_register(&config);
if (IS_ERR(nvmem))
return PTR_ERR(nvmem);
nvm->non_active = nvmem;
return 0;
}
/**
* tb_nvm_free() - Release NVM and its resources
* @nvm: NVM structure to release
*
* Releases NVM and the NVMem devices if they were registered.
*/
void tb_nvm_free(struct tb_nvm *nvm)
{
if (nvm) {
nvmem_unregister(nvm->non_active);
nvmem_unregister(nvm->active);
vfree(nvm->buf);
ida_simple_remove(&nvm_ida, nvm->id);
}
kfree(nvm);
}
/**
* tb_nvm_read_data() - Read data from NVM
* @address: Start address on the flash
* @buf: Buffer where the read data is copied
* @size: Size of the buffer in bytes
* @retries: Number of retries if block read fails
* @read_block: Function that reads block from the flash
 * @read_block_data: Data passed to @read_block
 *
 * This is a generic function that reads data from NVM or an NVM-like
 * device.
*
* Returns %0 on success and negative errno otherwise.
*/
int tb_nvm_read_data(unsigned int address, void *buf, size_t size,
unsigned int retries, read_block_fn read_block,
void *read_block_data)
{
do {
unsigned int dwaddress, dwords, offset;
u8 data[NVM_DATA_DWORDS * 4];
size_t nbytes;
int ret;
offset = address & 3;
nbytes = min_t(size_t, size + offset, NVM_DATA_DWORDS * 4);
dwaddress = address / 4;
dwords = ALIGN(nbytes, 4) / 4;
ret = read_block(read_block_data, dwaddress, data, dwords);
if (ret) {
if (ret != -ENODEV && retries--)
continue;
return ret;
}
nbytes -= offset;
memcpy(buf, data + offset, nbytes);
size -= nbytes;
address += nbytes;
buf += nbytes;
} while (size > 0);
return 0;
}
/**
* tb_nvm_write_data() - Write data to NVM
* @address: Start address on the flash
* @buf: Buffer where the data is copied from
* @size: Size of the buffer in bytes
* @retries: Number of retries if the block write fails
* @write_block: Function that writes block to the flash
 * @write_block_data: Data passed to @write_block
 *
 * This is a generic function that writes data to NVM or an NVM-like device.
*
* Returns %0 on success and negative errno otherwise.
*/
int tb_nvm_write_data(unsigned int address, const void *buf, size_t size,
unsigned int retries, write_block_fn write_block,
void *write_block_data)
{
do {
unsigned int offset, dwaddress;
u8 data[NVM_DATA_DWORDS * 4];
size_t nbytes;
int ret;
offset = address & 3;
nbytes = min_t(u32, size + offset, NVM_DATA_DWORDS * 4);
memcpy(data + offset, buf, nbytes);
dwaddress = address / 4;
ret = write_block(write_block_data, dwaddress, data, nbytes / 4);
if (ret) {
if (ret == -ETIMEDOUT) {
if (retries--)
continue;
ret = -EIO;
}
return ret;
}
size -= nbytes;
address += nbytes;
buf += nbytes;
} while (size > 0);
return 0;
}
void tb_nvm_exit(void)
{
ida_destroy(&nvm_ida);
}
| linux-master | drivers/thunderbolt/nvm.c |
// SPDX-License-Identifier: GPL-2.0
/*
* NHI specific operations
*
* Copyright (C) 2019, Intel Corporation
* Author: Mika Westerberg <[email protected]>
*/
#include <linux/delay.h>
#include <linux/suspend.h>
#include "nhi.h"
#include "nhi_regs.h"
#include "tb.h"
/* Ice Lake specific NHI operations */
#define ICL_LC_MAILBOX_TIMEOUT 500 /* ms */
static int check_for_device(struct device *dev, void *data)
{
return tb_is_switch(dev);
}
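/* Returns true if at least one device router is connected to the host router */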
static bool icl_nhi_is_device_connected(struct tb_nhi *nhi)
{
struct tb *tb = pci_get_drvdata(nhi->pdev);
int ret;
ret = device_for_each_child(&tb->root_switch->dev, NULL,
check_for_device);
return ret > 0;
}
static int icl_nhi_force_power(struct tb_nhi *nhi, bool power)
{
u32 vs_cap;
/*
	 * The Thunderbolt host controller is always present in Ice Lake
	 * but the firmware may not be loaded and running (depending on
	 * whether there is a device connected and so on). Each time the
	 * controller is used we need to "Force Power" it first and wait
	 * for the firmware to indicate it is up and running. This "Force
	 * Power" does not actually power the controller on or off, so it
	 * remains accessible even when "Force Power" is off.
*
* The actual power management happens inside shared ACPI power
* resources using standard ACPI methods.
*/
pci_read_config_dword(nhi->pdev, VS_CAP_22, &vs_cap);
if (power) {
vs_cap &= ~VS_CAP_22_DMA_DELAY_MASK;
vs_cap |= 0x22 << VS_CAP_22_DMA_DELAY_SHIFT;
vs_cap |= VS_CAP_22_FORCE_POWER;
} else {
vs_cap &= ~VS_CAP_22_FORCE_POWER;
}
pci_write_config_dword(nhi->pdev, VS_CAP_22, vs_cap);
if (power) {
unsigned int retries = 350;
u32 val;
/* Wait until the firmware tells it is up and running */
do {
pci_read_config_dword(nhi->pdev, VS_CAP_9, &val);
if (val & VS_CAP_9_FW_READY)
return 0;
usleep_range(3000, 3100);
} while (--retries);
return -ETIMEDOUT;
}
return 0;
}
static void icl_nhi_lc_mailbox_cmd(struct tb_nhi *nhi, enum icl_lc_mailbox_cmd cmd)
{
u32 data;
data = (cmd << VS_CAP_19_CMD_SHIFT) & VS_CAP_19_CMD_MASK;
pci_write_config_dword(nhi->pdev, VS_CAP_19, data | VS_CAP_19_VALID);
}
static int icl_nhi_lc_mailbox_cmd_complete(struct tb_nhi *nhi, int timeout)
{
unsigned long end;
u32 data;
if (!timeout)
goto clear;
end = jiffies + msecs_to_jiffies(timeout);
do {
pci_read_config_dword(nhi->pdev, VS_CAP_18, &data);
if (data & VS_CAP_18_DONE)
goto clear;
usleep_range(1000, 1100);
} while (time_before(jiffies, end));
return -ETIMEDOUT;
clear:
/* Clear the valid bit */
pci_write_config_dword(nhi->pdev, VS_CAP_19, 0);
return 0;
}
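/*
 * Program LTR (Latency Tolerance Reporting): read the maximum supported
 * value from VS_CAP_16 and program it to VS_CAP_15 for both snoop and
 * no-snoop traffic.
 */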
static void icl_nhi_set_ltr(struct tb_nhi *nhi)
{
u32 max_ltr, ltr;
pci_read_config_dword(nhi->pdev, VS_CAP_16, &max_ltr);
max_ltr &= 0xffff;
/* Program the same value for both snoop and no-snoop */
ltr = max_ltr << 16 | max_ltr;
pci_write_config_dword(nhi->pdev, VS_CAP_15, ltr);
}
static int icl_nhi_suspend(struct tb_nhi *nhi)
{
struct tb *tb = pci_get_drvdata(nhi->pdev);
int ret;
if (icl_nhi_is_device_connected(nhi))
return 0;
if (tb_switch_is_icm(tb->root_switch)) {
/*
		 * If there is no device connected we need to perform
		 * both a handshake through the LC mailbox and a force
		 * power down before entering D3.
*/
icl_nhi_lc_mailbox_cmd(nhi, ICL_LC_PREPARE_FOR_RESET);
ret = icl_nhi_lc_mailbox_cmd_complete(nhi, ICL_LC_MAILBOX_TIMEOUT);
if (ret)
return ret;
}
return icl_nhi_force_power(nhi, false);
}
static int icl_nhi_suspend_noirq(struct tb_nhi *nhi, bool wakeup)
{
struct tb *tb = pci_get_drvdata(nhi->pdev);
enum icl_lc_mailbox_cmd cmd;
if (!pm_suspend_via_firmware())
return icl_nhi_suspend(nhi);
if (!tb_switch_is_icm(tb->root_switch))
return 0;
cmd = wakeup ? ICL_LC_GO2SX : ICL_LC_GO2SX_NO_WAKE;
icl_nhi_lc_mailbox_cmd(nhi, cmd);
return icl_nhi_lc_mailbox_cmd_complete(nhi, ICL_LC_MAILBOX_TIMEOUT);
}
static int icl_nhi_resume(struct tb_nhi *nhi)
{
int ret;
ret = icl_nhi_force_power(nhi, true);
if (ret)
return ret;
icl_nhi_set_ltr(nhi);
return 0;
}
static void icl_nhi_shutdown(struct tb_nhi *nhi)
{
icl_nhi_force_power(nhi, false);
}
const struct tb_nhi_ops icl_nhi_ops = {
.init = icl_nhi_resume,
.suspend_noirq = icl_nhi_suspend_noirq,
.resume_noirq = icl_nhi_resume,
.runtime_suspend = icl_nhi_suspend,
.runtime_resume = icl_nhi_resume,
.shutdown = icl_nhi_shutdown,
};
| linux-master | drivers/thunderbolt/nhi_ops.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Debugfs interface
*
* Copyright (C) 2020, Intel Corporation
* Authors: Gil Fine <[email protected]>
* Mika Westerberg <[email protected]>
*/
#include <linux/debugfs.h>
#include <linux/pm_runtime.h>
#include <linux/uaccess.h>
#include "tb.h"
#include "sb_regs.h"
#define PORT_CAP_V1_PCIE_LEN 1
#define PORT_CAP_V2_PCIE_LEN 2
#define PORT_CAP_POWER_LEN 2
#define PORT_CAP_LANE_LEN 3
#define PORT_CAP_USB3_LEN 5
#define PORT_CAP_DP_V1_LEN 9
#define PORT_CAP_DP_V2_LEN 14
#define PORT_CAP_TMU_V1_LEN 8
#define PORT_CAP_TMU_V2_LEN 10
#define PORT_CAP_BASIC_LEN 9
#define PORT_CAP_USB4_LEN 20
#define SWITCH_CAP_TMU_LEN 26
#define SWITCH_CAP_BASIC_LEN 27
#define PATH_LEN 2
#define COUNTER_SET_LEN 3
#define DEBUGFS_ATTR(__space, __write) \
static int __space ## _open(struct inode *inode, struct file *file) \
{ \
return single_open(file, __space ## _show, inode->i_private); \
} \
\
static const struct file_operations __space ## _fops = { \
.owner = THIS_MODULE, \
.open = __space ## _open, \
.release = single_release, \
.read = seq_read, \
.write = __write, \
.llseek = seq_lseek, \
}
#define DEBUGFS_ATTR_RO(__space) \
DEBUGFS_ATTR(__space, NULL)
#define DEBUGFS_ATTR_RW(__space) \
DEBUGFS_ATTR(__space, __space ## _write)
static struct dentry *tb_debugfs_root;
static void *validate_and_copy_from_user(const void __user *user_buf,
size_t *count)
{
size_t nbytes;
void *buf;
if (!*count)
return ERR_PTR(-EINVAL);
if (!access_ok(user_buf, *count))
return ERR_PTR(-EFAULT);
buf = (void *)get_zeroed_page(GFP_KERNEL);
if (!buf)
return ERR_PTR(-ENOMEM);
nbytes = min_t(size_t, *count, PAGE_SIZE);
if (copy_from_user(buf, user_buf, nbytes)) {
free_page((unsigned long)buf);
return ERR_PTR(-EFAULT);
}
*count = nbytes;
return buf;
}
static bool parse_line(char **line, u32 *offs, u32 *val, int short_fmt_len,
int long_fmt_len)
{
char *token;
u32 v[5];
int ret;
token = strsep(line, "\n");
if (!token)
return false;
/*
* For Adapter/Router configuration space:
* Short format is: offset value\n
* v[0] v[1]
* Long format as produced from the read side:
* offset relative_offset cap_id vs_cap_id value\n
* v[0] v[1] v[2] v[3] v[4]
*
* For Counter configuration space:
* Short format is: offset\n
* v[0]
* Long format as produced from the read side:
* offset relative_offset counter_id value\n
* v[0] v[1] v[2] v[3]
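	 *
	 * For example, a short format write for adapter/router space would
	 * be "0x4 0x12345678" and for counter space just "0x4".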
*/
ret = sscanf(token, "%i %i %i %i %i", &v[0], &v[1], &v[2], &v[3], &v[4]);
	/* For counters (clear counter) the "val" content is not applicable */
if (ret == short_fmt_len) {
*offs = v[0];
*val = v[short_fmt_len - 1];
return true;
} else if (ret == long_fmt_len) {
*offs = v[0];
*val = v[long_fmt_len - 1];
return true;
}
return false;
}
#if IS_ENABLED(CONFIG_USB4_DEBUGFS_WRITE)
static ssize_t regs_write(struct tb_switch *sw, struct tb_port *port,
const char __user *user_buf, size_t count,
loff_t *ppos)
{
struct tb *tb = sw->tb;
char *line, *buf;
u32 val, offset;
int ret = 0;
buf = validate_and_copy_from_user(user_buf, &count);
if (IS_ERR(buf))
return PTR_ERR(buf);
pm_runtime_get_sync(&sw->dev);
if (mutex_lock_interruptible(&tb->lock)) {
ret = -ERESTARTSYS;
goto out;
}
/* User did hardware changes behind the driver's back */
add_taint(TAINT_USER, LOCKDEP_STILL_OK);
line = buf;
while (parse_line(&line, &offset, &val, 2, 5)) {
if (port)
ret = tb_port_write(port, &val, TB_CFG_PORT, offset, 1);
else
ret = tb_sw_write(sw, &val, TB_CFG_SWITCH, offset, 1);
if (ret)
break;
}
mutex_unlock(&tb->lock);
out:
pm_runtime_mark_last_busy(&sw->dev);
pm_runtime_put_autosuspend(&sw->dev);
free_page((unsigned long)buf);
return ret < 0 ? ret : count;
}
static ssize_t port_regs_write(struct file *file, const char __user *user_buf,
size_t count, loff_t *ppos)
{
struct seq_file *s = file->private_data;
struct tb_port *port = s->private;
return regs_write(port->sw, port, user_buf, count, ppos);
}
static ssize_t switch_regs_write(struct file *file, const char __user *user_buf,
size_t count, loff_t *ppos)
{
struct seq_file *s = file->private_data;
struct tb_switch *sw = s->private;
return regs_write(sw, NULL, user_buf, count, ppos);
}
#define DEBUGFS_MODE 0600
#else
#define port_regs_write NULL
#define switch_regs_write NULL
#define DEBUGFS_MODE 0400
#endif
#if IS_ENABLED(CONFIG_USB4_DEBUGFS_MARGINING)
/**
* struct tb_margining - Lane margining support
* @caps: Port lane margining capabilities
* @results: Last lane margining results
* @lanes: %0, %1 or %7 (all)
* @min_ber_level: Minimum supported BER level contour value
* @max_ber_level: Maximum supported BER level contour value
* @ber_level: Current BER level contour value
* @voltage_steps: Number of mandatory voltage steps
* @max_voltage_offset: Maximum mandatory voltage offset (in mV)
* @time_steps: Number of time margin steps
* @max_time_offset: Maximum time margin offset (in mUI)
* @software: %true if software margining is used instead of hardware
* @time: %true if time margining is used instead of voltage
* @right_high: %false if left/low margin test is performed, %true if
* right/high
*/
struct tb_margining {
u32 caps[2];
u32 results[2];
unsigned int lanes;
unsigned int min_ber_level;
unsigned int max_ber_level;
unsigned int ber_level;
unsigned int voltage_steps;
unsigned int max_voltage_offset;
unsigned int time_steps;
unsigned int max_time_offset;
bool software;
bool time;
bool right_high;
};
static bool supports_software(const struct usb4_port *usb4)
{
return usb4->margining->caps[0] & USB4_MARGIN_CAP_0_MODES_SW;
}
static bool supports_hardware(const struct usb4_port *usb4)
{
return usb4->margining->caps[0] & USB4_MARGIN_CAP_0_MODES_HW;
}
static bool both_lanes(const struct usb4_port *usb4)
{
return usb4->margining->caps[0] & USB4_MARGIN_CAP_0_2_LANES;
}
static unsigned int independent_voltage_margins(const struct usb4_port *usb4)
{
return (usb4->margining->caps[0] & USB4_MARGIN_CAP_0_VOLTAGE_INDP_MASK) >>
USB4_MARGIN_CAP_0_VOLTAGE_INDP_SHIFT;
}
static bool supports_time(const struct usb4_port *usb4)
{
return usb4->margining->caps[0] & USB4_MARGIN_CAP_0_TIME;
}
/* Only applicable if supports_time() returns true */
static unsigned int independent_time_margins(const struct usb4_port *usb4)
{
return (usb4->margining->caps[1] & USB4_MARGIN_CAP_1_TIME_INDP_MASK) >>
USB4_MARGIN_CAP_1_TIME_INDP_SHIFT;
}
static ssize_t
margining_ber_level_write(struct file *file, const char __user *user_buf,
size_t count, loff_t *ppos)
{
struct seq_file *s = file->private_data;
struct tb_port *port = s->private;
struct usb4_port *usb4 = port->usb4;
struct tb *tb = port->sw->tb;
unsigned int val;
int ret = 0;
char *buf;
if (mutex_lock_interruptible(&tb->lock))
return -ERESTARTSYS;
if (usb4->margining->software) {
ret = -EINVAL;
goto out_unlock;
}
buf = validate_and_copy_from_user(user_buf, &count);
if (IS_ERR(buf)) {
ret = PTR_ERR(buf);
goto out_unlock;
}
buf[count - 1] = '\0';
ret = kstrtouint(buf, 10, &val);
if (ret)
goto out_free;
if (val < usb4->margining->min_ber_level ||
val > usb4->margining->max_ber_level) {
ret = -EINVAL;
goto out_free;
}
usb4->margining->ber_level = val;
out_free:
free_page((unsigned long)buf);
out_unlock:
mutex_unlock(&tb->lock);
return ret < 0 ? ret : count;
}
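/*
 * Pretty print the BER level contour value: even values map to
 * 1e(-12 + val / 2) and odd values to 3 * 1e(-12 + (val + 1) / 2).
 */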
static void ber_level_show(struct seq_file *s, unsigned int val)
{
if (val % 2)
seq_printf(s, "3 * 1e%d (%u)\n", -12 + (val + 1) / 2, val);
else
seq_printf(s, "1e%d (%u)\n", -12 + val / 2, val);
}
static int margining_ber_level_show(struct seq_file *s, void *not_used)
{
struct tb_port *port = s->private;
struct usb4_port *usb4 = port->usb4;
if (usb4->margining->software)
return -EINVAL;
ber_level_show(s, usb4->margining->ber_level);
return 0;
}
DEBUGFS_ATTR_RW(margining_ber_level);
static int margining_caps_show(struct seq_file *s, void *not_used)
{
struct tb_port *port = s->private;
struct usb4_port *usb4 = port->usb4;
struct tb *tb = port->sw->tb;
u32 cap0, cap1;
if (mutex_lock_interruptible(&tb->lock))
return -ERESTARTSYS;
/* Dump the raw caps first */
cap0 = usb4->margining->caps[0];
seq_printf(s, "0x%08x\n", cap0);
cap1 = usb4->margining->caps[1];
seq_printf(s, "0x%08x\n", cap1);
seq_printf(s, "# software margining: %s\n",
supports_software(usb4) ? "yes" : "no");
if (supports_hardware(usb4)) {
seq_puts(s, "# hardware margining: yes\n");
seq_puts(s, "# minimum BER level contour: ");
ber_level_show(s, usb4->margining->min_ber_level);
seq_puts(s, "# maximum BER level contour: ");
ber_level_show(s, usb4->margining->max_ber_level);
} else {
seq_puts(s, "# hardware margining: no\n");
}
seq_printf(s, "# both lanes simultaneously: %s\n",
both_lanes(usb4) ? "yes" : "no");
seq_printf(s, "# voltage margin steps: %u\n",
usb4->margining->voltage_steps);
seq_printf(s, "# maximum voltage offset: %u mV\n",
usb4->margining->max_voltage_offset);
switch (independent_voltage_margins(usb4)) {
case USB4_MARGIN_CAP_0_VOLTAGE_MIN:
seq_puts(s, "# returns minimum between high and low voltage margins\n");
break;
case USB4_MARGIN_CAP_0_VOLTAGE_HL:
seq_puts(s, "# returns high or low voltage margin\n");
break;
case USB4_MARGIN_CAP_0_VOLTAGE_BOTH:
seq_puts(s, "# returns both high and low margins\n");
break;
}
if (supports_time(usb4)) {
seq_puts(s, "# time margining: yes\n");
seq_printf(s, "# time margining is destructive: %s\n",
cap1 & USB4_MARGIN_CAP_1_TIME_DESTR ? "yes" : "no");
switch (independent_time_margins(usb4)) {
case USB4_MARGIN_CAP_1_TIME_MIN:
seq_puts(s, "# returns minimum between left and right time margins\n");
break;
case USB4_MARGIN_CAP_1_TIME_LR:
seq_puts(s, "# returns left or right margin\n");
break;
case USB4_MARGIN_CAP_1_TIME_BOTH:
seq_puts(s, "# returns both left and right margins\n");
break;
}
seq_printf(s, "# time margin steps: %u\n",
usb4->margining->time_steps);
seq_printf(s, "# maximum time offset: %u mUI\n",
usb4->margining->max_time_offset);
} else {
seq_puts(s, "# time margining: no\n");
}
mutex_unlock(&tb->lock);
return 0;
}
DEBUGFS_ATTR_RO(margining_caps);
static ssize_t
margining_lanes_write(struct file *file, const char __user *user_buf,
size_t count, loff_t *ppos)
{
struct seq_file *s = file->private_data;
struct tb_port *port = s->private;
struct usb4_port *usb4 = port->usb4;
struct tb *tb = port->sw->tb;
int ret = 0;
char *buf;
buf = validate_and_copy_from_user(user_buf, &count);
if (IS_ERR(buf))
return PTR_ERR(buf);
buf[count - 1] = '\0';
if (mutex_lock_interruptible(&tb->lock)) {
ret = -ERESTARTSYS;
goto out_free;
}
if (!strcmp(buf, "0")) {
usb4->margining->lanes = 0;
} else if (!strcmp(buf, "1")) {
usb4->margining->lanes = 1;
} else if (!strcmp(buf, "all")) {
/* Needs to be supported */
if (both_lanes(usb4))
usb4->margining->lanes = 7;
else
ret = -EINVAL;
} else {
ret = -EINVAL;
}
mutex_unlock(&tb->lock);
out_free:
free_page((unsigned long)buf);
return ret < 0 ? ret : count;
}
static int margining_lanes_show(struct seq_file *s, void *not_used)
{
struct tb_port *port = s->private;
struct usb4_port *usb4 = port->usb4;
struct tb *tb = port->sw->tb;
unsigned int lanes;
if (mutex_lock_interruptible(&tb->lock))
return -ERESTARTSYS;
lanes = usb4->margining->lanes;
if (both_lanes(usb4)) {
if (!lanes)
seq_puts(s, "[0] 1 all\n");
else if (lanes == 1)
seq_puts(s, "0 [1] all\n");
else
seq_puts(s, "0 1 [all]\n");
} else {
if (!lanes)
seq_puts(s, "[0] 1\n");
else
seq_puts(s, "0 [1]\n");
}
mutex_unlock(&tb->lock);
return 0;
}
DEBUGFS_ATTR_RW(margining_lanes);
static ssize_t margining_mode_write(struct file *file,
const char __user *user_buf,
size_t count, loff_t *ppos)
{
struct seq_file *s = file->private_data;
struct tb_port *port = s->private;
struct usb4_port *usb4 = port->usb4;
struct tb *tb = port->sw->tb;
int ret = 0;
char *buf;
buf = validate_and_copy_from_user(user_buf, &count);
if (IS_ERR(buf))
return PTR_ERR(buf);
buf[count - 1] = '\0';
if (mutex_lock_interruptible(&tb->lock)) {
ret = -ERESTARTSYS;
goto out_free;
}
if (!strcmp(buf, "software")) {
if (supports_software(usb4))
usb4->margining->software = true;
else
ret = -EINVAL;
} else if (!strcmp(buf, "hardware")) {
if (supports_hardware(usb4))
usb4->margining->software = false;
else
ret = -EINVAL;
} else {
ret = -EINVAL;
}
mutex_unlock(&tb->lock);
out_free:
free_page((unsigned long)buf);
return ret ? ret : count;
}
static int margining_mode_show(struct seq_file *s, void *not_used)
{
const struct tb_port *port = s->private;
const struct usb4_port *usb4 = port->usb4;
struct tb *tb = port->sw->tb;
const char *space = "";
if (mutex_lock_interruptible(&tb->lock))
return -ERESTARTSYS;
if (supports_software(usb4)) {
if (usb4->margining->software)
seq_puts(s, "[software]");
else
seq_puts(s, "software");
space = " ";
}
if (supports_hardware(usb4)) {
if (usb4->margining->software)
seq_printf(s, "%shardware", space);
else
seq_printf(s, "%s[hardware]", space);
}
mutex_unlock(&tb->lock);
seq_puts(s, "\n");
return 0;
}
DEBUGFS_ATTR_RW(margining_mode);
static int margining_run_write(void *data, u64 val)
{
struct tb_port *port = data;
struct usb4_port *usb4 = port->usb4;
struct tb_switch *sw = port->sw;
struct tb_margining *margining;
struct tb_switch *down_sw;
struct tb *tb = sw->tb;
int ret, clx;
if (val != 1)
return -EINVAL;
pm_runtime_get_sync(&sw->dev);
if (mutex_lock_interruptible(&tb->lock)) {
ret = -ERESTARTSYS;
goto out_rpm_put;
}
if (tb_is_upstream_port(port))
down_sw = sw;
else if (port->remote)
down_sw = port->remote->sw;
else
down_sw = NULL;
if (down_sw) {
/*
* CL states may interfere with lane margining so
* disable them temporarily now.
*/
ret = tb_switch_clx_disable(down_sw);
if (ret < 0) {
tb_sw_warn(down_sw, "failed to disable CL states\n");
goto out_unlock;
}
clx = ret;
}
margining = usb4->margining;
if (margining->software) {
tb_port_dbg(port, "running software %s lane margining for lanes %u\n",
margining->time ? "time" : "voltage", margining->lanes);
ret = usb4_port_sw_margin(port, margining->lanes, margining->time,
margining->right_high,
USB4_MARGIN_SW_COUNTER_CLEAR);
if (ret)
goto out_clx;
ret = usb4_port_sw_margin_errors(port, &margining->results[0]);
} else {
tb_port_dbg(port, "running hardware %s lane margining for lanes %u\n",
margining->time ? "time" : "voltage", margining->lanes);
/* Clear the results */
margining->results[0] = 0;
margining->results[1] = 0;
ret = usb4_port_hw_margin(port, margining->lanes,
margining->ber_level, margining->time,
margining->right_high, margining->results);
}
out_clx:
if (down_sw)
tb_switch_clx_enable(down_sw, clx);
out_unlock:
mutex_unlock(&tb->lock);
out_rpm_put:
pm_runtime_mark_last_busy(&sw->dev);
pm_runtime_put_autosuspend(&sw->dev);
return ret;
}
DEFINE_DEBUGFS_ATTRIBUTE(margining_run_fops, NULL, margining_run_write,
"%llu\n");
static ssize_t margining_results_write(struct file *file,
const char __user *user_buf,
size_t count, loff_t *ppos)
{
struct seq_file *s = file->private_data;
struct tb_port *port = s->private;
struct usb4_port *usb4 = port->usb4;
struct tb *tb = port->sw->tb;
if (mutex_lock_interruptible(&tb->lock))
return -ERESTARTSYS;
/* Just clear the results */
usb4->margining->results[0] = 0;
usb4->margining->results[1] = 0;
mutex_unlock(&tb->lock);
return count;
}
static void voltage_margin_show(struct seq_file *s,
const struct tb_margining *margining, u8 val)
{
unsigned int tmp, voltage;
tmp = val & USB4_MARGIN_HW_RES_1_MARGIN_MASK;
voltage = tmp * margining->max_voltage_offset / margining->voltage_steps;
seq_printf(s, "%u mV (%u)", voltage, tmp);
if (val & USB4_MARGIN_HW_RES_1_EXCEEDS)
seq_puts(s, " exceeds maximum");
seq_puts(s, "\n");
}
static void time_margin_show(struct seq_file *s,
const struct tb_margining *margining, u8 val)
{
unsigned int tmp, interval;
tmp = val & USB4_MARGIN_HW_RES_1_MARGIN_MASK;
interval = tmp * margining->max_time_offset / margining->time_steps;
seq_printf(s, "%u mUI (%u)", interval, tmp);
if (val & USB4_MARGIN_HW_RES_1_EXCEEDS)
seq_puts(s, " exceeds maximum");
seq_puts(s, "\n");
}
static int margining_results_show(struct seq_file *s, void *not_used)
{
struct tb_port *port = s->private;
struct usb4_port *usb4 = port->usb4;
struct tb_margining *margining;
struct tb *tb = port->sw->tb;
if (mutex_lock_interruptible(&tb->lock))
return -ERESTARTSYS;
margining = usb4->margining;
/* Dump the raw results first */
seq_printf(s, "0x%08x\n", margining->results[0]);
/* Only the hardware margining has two result dwords */
if (!margining->software) {
unsigned int val;
seq_printf(s, "0x%08x\n", margining->results[1]);
if (margining->time) {
if (!margining->lanes || margining->lanes == 7) {
val = margining->results[1];
seq_puts(s, "# lane 0 right time margin: ");
time_margin_show(s, margining, val);
val = margining->results[1] >>
USB4_MARGIN_HW_RES_1_L0_LL_MARGIN_SHIFT;
seq_puts(s, "# lane 0 left time margin: ");
time_margin_show(s, margining, val);
}
if (margining->lanes == 1 || margining->lanes == 7) {
val = margining->results[1] >>
USB4_MARGIN_HW_RES_1_L1_RH_MARGIN_SHIFT;
seq_puts(s, "# lane 1 right time margin: ");
time_margin_show(s, margining, val);
val = margining->results[1] >>
USB4_MARGIN_HW_RES_1_L1_LL_MARGIN_SHIFT;
seq_puts(s, "# lane 1 left time margin: ");
time_margin_show(s, margining, val);
}
} else {
if (!margining->lanes || margining->lanes == 7) {
val = margining->results[1];
seq_puts(s, "# lane 0 high voltage margin: ");
voltage_margin_show(s, margining, val);
val = margining->results[1] >>
USB4_MARGIN_HW_RES_1_L0_LL_MARGIN_SHIFT;
seq_puts(s, "# lane 0 low voltage margin: ");
voltage_margin_show(s, margining, val);
}
if (margining->lanes == 1 || margining->lanes == 7) {
val = margining->results[1] >>
USB4_MARGIN_HW_RES_1_L1_RH_MARGIN_SHIFT;
seq_puts(s, "# lane 1 high voltage margin: ");
voltage_margin_show(s, margining, val);
val = margining->results[1] >>
USB4_MARGIN_HW_RES_1_L1_LL_MARGIN_SHIFT;
seq_puts(s, "# lane 1 low voltage margin: ");
voltage_margin_show(s, margining, val);
}
}
}
mutex_unlock(&tb->lock);
return 0;
}
DEBUGFS_ATTR_RW(margining_results);
static ssize_t margining_test_write(struct file *file,
const char __user *user_buf,
size_t count, loff_t *ppos)
{
struct seq_file *s = file->private_data;
struct tb_port *port = s->private;
struct usb4_port *usb4 = port->usb4;
struct tb *tb = port->sw->tb;
int ret = 0;
char *buf;
buf = validate_and_copy_from_user(user_buf, &count);
if (IS_ERR(buf))
return PTR_ERR(buf);
buf[count - 1] = '\0';
if (mutex_lock_interruptible(&tb->lock)) {
ret = -ERESTARTSYS;
goto out_free;
}
if (!strcmp(buf, "time") && supports_time(usb4))
usb4->margining->time = true;
else if (!strcmp(buf, "voltage"))
usb4->margining->time = false;
else
ret = -EINVAL;
mutex_unlock(&tb->lock);
out_free:
free_page((unsigned long)buf);
return ret ? ret : count;
}
static int margining_test_show(struct seq_file *s, void *not_used)
{
struct tb_port *port = s->private;
struct usb4_port *usb4 = port->usb4;
struct tb *tb = port->sw->tb;
if (mutex_lock_interruptible(&tb->lock))
return -ERESTARTSYS;
if (supports_time(usb4)) {
if (usb4->margining->time)
seq_puts(s, "voltage [time]\n");
else
seq_puts(s, "[voltage] time\n");
} else {
seq_puts(s, "[voltage]\n");
}
mutex_unlock(&tb->lock);
return 0;
}
DEBUGFS_ATTR_RW(margining_test);
static ssize_t margining_margin_write(struct file *file,
const char __user *user_buf,
size_t count, loff_t *ppos)
{
struct seq_file *s = file->private_data;
struct tb_port *port = s->private;
struct usb4_port *usb4 = port->usb4;
struct tb *tb = port->sw->tb;
int ret = 0;
char *buf;
buf = validate_and_copy_from_user(user_buf, &count);
if (IS_ERR(buf))
return PTR_ERR(buf);
buf[count - 1] = '\0';
if (mutex_lock_interruptible(&tb->lock)) {
ret = -ERESTARTSYS;
goto out_free;
}
if (usb4->margining->time) {
if (!strcmp(buf, "left"))
usb4->margining->right_high = false;
else if (!strcmp(buf, "right"))
usb4->margining->right_high = true;
else
ret = -EINVAL;
} else {
if (!strcmp(buf, "low"))
usb4->margining->right_high = false;
else if (!strcmp(buf, "high"))
usb4->margining->right_high = true;
else
ret = -EINVAL;
}
mutex_unlock(&tb->lock);
out_free:
free_page((unsigned long)buf);
return ret ? ret : count;
}
static int margining_margin_show(struct seq_file *s, void *not_used)
{
struct tb_port *port = s->private;
struct usb4_port *usb4 = port->usb4;
struct tb *tb = port->sw->tb;
if (mutex_lock_interruptible(&tb->lock))
return -ERESTARTSYS;
if (usb4->margining->time) {
if (usb4->margining->right_high)
seq_puts(s, "left [right]\n");
else
seq_puts(s, "[left] right\n");
} else {
if (usb4->margining->right_high)
seq_puts(s, "low [high]\n");
else
seq_puts(s, "[low] high\n");
}
mutex_unlock(&tb->lock);
return 0;
}
DEBUGFS_ATTR_RW(margining_margin);
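/*
 * Reads the port lane margining capabilities and populates the
 * "margining" debugfs directory under the port's debugfs directory.
 */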
static void margining_port_init(struct tb_port *port)
{
struct tb_margining *margining;
struct dentry *dir, *parent;
struct usb4_port *usb4;
char dir_name[10];
unsigned int val;
int ret;
usb4 = port->usb4;
if (!usb4)
return;
snprintf(dir_name, sizeof(dir_name), "port%d", port->port);
parent = debugfs_lookup(dir_name, port->sw->debugfs_dir);
margining = kzalloc(sizeof(*margining), GFP_KERNEL);
if (!margining)
return;
ret = usb4_port_margining_caps(port, margining->caps);
if (ret) {
kfree(margining);
return;
}
usb4->margining = margining;
/* Set the initial mode */
if (supports_software(usb4))
margining->software = true;
val = (margining->caps[0] & USB4_MARGIN_CAP_0_VOLTAGE_STEPS_MASK) >>
USB4_MARGIN_CAP_0_VOLTAGE_STEPS_SHIFT;
margining->voltage_steps = val;
val = (margining->caps[0] & USB4_MARGIN_CAP_0_MAX_VOLTAGE_OFFSET_MASK) >>
USB4_MARGIN_CAP_0_MAX_VOLTAGE_OFFSET_SHIFT;
margining->max_voltage_offset = 74 + val * 2;
if (supports_time(usb4)) {
val = (margining->caps[1] & USB4_MARGIN_CAP_1_TIME_STEPS_MASK) >>
USB4_MARGIN_CAP_1_TIME_STEPS_SHIFT;
margining->time_steps = val;
val = (margining->caps[1] & USB4_MARGIN_CAP_1_TIME_OFFSET_MASK) >>
USB4_MARGIN_CAP_1_TIME_OFFSET_SHIFT;
/*
* Store it as mUI (milli Unit Interval) because we want
		 * to keep it as an integer.
*/
margining->max_time_offset = 200 + 10 * val;
}
dir = debugfs_create_dir("margining", parent);
if (supports_hardware(usb4)) {
val = (margining->caps[1] & USB4_MARGIN_CAP_1_MIN_BER_MASK) >>
USB4_MARGIN_CAP_1_MIN_BER_SHIFT;
margining->min_ber_level = val;
val = (margining->caps[1] & USB4_MARGIN_CAP_1_MAX_BER_MASK) >>
USB4_MARGIN_CAP_1_MAX_BER_SHIFT;
margining->max_ber_level = val;
/* Set the default to minimum */
margining->ber_level = margining->min_ber_level;
debugfs_create_file("ber_level_contour", 0400, dir, port,
&margining_ber_level_fops);
}
debugfs_create_file("caps", 0400, dir, port, &margining_caps_fops);
debugfs_create_file("lanes", 0600, dir, port, &margining_lanes_fops);
debugfs_create_file("mode", 0600, dir, port, &margining_mode_fops);
debugfs_create_file("run", 0600, dir, port, &margining_run_fops);
debugfs_create_file("results", 0600, dir, port, &margining_results_fops);
debugfs_create_file("test", 0600, dir, port, &margining_test_fops);
if (independent_voltage_margins(usb4) ||
(supports_time(usb4) && independent_time_margins(usb4)))
debugfs_create_file("margin", 0600, dir, port, &margining_margin_fops);
}
static void margining_port_remove(struct tb_port *port)
{
struct dentry *parent;
char dir_name[10];
if (!port->usb4)
return;
snprintf(dir_name, sizeof(dir_name), "port%d", port->port);
parent = debugfs_lookup(dir_name, port->sw->debugfs_dir);
if (parent)
debugfs_remove_recursive(debugfs_lookup("margining", parent));
kfree(port->usb4->margining);
port->usb4->margining = NULL;
}
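/*
 * Add margining entries for both ends of the link between the router
 * and its parent. Nothing is done for the host router (empty route).
 */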
static void margining_switch_init(struct tb_switch *sw)
{
struct tb_port *upstream, *downstream;
struct tb_switch *parent_sw;
u64 route = tb_route(sw);
if (!route)
return;
upstream = tb_upstream_port(sw);
parent_sw = tb_switch_parent(sw);
downstream = tb_port_at(route, parent_sw);
margining_port_init(downstream);
margining_port_init(upstream);
}
static void margining_switch_remove(struct tb_switch *sw)
{
struct tb_port *upstream, *downstream;
struct tb_switch *parent_sw;
u64 route = tb_route(sw);
if (!route)
return;
upstream = tb_upstream_port(sw);
parent_sw = tb_switch_parent(sw);
downstream = tb_port_at(route, parent_sw);
margining_port_remove(upstream);
margining_port_remove(downstream);
}
static void margining_xdomain_init(struct tb_xdomain *xd)
{
struct tb_switch *parent_sw;
struct tb_port *downstream;
parent_sw = tb_xdomain_parent(xd);
downstream = tb_port_at(xd->route, parent_sw);
margining_port_init(downstream);
}
static void margining_xdomain_remove(struct tb_xdomain *xd)
{
struct tb_switch *parent_sw;
struct tb_port *downstream;
parent_sw = tb_xdomain_parent(xd);
downstream = tb_port_at(xd->route, parent_sw);
margining_port_remove(downstream);
}
#else
static inline void margining_switch_init(struct tb_switch *sw) { }
static inline void margining_switch_remove(struct tb_switch *sw) { }
static inline void margining_xdomain_init(struct tb_xdomain *xd) { }
static inline void margining_xdomain_remove(struct tb_xdomain *xd) { }
#endif
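/* Clear all counter sets of the port by writing zeros over them */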
static int port_clear_all_counters(struct tb_port *port)
{
u32 *buf;
int ret;
buf = kcalloc(COUNTER_SET_LEN * port->config.max_counters, sizeof(u32),
GFP_KERNEL);
if (!buf)
return -ENOMEM;
ret = tb_port_write(port, buf, TB_CFG_COUNTERS, 0,
COUNTER_SET_LEN * port->config.max_counters);
kfree(buf);
return ret;
}
static ssize_t counters_write(struct file *file, const char __user *user_buf,
size_t count, loff_t *ppos)
{
struct seq_file *s = file->private_data;
struct tb_port *port = s->private;
struct tb_switch *sw = port->sw;
struct tb *tb = port->sw->tb;
char *buf;
int ret;
buf = validate_and_copy_from_user(user_buf, &count);
if (IS_ERR(buf))
return PTR_ERR(buf);
pm_runtime_get_sync(&sw->dev);
if (mutex_lock_interruptible(&tb->lock)) {
ret = -ERESTARTSYS;
goto out;
}
	/* If only the delimiter was written, clear all counters in one shot */
if (buf[0] == '\n') {
ret = port_clear_all_counters(port);
} else {
char *line = buf;
u32 val, offset;
ret = -EINVAL;
while (parse_line(&line, &offset, &val, 1, 4)) {
ret = tb_port_write(port, &val, TB_CFG_COUNTERS,
offset, 1);
if (ret)
break;
}
}
mutex_unlock(&tb->lock);
out:
pm_runtime_mark_last_busy(&sw->dev);
pm_runtime_put_autosuspend(&sw->dev);
free_page((unsigned long)buf);
return ret < 0 ? ret : count;
}
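/*
 * Fallback used when a multi-dword capability read fails: dump the
 * registers one dword at a time so the accessible ones are still shown
 * and the inaccessible ones are marked as such.
 */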
static void cap_show_by_dw(struct seq_file *s, struct tb_switch *sw,
struct tb_port *port, unsigned int cap,
unsigned int offset, u8 cap_id, u8 vsec_id,
int dwords)
{
int i, ret;
u32 data;
for (i = 0; i < dwords; i++) {
if (port)
ret = tb_port_read(port, &data, TB_CFG_PORT, cap + offset + i, 1);
else
ret = tb_sw_read(sw, &data, TB_CFG_SWITCH, cap + offset + i, 1);
if (ret) {
seq_printf(s, "0x%04x <not accessible>\n", cap + offset + i);
continue;
}
seq_printf(s, "0x%04x %4d 0x%02x 0x%02x 0x%08x\n", cap + offset + i,
offset + i, cap_id, vsec_id, data);
}
}
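/*
 * Dump @length dwords of the capability starting at @cap. Registers are
 * read in chunks of TB_MAX_CONFIG_RW_LENGTH dwords; if a chunk read
 * fails, fall back to dumping the remaining registers dword by dword.
 */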
static void cap_show(struct seq_file *s, struct tb_switch *sw,
struct tb_port *port, unsigned int cap, u8 cap_id,
u8 vsec_id, int length)
{
int ret, offset = 0;
while (length > 0) {
int i, dwords = min(length, TB_MAX_CONFIG_RW_LENGTH);
u32 data[TB_MAX_CONFIG_RW_LENGTH];
if (port)
ret = tb_port_read(port, data, TB_CFG_PORT, cap + offset,
dwords);
else
ret = tb_sw_read(sw, data, TB_CFG_SWITCH, cap + offset, dwords);
if (ret) {
cap_show_by_dw(s, sw, port, cap, offset, cap_id, vsec_id, length);
return;
}
for (i = 0; i < dwords; i++) {
seq_printf(s, "0x%04x %4d 0x%02x 0x%02x 0x%08x\n",
cap + offset + i, offset + i,
cap_id, vsec_id, data[i]);
}
length -= dwords;
offset += dwords;
}
}
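/*
 * Determine the length of the capability at @cap from its header and
 * then dump its registers.
 */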
static void port_cap_show(struct tb_port *port, struct seq_file *s,
unsigned int cap)
{
struct tb_cap_any header;
u8 vsec_id = 0;
size_t length;
int ret;
ret = tb_port_read(port, &header, TB_CFG_PORT, cap, 1);
if (ret) {
seq_printf(s, "0x%04x <capability read failed>\n", cap);
return;
}
switch (header.basic.cap) {
case TB_PORT_CAP_PHY:
length = PORT_CAP_LANE_LEN;
break;
case TB_PORT_CAP_TIME1:
if (usb4_switch_version(port->sw) < 2)
length = PORT_CAP_TMU_V1_LEN;
else
length = PORT_CAP_TMU_V2_LEN;
break;
case TB_PORT_CAP_POWER:
length = PORT_CAP_POWER_LEN;
break;
case TB_PORT_CAP_ADAP:
if (tb_port_is_pcie_down(port) || tb_port_is_pcie_up(port)) {
if (usb4_switch_version(port->sw) < 2)
length = PORT_CAP_V1_PCIE_LEN;
else
length = PORT_CAP_V2_PCIE_LEN;
} else if (tb_port_is_dpin(port)) {
if (usb4_switch_version(port->sw) < 2)
length = PORT_CAP_DP_V1_LEN;
else
length = PORT_CAP_DP_V2_LEN;
} else if (tb_port_is_dpout(port)) {
length = PORT_CAP_DP_V1_LEN;
} else if (tb_port_is_usb3_down(port) ||
tb_port_is_usb3_up(port)) {
length = PORT_CAP_USB3_LEN;
} else {
seq_printf(s, "0x%04x <unsupported capability 0x%02x>\n",
cap, header.basic.cap);
return;
}
break;
case TB_PORT_CAP_VSE:
if (!header.extended_short.length) {
ret = tb_port_read(port, (u32 *)&header + 1, TB_CFG_PORT,
cap + 1, 1);
if (ret) {
seq_printf(s, "0x%04x <capability read failed>\n",
cap + 1);
return;
}
length = header.extended_long.length;
vsec_id = header.extended_short.vsec_id;
} else {
length = header.extended_short.length;
vsec_id = header.extended_short.vsec_id;
}
break;
case TB_PORT_CAP_USB4:
length = PORT_CAP_USB4_LEN;
break;
default:
seq_printf(s, "0x%04x <unsupported capability 0x%02x>\n",
cap, header.basic.cap);
return;
}
cap_show(s, NULL, port, cap, header.basic.cap, vsec_id, length);
}
static void port_caps_show(struct tb_port *port, struct seq_file *s)
{
int cap;
cap = tb_port_next_cap(port, 0);
while (cap > 0) {
port_cap_show(port, s, cap);
cap = tb_port_next_cap(port, cap);
}
}
static int port_basic_regs_show(struct tb_port *port, struct seq_file *s)
{
u32 data[PORT_CAP_BASIC_LEN];
int ret, i;
ret = tb_port_read(port, data, TB_CFG_PORT, 0, ARRAY_SIZE(data));
if (ret)
return ret;
for (i = 0; i < ARRAY_SIZE(data); i++)
seq_printf(s, "0x%04x %4d 0x00 0x00 0x%08x\n", i, i, data[i]);
return 0;
}
static int port_regs_show(struct seq_file *s, void *not_used)
{
struct tb_port *port = s->private;
struct tb_switch *sw = port->sw;
struct tb *tb = sw->tb;
int ret;
pm_runtime_get_sync(&sw->dev);
if (mutex_lock_interruptible(&tb->lock)) {
ret = -ERESTARTSYS;
goto out_rpm_put;
}
seq_puts(s, "# offset relative_offset cap_id vs_cap_id value\n");
ret = port_basic_regs_show(port, s);
if (ret)
goto out_unlock;
port_caps_show(port, s);
out_unlock:
mutex_unlock(&tb->lock);
out_rpm_put:
pm_runtime_mark_last_busy(&sw->dev);
pm_runtime_put_autosuspend(&sw->dev);
return ret;
}
DEBUGFS_ATTR_RW(port_regs);
static void switch_cap_show(struct tb_switch *sw, struct seq_file *s,
unsigned int cap)
{
struct tb_cap_any header;
int ret, length;
u8 vsec_id = 0;
ret = tb_sw_read(sw, &header, TB_CFG_SWITCH, cap, 1);
if (ret) {
seq_printf(s, "0x%04x <capability read failed>\n", cap);
return;
}
if (header.basic.cap == TB_SWITCH_CAP_VSE) {
if (!header.extended_short.length) {
ret = tb_sw_read(sw, (u32 *)&header + 1, TB_CFG_SWITCH,
cap + 1, 1);
if (ret) {
seq_printf(s, "0x%04x <capability read failed>\n",
cap + 1);
return;
}
length = header.extended_long.length;
} else {
length = header.extended_short.length;
}
vsec_id = header.extended_short.vsec_id;
} else {
if (header.basic.cap == TB_SWITCH_CAP_TMU) {
length = SWITCH_CAP_TMU_LEN;
} else {
seq_printf(s, "0x%04x <unknown capability 0x%02x>\n",
cap, header.basic.cap);
return;
}
}
cap_show(s, sw, NULL, cap, header.basic.cap, vsec_id, length);
}
static void switch_caps_show(struct tb_switch *sw, struct seq_file *s)
{
int cap;
cap = tb_switch_next_cap(sw, 0);
while (cap > 0) {
switch_cap_show(sw, s, cap);
cap = tb_switch_next_cap(sw, cap);
}
}
static int switch_basic_regs_show(struct tb_switch *sw, struct seq_file *s)
{
u32 data[SWITCH_CAP_BASIC_LEN];
size_t dwords;
int ret, i;
/* Only USB4 has the additional registers */
if (tb_switch_is_usb4(sw))
dwords = ARRAY_SIZE(data);
else
dwords = 7;
ret = tb_sw_read(sw, data, TB_CFG_SWITCH, 0, dwords);
if (ret)
return ret;
for (i = 0; i < dwords; i++)
seq_printf(s, "0x%04x %4d 0x00 0x00 0x%08x\n", i, i, data[i]);
return 0;
}
static int switch_regs_show(struct seq_file *s, void *not_used)
{
struct tb_switch *sw = s->private;
struct tb *tb = sw->tb;
int ret;
pm_runtime_get_sync(&sw->dev);
if (mutex_lock_interruptible(&tb->lock)) {
ret = -ERESTARTSYS;
goto out_rpm_put;
}
seq_puts(s, "# offset relative_offset cap_id vs_cap_id value\n");
ret = switch_basic_regs_show(sw, s);
if (ret)
goto out_unlock;
switch_caps_show(sw, s);
out_unlock:
mutex_unlock(&tb->lock);
out_rpm_put:
pm_runtime_mark_last_busy(&sw->dev);
pm_runtime_put_autosuspend(&sw->dev);
return ret;
}
DEBUGFS_ATTR_RW(switch_regs);
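/* Dump the path config space entry of the port for the given HopID */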
static int path_show_one(struct tb_port *port, struct seq_file *s, int hopid)
{
u32 data[PATH_LEN];
int ret, i;
ret = tb_port_read(port, data, TB_CFG_HOPS, hopid * PATH_LEN,
ARRAY_SIZE(data));
if (ret) {
seq_printf(s, "0x%04x <not accessible>\n", hopid * PATH_LEN);
return ret;
}
for (i = 0; i < ARRAY_SIZE(data); i++) {
seq_printf(s, "0x%04x %4d 0x%02x 0x%08x\n",
hopid * PATH_LEN + i, i, hopid, data[i]);
}
return 0;
}
static int path_show(struct seq_file *s, void *not_used)
{
struct tb_port *port = s->private;
struct tb_switch *sw = port->sw;
struct tb *tb = sw->tb;
int start, i, ret = 0;
pm_runtime_get_sync(&sw->dev);
if (mutex_lock_interruptible(&tb->lock)) {
ret = -ERESTARTSYS;
goto out_rpm_put;
}
seq_puts(s, "# offset relative_offset in_hop_id value\n");
/* NHI and lane adapters have entry for path 0 */
if (tb_port_is_null(port) || tb_port_is_nhi(port)) {
ret = path_show_one(port, s, 0);
if (ret)
goto out_unlock;
}
start = tb_port_is_nhi(port) ? 1 : TB_PATH_MIN_HOPID;
for (i = start; i <= port->config.max_in_hop_id; i++) {
ret = path_show_one(port, s, i);
if (ret)
break;
}
out_unlock:
mutex_unlock(&tb->lock);
out_rpm_put:
pm_runtime_mark_last_busy(&sw->dev);
pm_runtime_put_autosuspend(&sw->dev);
return ret;
}
DEBUGFS_ATTR_RO(path);
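/* Dump a single counter set of the port */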
static int counter_set_regs_show(struct tb_port *port, struct seq_file *s,
int counter)
{
u32 data[COUNTER_SET_LEN];
int ret, i;
ret = tb_port_read(port, data, TB_CFG_COUNTERS,
counter * COUNTER_SET_LEN, ARRAY_SIZE(data));
if (ret) {
seq_printf(s, "0x%04x <not accessible>\n",
counter * COUNTER_SET_LEN);
return ret;
}
for (i = 0; i < ARRAY_SIZE(data); i++) {
seq_printf(s, "0x%04x %4d 0x%02x 0x%08x\n",
counter * COUNTER_SET_LEN + i, i, counter, data[i]);
}
return 0;
}
static int counters_show(struct seq_file *s, void *not_used)
{
struct tb_port *port = s->private;
struct tb_switch *sw = port->sw;
struct tb *tb = sw->tb;
int i, ret = 0;
pm_runtime_get_sync(&sw->dev);
if (mutex_lock_interruptible(&tb->lock)) {
ret = -ERESTARTSYS;
goto out;
}
seq_puts(s, "# offset relative_offset counter_id value\n");
for (i = 0; i < port->config.max_counters; i++) {
ret = counter_set_regs_show(port, s, i);
if (ret)
break;
}
mutex_unlock(&tb->lock);
out:
pm_runtime_mark_last_busy(&sw->dev);
pm_runtime_put_autosuspend(&sw->dev);
return ret;
}
DEBUGFS_ATTR_RW(counters);
/**
* tb_switch_debugfs_init() - Add debugfs entries for router
* @sw: Pointer to the router
*
* Adds debugfs directories and files for given router.
*/
void tb_switch_debugfs_init(struct tb_switch *sw)
{
struct dentry *debugfs_dir;
struct tb_port *port;
debugfs_dir = debugfs_create_dir(dev_name(&sw->dev), tb_debugfs_root);
sw->debugfs_dir = debugfs_dir;
debugfs_create_file("regs", DEBUGFS_MODE, debugfs_dir, sw,
&switch_regs_fops);
tb_switch_for_each_port(sw, port) {
struct dentry *debugfs_dir;
char dir_name[10];
if (port->disabled)
continue;
if (port->config.type == TB_TYPE_INACTIVE)
continue;
snprintf(dir_name, sizeof(dir_name), "port%d", port->port);
debugfs_dir = debugfs_create_dir(dir_name, sw->debugfs_dir);
debugfs_create_file("regs", DEBUGFS_MODE, debugfs_dir,
port, &port_regs_fops);
debugfs_create_file("path", 0400, debugfs_dir, port,
&path_fops);
if (port->config.counters_support)
debugfs_create_file("counters", 0600, debugfs_dir, port,
&counters_fops);
}
margining_switch_init(sw);
}
/**
* tb_switch_debugfs_remove() - Remove all router debugfs entries
* @sw: Pointer to the router
*
* Removes all previously added debugfs entries under this router.
*/
void tb_switch_debugfs_remove(struct tb_switch *sw)
{
margining_switch_remove(sw);
debugfs_remove_recursive(sw->debugfs_dir);
}
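/**
 * tb_xdomain_debugfs_init() - Add debugfs entries for XDomain connection
 * @xd: Pointer to the XDomain
 *
 * Adds debugfs entries (such as lane margining) for the parent router
 * port that leads to @xd.
 */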
void tb_xdomain_debugfs_init(struct tb_xdomain *xd)
{
margining_xdomain_init(xd);
}
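/**
 * tb_xdomain_debugfs_remove() - Remove XDomain debugfs entries
 * @xd: Pointer to the XDomain
 *
 * Removes the debugfs entries previously added for @xd.
 */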
void tb_xdomain_debugfs_remove(struct tb_xdomain *xd)
{
margining_xdomain_remove(xd);
}
/**
* tb_service_debugfs_init() - Add debugfs directory for service
* @svc: Thunderbolt service pointer
*
* Adds debugfs directory for service.
*/
void tb_service_debugfs_init(struct tb_service *svc)
{
svc->debugfs_dir = debugfs_create_dir(dev_name(&svc->dev),
tb_debugfs_root);
}
/**
* tb_service_debugfs_remove() - Remove service debugfs directory
* @svc: Thunderbolt service pointer
*
* Removes the previously created debugfs directory for @svc.
*/
void tb_service_debugfs_remove(struct tb_service *svc)
{
debugfs_remove_recursive(svc->debugfs_dir);
svc->debugfs_dir = NULL;
}
void tb_debugfs_init(void)
{
tb_debugfs_root = debugfs_create_dir("thunderbolt", NULL);
}
void tb_debugfs_exit(void)
{
debugfs_remove_recursive(tb_debugfs_root);
}
| linux-master | drivers/thunderbolt/debugfs.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Thunderbolt driver - capabilities lookup
*
* Copyright (c) 2014 Andreas Noever <[email protected]>
* Copyright (C) 2018, Intel Corporation
*/
#include <linux/slab.h>
#include <linux/errno.h>
#include "tb.h"
#define CAP_OFFSET_MAX 0xff
#define VSE_CAP_OFFSET_MAX 0xffff
#define TMU_ACCESS_EN BIT(20)
static int tb_port_enable_tmu(struct tb_port *port, bool enable)
{
struct tb_switch *sw = port->sw;
u32 value, offset;
int ret;
/*
* Legacy devices need to have TMU access enabled before port
* space can be fully accessed.
*/
if (tb_switch_is_light_ridge(sw))
offset = 0x26;
else if (tb_switch_is_eagle_ridge(sw))
offset = 0x2a;
else
return 0;
ret = tb_sw_read(sw, &value, TB_CFG_SWITCH, offset, 1);
if (ret)
return ret;
if (enable)
value |= TMU_ACCESS_EN;
else
value &= ~TMU_ACCESS_EN;
return tb_sw_write(sw, &value, TB_CFG_SWITCH, offset, 1);
}
static void tb_port_dummy_read(struct tb_port *port)
{
/*
* When reading from next capability pointer location in port
* config space the read data is not cleared on LR. To avoid
* reading stale data on next read perform one dummy read after
* port capabilities are walked.
*/
if (tb_switch_is_light_ridge(port->sw)) {
u32 dummy;
tb_port_read(port, &dummy, TB_CFG_PORT, 0, 1);
}
}
/**
* tb_port_next_cap() - Return next capability in the linked list
* @port: Port to find the capability for
* @offset: Previous capability offset (%0 for start)
*
 * Finds dword offset of the next capability in port config space
* capability list and returns it. Passing %0 returns the first entry in
* the capability list. If no next capability is found returns %0. In case
* of failure returns negative errno.
*/
int tb_port_next_cap(struct tb_port *port, unsigned int offset)
{
struct tb_cap_any header;
int ret;
if (!offset)
return port->config.first_cap_offset;
ret = tb_port_read(port, &header, TB_CFG_PORT, offset, 1);
if (ret)
return ret;
return header.basic.next;
}
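/* Walk the capability list of the port looking for capability @cap */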
static int __tb_port_find_cap(struct tb_port *port, enum tb_port_cap cap)
{
int offset = 0;
do {
struct tb_cap_any header;
int ret;
offset = tb_port_next_cap(port, offset);
if (offset < 0)
return offset;
ret = tb_port_read(port, &header, TB_CFG_PORT, offset, 1);
if (ret)
return ret;
if (header.basic.cap == cap)
return offset;
} while (offset > 0);
return -ENOENT;
}
/**
* tb_port_find_cap() - Find port capability
* @port: Port to find the capability for
 * @cap: Capability to look for
*
* Returns offset to start of capability or %-ENOENT if no such
* capability was found. Negative errno is returned if there was an
* error.
*/
int tb_port_find_cap(struct tb_port *port, enum tb_port_cap cap)
{
int ret;
ret = tb_port_enable_tmu(port, true);
if (ret)
return ret;
ret = __tb_port_find_cap(port, cap);
tb_port_dummy_read(port);
tb_port_enable_tmu(port, false);
return ret;
}
/**
* tb_switch_next_cap() - Return next capability in the linked list
* @sw: Switch to find the capability for
* @offset: Previous capability offset (%0 for start)
*
* Finds dword offset of the next capability in router config space
* capability list and returns it. Passing %0 returns the first entry in
* the capability list. If no next capability is found returns %0. In case
* of failure returns negative errno.
*/
int tb_switch_next_cap(struct tb_switch *sw, unsigned int offset)
{
struct tb_cap_any header;
int ret;
if (!offset)
return sw->config.first_cap_offset;
ret = tb_sw_read(sw, &header, TB_CFG_SWITCH, offset, 2);
if (ret)
return ret;
switch (header.basic.cap) {
case TB_SWITCH_CAP_TMU:
ret = header.basic.next;
break;
case TB_SWITCH_CAP_VSE:
if (!header.extended_short.length)
ret = header.extended_long.next;
else
ret = header.extended_short.next;
break;
default:
tb_sw_dbg(sw, "unknown capability %#x at %#x\n",
header.basic.cap, offset);
ret = -EINVAL;
break;
}
return ret >= VSE_CAP_OFFSET_MAX ? 0 : ret;
}
/**
* tb_switch_find_cap() - Find switch capability
* @sw: Switch to find the capability for
 * @cap: Capability to look for
*
* Returns offset to start of capability or %-ENOENT if no such
* capability was found. Negative errno is returned if there was an
* error.
*/
int tb_switch_find_cap(struct tb_switch *sw, enum tb_switch_cap cap)
{
int offset = 0;
do {
struct tb_cap_any header;
int ret;
offset = tb_switch_next_cap(sw, offset);
if (offset < 0)
return offset;
ret = tb_sw_read(sw, &header, TB_CFG_SWITCH, offset, 1);
if (ret)
return ret;
if (header.basic.cap == cap)
return offset;
} while (offset);
return -ENOENT;
}
/**
* tb_switch_find_vse_cap() - Find switch vendor specific capability
* @sw: Switch to find the capability for
 * @vsec: Vendor specific capability to look for
*
 * Function enumerates vendor specific capabilities (VSEC) of a switch
* and returns offset when capability matching @vsec is found. If no
* such capability is found returns %-ENOENT. In case of error returns
* negative errno.
*/
int tb_switch_find_vse_cap(struct tb_switch *sw, enum tb_switch_vse_cap vsec)
{
int offset = 0;
do {
struct tb_cap_any header;
int ret;
offset = tb_switch_next_cap(sw, offset);
if (offset < 0)
return offset;
ret = tb_sw_read(sw, &header, TB_CFG_SWITCH, offset, 1);
if (ret)
return ret;
if (header.extended_short.cap == TB_SWITCH_CAP_VSE &&
header.extended_short.vsec_id == vsec)
return offset;
} while (offset);
return -ENOENT;
}
| linux-master | drivers/thunderbolt/cap.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Thunderbolt driver - bus logic (NHI independent)
*
* Copyright (c) 2014 Andreas Noever <[email protected]>
* Copyright (C) 2019, Intel Corporation
*/
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/delay.h>
#include <linux/pm_runtime.h>
#include <linux/platform_data/x86/apple.h>
#include "tb.h"
#include "tb_regs.h"
#include "tunnel.h"
#define TB_TIMEOUT 100 /* ms */
#define MAX_GROUPS 7 /* max Group_ID is 7 */
/**
* struct tb_cm - Simple Thunderbolt connection manager
* @tunnel_list: List of active tunnels
* @dp_resources: List of available DP resources for DP tunneling
* @hotplug_active: tb_handle_hotplug will stop progressing plug
* events and exit if this is not set (it needs to
* acquire the lock one more time). Used to drain wq
* after cfg has been paused.
* @remove_work: Work used to remove any unplugged routers after
* runtime resume
* @groups: Bandwidth groups used in this domain.
*/
struct tb_cm {
struct list_head tunnel_list;
struct list_head dp_resources;
bool hotplug_active;
struct delayed_work remove_work;
struct tb_bandwidth_group groups[MAX_GROUPS];
};
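/*
 * The connection manager private data lives right after struct tb in
 * memory, so the containing domain structure can be recovered with
 * simple pointer arithmetic.
 */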
static inline struct tb *tcm_to_tb(struct tb_cm *tcm)
{
return ((void *)tcm - sizeof(struct tb));
}
struct tb_hotplug_event {
struct work_struct work;
struct tb *tb;
u64 route;
u8 port;
bool unplug;
};
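/* Initialize the fixed set of bandwidth groups (Group_ID 1 .. MAX_GROUPS) */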
static void tb_init_bandwidth_groups(struct tb_cm *tcm)
{
int i;
for (i = 0; i < ARRAY_SIZE(tcm->groups); i++) {
struct tb_bandwidth_group *group = &tcm->groups[i];
group->tb = tcm_to_tb(tcm);
group->index = i + 1;
INIT_LIST_HEAD(&group->ports);
}
}
static void tb_bandwidth_group_attach_port(struct tb_bandwidth_group *group,
struct tb_port *in)
{
if (!group || WARN_ON(in->group))
return;
in->group = group;
list_add_tail(&in->group_list, &group->ports);
tb_port_dbg(in, "attached to bandwidth group %d\n", group->index);
}
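/* Return the first bandwidth group that has no ports attached to it */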
static struct tb_bandwidth_group *tb_find_free_bandwidth_group(struct tb_cm *tcm)
{
int i;
for (i = 0; i < ARRAY_SIZE(tcm->groups); i++) {
struct tb_bandwidth_group *group = &tcm->groups[i];
if (list_empty(&group->ports))
return group;
}
return NULL;
}
static struct tb_bandwidth_group *
tb_attach_bandwidth_group(struct tb_cm *tcm, struct tb_port *in,
struct tb_port *out)
{
struct tb_bandwidth_group *group;
struct tb_tunnel *tunnel;
/*
* Find all DP tunnels that go through all the same USB4 links
	 * as this one. Because we always set up tunnels the same way we
* can just check for the routers at both ends of the tunnels
* and if they are the same we have a match.
*/
list_for_each_entry(tunnel, &tcm->tunnel_list, list) {
if (!tb_tunnel_is_dp(tunnel))
continue;
if (tunnel->src_port->sw == in->sw &&
tunnel->dst_port->sw == out->sw) {
group = tunnel->src_port->group;
if (group) {
tb_bandwidth_group_attach_port(group, in);
return group;
}
}
}
/* Pick up next available group then */
group = tb_find_free_bandwidth_group(tcm);
if (group)
tb_bandwidth_group_attach_port(group, in);
else
tb_port_warn(in, "no available bandwidth groups\n");
return group;
}
static void tb_discover_bandwidth_group(struct tb_cm *tcm, struct tb_port *in,
struct tb_port *out)
{
if (usb4_dp_port_bandwidth_mode_enabled(in)) {
int index, i;
index = usb4_dp_port_group_id(in);
for (i = 0; i < ARRAY_SIZE(tcm->groups); i++) {
if (tcm->groups[i].index == index) {
tb_bandwidth_group_attach_port(&tcm->groups[i], in);
return;
}
}
}
tb_attach_bandwidth_group(tcm, in, out);
}
static void tb_detach_bandwidth_group(struct tb_port *in)
{
struct tb_bandwidth_group *group = in->group;
if (group) {
in->group = NULL;
list_del_init(&in->group_list);
tb_port_dbg(in, "detached from bandwidth group %d\n", group->index);
}
}
static void tb_handle_hotplug(struct work_struct *work);
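/*
 * Queue a hotplug event to be handled by tb_handle_hotplug() on the
 * domain workqueue. The event is silently dropped if the allocation
 * fails.
 */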
static void tb_queue_hotplug(struct tb *tb, u64 route, u8 port, bool unplug)
{
struct tb_hotplug_event *ev;
ev = kmalloc(sizeof(*ev), GFP_KERNEL);
if (!ev)
return;
ev->tb = tb;
ev->route = route;
ev->port = port;
ev->unplug = unplug;
INIT_WORK(&ev->work, tb_handle_hotplug);
queue_work(tb->wq, &ev->work);
}
/* enumeration & hot plug handling */
static void tb_add_dp_resources(struct tb_switch *sw)
{
struct tb_cm *tcm = tb_priv(sw->tb);
struct tb_port *port;
tb_switch_for_each_port(sw, port) {
if (!tb_port_is_dpin(port))
continue;
if (!tb_switch_query_dp_resource(sw, port))
continue;
list_add_tail(&port->list, &tcm->dp_resources);
tb_port_dbg(port, "DP IN resource available\n");
}
}
static void tb_remove_dp_resources(struct tb_switch *sw)
{
struct tb_cm *tcm = tb_priv(sw->tb);
struct tb_port *port, *tmp;
/* Clear children resources first */
tb_switch_for_each_port(sw, port) {
if (tb_port_has_remote(port))
tb_remove_dp_resources(port->remote->sw);
}
list_for_each_entry_safe(port, tmp, &tcm->dp_resources, list) {
if (port->sw == sw) {
tb_port_dbg(port, "DP OUT resource unavailable\n");
list_del_init(&port->list);
}
}
}
static void tb_discover_dp_resource(struct tb *tb, struct tb_port *port)
{
struct tb_cm *tcm = tb_priv(tb);
struct tb_port *p;
list_for_each_entry(p, &tcm->dp_resources, list) {
if (p == port)
return;
}
tb_port_dbg(port, "DP %s resource available discovered\n",
tb_port_is_dpin(port) ? "IN" : "OUT");
list_add_tail(&port->list, &tcm->dp_resources);
}
static void tb_discover_dp_resources(struct tb *tb)
{
struct tb_cm *tcm = tb_priv(tb);
struct tb_tunnel *tunnel;
list_for_each_entry(tunnel, &tcm->tunnel_list, list) {
if (tb_tunnel_is_dp(tunnel))
tb_discover_dp_resource(tb, tunnel->dst_port);
}
}
/* Enables CL states up to host router */
static int tb_enable_clx(struct tb_switch *sw)
{
struct tb_cm *tcm = tb_priv(sw->tb);
unsigned int clx = TB_CL0S | TB_CL1;
const struct tb_tunnel *tunnel;
int ret;
/*
* Currently only enable CLx for the first link. This is enough
* to allow the CPU to save energy at least on Intel hardware
* and makes it slightly simpler to implement. We may change
* this in the future to cover the whole topology if it turns
* out to be beneficial.
*/
while (sw && sw->config.depth > 1)
sw = tb_switch_parent(sw);
if (!sw)
return 0;
if (sw->config.depth != 1)
return 0;
/*
* If we are re-enabling then check if there is an active DMA
* tunnel and in that case bail out.
*/
list_for_each_entry(tunnel, &tcm->tunnel_list, list) {
if (tb_tunnel_is_dma(tunnel)) {
if (tb_tunnel_port_on_path(tunnel, tb_upstream_port(sw)))
return 0;
}
}
/*
* Initially try with CL2. If that's not supported by the
* topology try with CL0s and CL1 and then give up.
*/
ret = tb_switch_clx_enable(sw, clx | TB_CL2);
if (ret == -EOPNOTSUPP)
ret = tb_switch_clx_enable(sw, clx);
return ret == -EOPNOTSUPP ? 0 : ret;
}
/* Disables CL states up to the host router */
static void tb_disable_clx(struct tb_switch *sw)
{
do {
if (tb_switch_clx_disable(sw) < 0)
tb_sw_warn(sw, "failed to disable CL states\n");
sw = tb_switch_parent(sw);
} while (sw);
}
static int tb_increase_switch_tmu_accuracy(struct device *dev, void *data)
{
struct tb_switch *sw;
sw = tb_to_switch(dev);
if (!sw)
return 0;
if (tb_switch_tmu_is_configured(sw, TB_SWITCH_TMU_MODE_LOWRES)) {
enum tb_switch_tmu_mode mode;
int ret;
if (tb_switch_clx_is_enabled(sw, TB_CL1))
mode = TB_SWITCH_TMU_MODE_HIFI_UNI;
else
mode = TB_SWITCH_TMU_MODE_HIFI_BI;
ret = tb_switch_tmu_configure(sw, mode);
if (ret)
return ret;
return tb_switch_tmu_enable(sw);
}
return 0;
}
static void tb_increase_tmu_accuracy(struct tb_tunnel *tunnel)
{
struct tb_switch *sw;
if (!tunnel)
return;
/*
	 * Once the first DP tunnel is established we change the TMU
	 * accuracy of the first-depth child routers (and the host router)
* to the highest. This is needed for the DP tunneling to work
* but also allows CL0s.
*
* If both routers are v2 then we don't need to do anything as
* they are using enhanced TMU mode that allows all CLx.
*/
sw = tunnel->tb->root_switch;
device_for_each_child(&sw->dev, NULL, tb_increase_switch_tmu_accuracy);
}
static int tb_enable_tmu(struct tb_switch *sw)
{
int ret;
/*
* If both routers at the end of the link are v2 we simply
	 * enable the enhanced uni-directional mode. That covers all
* the CL states. For v1 and before we need to use the normal
* rate to allow CL1 (when supported). Otherwise we keep the TMU
* running at the highest accuracy.
*/
ret = tb_switch_tmu_configure(sw,
TB_SWITCH_TMU_MODE_MEDRES_ENHANCED_UNI);
if (ret == -EOPNOTSUPP) {
if (tb_switch_clx_is_enabled(sw, TB_CL1))
ret = tb_switch_tmu_configure(sw,
TB_SWITCH_TMU_MODE_LOWRES);
else
ret = tb_switch_tmu_configure(sw,
TB_SWITCH_TMU_MODE_HIFI_BI);
}
if (ret)
return ret;
/* If it is already enabled in correct mode, don't touch it */
if (tb_switch_tmu_is_enabled(sw))
return 0;
ret = tb_switch_tmu_disable(sw);
if (ret)
return ret;
ret = tb_switch_tmu_post_time(sw);
if (ret)
return ret;
return tb_switch_tmu_enable(sw);
}
static void tb_switch_discover_tunnels(struct tb_switch *sw,
struct list_head *list,
bool alloc_hopids)
{
struct tb *tb = sw->tb;
struct tb_port *port;
tb_switch_for_each_port(sw, port) {
struct tb_tunnel *tunnel = NULL;
switch (port->config.type) {
case TB_TYPE_DP_HDMI_IN:
tunnel = tb_tunnel_discover_dp(tb, port, alloc_hopids);
tb_increase_tmu_accuracy(tunnel);
break;
case TB_TYPE_PCIE_DOWN:
tunnel = tb_tunnel_discover_pci(tb, port, alloc_hopids);
break;
case TB_TYPE_USB3_DOWN:
tunnel = tb_tunnel_discover_usb3(tb, port, alloc_hopids);
break;
default:
break;
}
if (tunnel)
list_add_tail(&tunnel->list, list);
}
tb_switch_for_each_port(sw, port) {
if (tb_port_has_remote(port)) {
tb_switch_discover_tunnels(port->remote->sw, list,
alloc_hopids);
}
}
}
static void tb_discover_tunnels(struct tb *tb)
{
struct tb_cm *tcm = tb_priv(tb);
struct tb_tunnel *tunnel;
tb_switch_discover_tunnels(tb->root_switch, &tcm->tunnel_list, true);
list_for_each_entry(tunnel, &tcm->tunnel_list, list) {
if (tb_tunnel_is_pci(tunnel)) {
struct tb_switch *parent = tunnel->dst_port->sw;
while (parent != tunnel->src_port->sw) {
parent->boot = true;
parent = tb_switch_parent(parent);
}
} else if (tb_tunnel_is_dp(tunnel)) {
struct tb_port *in = tunnel->src_port;
struct tb_port *out = tunnel->dst_port;
/* Keep the domain from powering down */
pm_runtime_get_sync(&in->sw->dev);
pm_runtime_get_sync(&out->sw->dev);
tb_discover_bandwidth_group(tcm, in, out);
}
}
}
static int tb_port_configure_xdomain(struct tb_port *port, struct tb_xdomain *xd)
{
if (tb_switch_is_usb4(port->sw))
return usb4_port_configure_xdomain(port, xd);
return tb_lc_configure_xdomain(port);
}
static void tb_port_unconfigure_xdomain(struct tb_port *port)
{
if (tb_switch_is_usb4(port->sw))
usb4_port_unconfigure_xdomain(port);
else
tb_lc_unconfigure_xdomain(port);
tb_port_enable(port->dual_link_port);
}
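/*
 * Check if the port leads to another Thunderbolt domain and if so
 * create an XDomain connection for it, unless one already exists for
 * the route.
 */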
static void tb_scan_xdomain(struct tb_port *port)
{
struct tb_switch *sw = port->sw;
struct tb *tb = sw->tb;
struct tb_xdomain *xd;
u64 route;
if (!tb_is_xdomain_enabled())
return;
route = tb_downstream_route(port);
xd = tb_xdomain_find_by_route(tb, route);
if (xd) {
tb_xdomain_put(xd);
return;
}
xd = tb_xdomain_alloc(tb, &sw->dev, route, tb->root_switch->uuid,
NULL);
if (xd) {
tb_port_at(route, sw)->xdomain = xd;
tb_port_configure_xdomain(port, xd);
tb_xdomain_add(xd);
}
}
/**
* tb_find_unused_port() - return the first inactive port on @sw
* @sw: Switch to find the port on
* @type: Port type to look for
*/
static struct tb_port *tb_find_unused_port(struct tb_switch *sw,
enum tb_port_type type)
{
struct tb_port *port;
tb_switch_for_each_port(sw, port) {
if (tb_is_upstream_port(port))
continue;
if (port->config.type != type)
continue;
if (!port->cap_adap)
continue;
if (tb_port_is_enabled(port))
continue;
return port;
}
return NULL;
}
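/*
 * Return the USB3 downstream adapter of @sw mapped to @port if it is
 * not yet enabled, otherwise NULL.
 */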
static struct tb_port *tb_find_usb3_down(struct tb_switch *sw,
const struct tb_port *port)
{
struct tb_port *down;
down = usb4_switch_map_usb3_down(sw, port);
if (down && !tb_usb3_port_is_enabled(down))
return down;
return NULL;
}
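/*
 * Return the first tunnel of @type whose source port matches @src_port
 * or whose destination port matches @dst_port. A NULL port never
 * matches.
 */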
static struct tb_tunnel *tb_find_tunnel(struct tb *tb, enum tb_tunnel_type type,
struct tb_port *src_port,
struct tb_port *dst_port)
{
struct tb_cm *tcm = tb_priv(tb);
struct tb_tunnel *tunnel;
list_for_each_entry(tunnel, &tcm->tunnel_list, list) {
if (tunnel->type == type &&
((src_port && src_port == tunnel->src_port) ||
(dst_port && dst_port == tunnel->dst_port))) {
return tunnel;
}
}
return NULL;
}
static struct tb_tunnel *tb_find_first_usb3_tunnel(struct tb *tb,
struct tb_port *src_port,
struct tb_port *dst_port)
{
struct tb_port *port, *usb3_down;
struct tb_switch *sw;
/* Pick the router that is deepest in the topology */
if (dst_port->sw->config.depth > src_port->sw->config.depth)
sw = dst_port->sw;
else
sw = src_port->sw;
/* Can't be the host router */
if (sw == tb->root_switch)
return NULL;
/* Find the downstream USB4 port that leads to this router */
port = tb_port_at(tb_route(sw), tb->root_switch);
/* Find the corresponding host router USB3 downstream port */
usb3_down = usb4_switch_map_usb3_down(tb->root_switch, port);
if (!usb3_down)
return NULL;
return tb_find_tunnel(tb, TB_TUNNEL_USB3, usb3_down, NULL);
}
static int tb_available_bandwidth(struct tb *tb, struct tb_port *src_port,
struct tb_port *dst_port, int *available_up, int *available_down)
{
int usb3_consumed_up, usb3_consumed_down, ret;
struct tb_cm *tcm = tb_priv(tb);
struct tb_tunnel *tunnel;
struct tb_port *port;
tb_dbg(tb, "calculating available bandwidth between %llx:%u <-> %llx:%u\n",
tb_route(src_port->sw), src_port->port, tb_route(dst_port->sw),
dst_port->port);
tunnel = tb_find_first_usb3_tunnel(tb, src_port, dst_port);
if (tunnel && tunnel->src_port != src_port &&
tunnel->dst_port != dst_port) {
ret = tb_tunnel_consumed_bandwidth(tunnel, &usb3_consumed_up,
&usb3_consumed_down);
if (ret)
return ret;
} else {
usb3_consumed_up = 0;
usb3_consumed_down = 0;
}
	/* Maximum possible bandwidth of an asymmetric Gen 4 link is 120 Gb/s */
*available_up = *available_down = 120000;
/* Find the minimum available bandwidth over all links */
tb_for_each_port_on_path(src_port, dst_port, port) {
int link_speed, link_width, up_bw, down_bw;
if (!tb_port_is_null(port))
continue;
if (tb_is_upstream_port(port)) {
link_speed = port->sw->link_speed;
/*
* sw->link_width is from upstream perspective
* so we use the opposite for downstream of the
* host router.
*/
if (port->sw->link_width == TB_LINK_WIDTH_ASYM_TX) {
up_bw = link_speed * 3 * 1000;
down_bw = link_speed * 1 * 1000;
} else if (port->sw->link_width == TB_LINK_WIDTH_ASYM_RX) {
up_bw = link_speed * 1 * 1000;
down_bw = link_speed * 3 * 1000;
} else {
up_bw = link_speed * port->sw->link_width * 1000;
down_bw = up_bw;
}
} else {
link_speed = tb_port_get_link_speed(port);
if (link_speed < 0)
return link_speed;
link_width = tb_port_get_link_width(port);
if (link_width < 0)
return link_width;
if (link_width == TB_LINK_WIDTH_ASYM_TX) {
up_bw = link_speed * 1 * 1000;
down_bw = link_speed * 3 * 1000;
} else if (link_width == TB_LINK_WIDTH_ASYM_RX) {
up_bw = link_speed * 3 * 1000;
down_bw = link_speed * 1 * 1000;
} else {
up_bw = link_speed * link_width * 1000;
down_bw = up_bw;
}
}
/* Leave 10% guard band */
up_bw -= up_bw / 10;
down_bw -= down_bw / 10;
tb_port_dbg(port, "link total bandwidth %d/%d Mb/s\n", up_bw,
down_bw);
/*
* Find all DP tunnels that cross the port and reduce
* their consumed bandwidth from the available.
*/
list_for_each_entry(tunnel, &tcm->tunnel_list, list) {
int dp_consumed_up, dp_consumed_down;
if (tb_tunnel_is_invalid(tunnel))
continue;
if (!tb_tunnel_is_dp(tunnel))
continue;
if (!tb_tunnel_port_on_path(tunnel, port))
continue;
/*
* Ignore the DP tunnel between src_port and
* dst_port because it is the same tunnel and we
* may be re-calculating estimated bandwidth.
*/
if (tunnel->src_port == src_port &&
tunnel->dst_port == dst_port)
continue;
ret = tb_tunnel_consumed_bandwidth(tunnel,
&dp_consumed_up,
&dp_consumed_down);
if (ret)
return ret;
up_bw -= dp_consumed_up;
down_bw -= dp_consumed_down;
}
/*
* If USB3 is tunneled from the host router down to the
* branch leading to port we need to take USB3 consumed
		 * bandwidth into account regardless of whether it actually
* crosses the port.
*/
up_bw -= usb3_consumed_up;
down_bw -= usb3_consumed_down;
if (up_bw < *available_up)
*available_up = up_bw;
if (down_bw < *available_down)
*available_down = down_bw;
}
if (*available_up < 0)
*available_up = 0;
if (*available_down < 0)
*available_down = 0;
return 0;
}
static int tb_release_unused_usb3_bandwidth(struct tb *tb,
struct tb_port *src_port,
struct tb_port *dst_port)
{
struct tb_tunnel *tunnel;
tunnel = tb_find_first_usb3_tunnel(tb, src_port, dst_port);
return tunnel ? tb_tunnel_release_unused_bandwidth(tunnel) : 0;
}
static void tb_reclaim_usb3_bandwidth(struct tb *tb, struct tb_port *src_port,
struct tb_port *dst_port)
{
int ret, available_up, available_down;
struct tb_tunnel *tunnel;
tunnel = tb_find_first_usb3_tunnel(tb, src_port, dst_port);
if (!tunnel)
return;
tb_dbg(tb, "reclaiming unused bandwidth for USB3\n");
/*
* Calculate available bandwidth for the first hop USB3 tunnel.
* That determines the whole USB3 bandwidth for this branch.
*/
ret = tb_available_bandwidth(tb, tunnel->src_port, tunnel->dst_port,
&available_up, &available_down);
if (ret) {
tb_warn(tb, "failed to calculate available bandwidth\n");
return;
}
tb_dbg(tb, "available bandwidth for USB3 %d/%d Mb/s\n",
available_up, available_down);
tb_tunnel_reclaim_available_bandwidth(tunnel, &available_up, &available_down);
}
static int tb_tunnel_usb3(struct tb *tb, struct tb_switch *sw)
{
struct tb_switch *parent = tb_switch_parent(sw);
int ret, available_up, available_down;
struct tb_port *up, *down, *port;
struct tb_cm *tcm = tb_priv(tb);
struct tb_tunnel *tunnel;
if (!tb_acpi_may_tunnel_usb3()) {
tb_dbg(tb, "USB3 tunneling disabled, not creating tunnel\n");
return 0;
}
up = tb_switch_find_port(sw, TB_TYPE_USB3_UP);
if (!up)
return 0;
if (!sw->link_usb4)
return 0;
/*
* Look up available down port. Since we are chaining it should
* be found right above this switch.
*/
port = tb_switch_downstream_port(sw);
down = tb_find_usb3_down(parent, port);
if (!down)
return 0;
if (tb_route(parent)) {
struct tb_port *parent_up;
/*
* Check first that the parent switch has its upstream USB3
* port enabled. Otherwise the chain is not complete and
* there is no point setting up a new tunnel.
*/
parent_up = tb_switch_find_port(parent, TB_TYPE_USB3_UP);
if (!parent_up || !tb_port_is_enabled(parent_up))
return 0;
/* Make all unused bandwidth available for the new tunnel */
ret = tb_release_unused_usb3_bandwidth(tb, down, up);
if (ret)
return ret;
}
ret = tb_available_bandwidth(tb, down, up, &available_up,
&available_down);
if (ret)
goto err_reclaim;
tb_port_dbg(up, "available bandwidth for new USB3 tunnel %d/%d Mb/s\n",
available_up, available_down);
tunnel = tb_tunnel_alloc_usb3(tb, up, down, available_up,
available_down);
if (!tunnel) {
ret = -ENOMEM;
goto err_reclaim;
}
if (tb_tunnel_activate(tunnel)) {
tb_port_info(up,
"USB3 tunnel activation failed, aborting\n");
ret = -EIO;
goto err_free;
}
list_add_tail(&tunnel->list, &tcm->tunnel_list);
if (tb_route(parent))
tb_reclaim_usb3_bandwidth(tb, down, up);
return 0;
err_free:
tb_tunnel_free(tunnel);
err_reclaim:
if (tb_route(parent))
tb_reclaim_usb3_bandwidth(tb, down, up);
return ret;
}
static int tb_create_usb3_tunnels(struct tb_switch *sw)
{
struct tb_port *port;
int ret;
if (!tb_acpi_may_tunnel_usb3())
return 0;
if (tb_route(sw)) {
ret = tb_tunnel_usb3(sw->tb, sw);
if (ret)
return ret;
}
tb_switch_for_each_port(sw, port) {
if (!tb_port_has_remote(port))
continue;
ret = tb_create_usb3_tunnels(port->remote->sw);
if (ret)
return ret;
}
return 0;
}
static void tb_scan_port(struct tb_port *port);
/*
* tb_scan_switch() - scan for and initialize downstream switches
*/
static void tb_scan_switch(struct tb_switch *sw)
{
struct tb_port *port;
pm_runtime_get_sync(&sw->dev);
tb_switch_for_each_port(sw, port)
tb_scan_port(port);
pm_runtime_mark_last_busy(&sw->dev);
pm_runtime_put_autosuspend(&sw->dev);
}
/*
* tb_scan_port() - check for and initialize switches below port
*/
static void tb_scan_port(struct tb_port *port)
{
struct tb_cm *tcm = tb_priv(port->sw->tb);
struct tb_port *upstream_port;
bool discovery = false;
struct tb_switch *sw;
if (tb_is_upstream_port(port))
return;
if (tb_port_is_dpout(port) && tb_dp_port_hpd_is_active(port) == 1 &&
!tb_dp_port_is_enabled(port)) {
tb_port_dbg(port, "DP adapter HPD set, queuing hotplug\n");
tb_queue_hotplug(port->sw->tb, tb_route(port->sw), port->port,
false);
return;
}
if (port->config.type != TB_TYPE_PORT)
return;
if (port->dual_link_port && port->link_nr)
return; /*
* Downstream switch is reachable through two ports.
* Only scan on the primary port (link_nr == 0).
*/
if (port->usb4)
pm_runtime_get_sync(&port->usb4->dev);
if (tb_wait_for_port(port, false) <= 0)
goto out_rpm_put;
if (port->remote) {
tb_port_dbg(port, "port already has a remote\n");
goto out_rpm_put;
}
tb_retimer_scan(port, true);
sw = tb_switch_alloc(port->sw->tb, &port->sw->dev,
tb_downstream_route(port));
if (IS_ERR(sw)) {
/*
* If there is an error accessing the connected switch
* it may be connected to another domain. Also we allow
* the other domain to be connected to a max depth switch.
*/
if (PTR_ERR(sw) == -EIO || PTR_ERR(sw) == -EADDRNOTAVAIL)
tb_scan_xdomain(port);
goto out_rpm_put;
}
if (tb_switch_configure(sw)) {
tb_switch_put(sw);
goto out_rpm_put;
}
/*
* If there was previously another domain connected remove it
* first.
*/
if (port->xdomain) {
tb_xdomain_remove(port->xdomain);
tb_port_unconfigure_xdomain(port);
port->xdomain = NULL;
}
/*
* Do not send uevents until we have discovered all existing
* tunnels and know which switches were authorized already by
* the boot firmware.
*/
if (!tcm->hotplug_active) {
dev_set_uevent_suppress(&sw->dev, true);
discovery = true;
}
/*
	 * At the moment runtime PM is supported only on Thunderbolt 2 and
	 * beyond (devices with LC).
*/
sw->rpm = sw->generation > 1;
if (tb_switch_add(sw)) {
tb_switch_put(sw);
goto out_rpm_put;
}
/* Link the switches using both links if available */
upstream_port = tb_upstream_port(sw);
port->remote = upstream_port;
upstream_port->remote = port;
if (port->dual_link_port && upstream_port->dual_link_port) {
port->dual_link_port->remote = upstream_port->dual_link_port;
upstream_port->dual_link_port->remote = port->dual_link_port;
}
/* Enable lane bonding if supported */
tb_switch_lane_bonding_enable(sw);
/* Set the link configured */
tb_switch_configure_link(sw);
/*
* CL0s and CL1 are enabled and supported together.
* Silently ignore CLx enabling in case CLx is not supported.
*/
if (discovery)
tb_sw_dbg(sw, "discovery, not touching CL states\n");
else if (tb_enable_clx(sw))
tb_sw_warn(sw, "failed to enable CL states\n");
if (tb_enable_tmu(sw))
tb_sw_warn(sw, "failed to enable TMU\n");
/*
* Configuration valid needs to be set after the TMU has been
* enabled for the upstream port of the router so we do it here.
*/
tb_switch_configuration_valid(sw);
/* Scan upstream retimers */
tb_retimer_scan(upstream_port, true);
/*
* Create USB 3.x tunnels only when the switch is plugged to the
* domain. This is because we scan the domain also during discovery
* and want to discover existing USB 3.x tunnels before we create
* any new.
*/
if (tcm->hotplug_active && tb_tunnel_usb3(sw->tb, sw))
tb_sw_warn(sw, "USB3 tunnel creation failed\n");
tb_add_dp_resources(sw);
tb_scan_switch(sw);
out_rpm_put:
if (port->usb4) {
pm_runtime_mark_last_busy(&port->usb4->dev);
pm_runtime_put_autosuspend(&port->usb4->dev);
}
}
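/*
 * Deactivate the tunnel, drop it from the tunnel list and release any
 * resources (DP IN, runtime PM references, USB3 bandwidth) it was
 * holding.
 */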
static void tb_deactivate_and_free_tunnel(struct tb_tunnel *tunnel)
{
struct tb_port *src_port, *dst_port;
struct tb *tb;
if (!tunnel)
return;
tb_tunnel_deactivate(tunnel);
list_del(&tunnel->list);
tb = tunnel->tb;
src_port = tunnel->src_port;
dst_port = tunnel->dst_port;
switch (tunnel->type) {
case TB_TUNNEL_DP:
tb_detach_bandwidth_group(src_port);
/*
		 * In case of a DP tunnel, make sure the DP IN resource is
* deallocated properly.
*/
tb_switch_dealloc_dp_resource(src_port->sw, src_port);
/* Now we can allow the domain to runtime suspend again */
pm_runtime_mark_last_busy(&dst_port->sw->dev);
pm_runtime_put_autosuspend(&dst_port->sw->dev);
pm_runtime_mark_last_busy(&src_port->sw->dev);
pm_runtime_put_autosuspend(&src_port->sw->dev);
fallthrough;
case TB_TUNNEL_USB3:
tb_reclaim_usb3_bandwidth(tb, src_port, dst_port);
break;
default:
/*
* PCIe and DMA tunnels do not consume guaranteed
* bandwidth.
*/
break;
}
tb_tunnel_free(tunnel);
}
/*
* tb_free_invalid_tunnels() - destroy tunnels of devices that have gone away
*/
static void tb_free_invalid_tunnels(struct tb *tb)
{
struct tb_cm *tcm = tb_priv(tb);
struct tb_tunnel *tunnel;
struct tb_tunnel *n;
list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list) {
if (tb_tunnel_is_invalid(tunnel))
tb_deactivate_and_free_tunnel(tunnel);
}
}
/*
* tb_free_unplugged_children() - traverse hierarchy and free unplugged switches
*/
static void tb_free_unplugged_children(struct tb_switch *sw)
{
struct tb_port *port;
tb_switch_for_each_port(sw, port) {
if (!tb_port_has_remote(port))
continue;
if (port->remote->sw->is_unplugged) {
tb_retimer_remove_all(port);
tb_remove_dp_resources(port->remote->sw);
tb_switch_unconfigure_link(port->remote->sw);
tb_switch_lane_bonding_disable(port->remote->sw);
tb_switch_remove(port->remote->sw);
port->remote = NULL;
if (port->dual_link_port)
port->dual_link_port->remote = NULL;
} else {
tb_free_unplugged_children(port->remote->sw);
}
}
}
static struct tb_port *tb_find_pcie_down(struct tb_switch *sw,
const struct tb_port *port)
{
struct tb_port *down = NULL;
/*
* To keep plugging devices consistently in the same PCIe
* hierarchy, do mapping here for switch downstream PCIe ports.
*/
if (tb_switch_is_usb4(sw)) {
down = usb4_switch_map_pcie_down(sw, port);
} else if (!tb_route(sw)) {
int phy_port = tb_phy_port_from_link(port->port);
int index;
/*
* Hard-coded Thunderbolt port to PCIe down port mapping
* per controller.
*/
if (tb_switch_is_cactus_ridge(sw) ||
tb_switch_is_alpine_ridge(sw))
index = !phy_port ? 6 : 7;
else if (tb_switch_is_falcon_ridge(sw))
index = !phy_port ? 6 : 8;
else if (tb_switch_is_titan_ridge(sw))
index = !phy_port ? 8 : 9;
else
goto out;
/* Validate the hard-coding */
if (WARN_ON(index > sw->config.max_port_number))
goto out;
down = &sw->ports[index];
}
if (down) {
if (WARN_ON(!tb_port_is_pcie_down(down)))
goto out;
if (tb_pci_port_is_enabled(down))
goto out;
return down;
}
out:
return tb_find_unused_port(sw, TB_TYPE_PCIE_DOWN);
}
static void
tb_recalc_estimated_bandwidth_for_group(struct tb_bandwidth_group *group)
{
struct tb_tunnel *first_tunnel;
struct tb *tb = group->tb;
struct tb_port *in;
int ret;
tb_dbg(tb, "re-calculating bandwidth estimation for group %u\n",
group->index);
first_tunnel = NULL;
list_for_each_entry(in, &group->ports, group_list) {
int estimated_bw, estimated_up, estimated_down;
struct tb_tunnel *tunnel;
struct tb_port *out;
if (!usb4_dp_port_bandwidth_mode_enabled(in))
continue;
tunnel = tb_find_tunnel(tb, TB_TUNNEL_DP, in, NULL);
if (WARN_ON(!tunnel))
break;
if (!first_tunnel) {
/*
* Since USB3 bandwidth is shared by all DP
* tunnels under the host router USB4 port, even
* if they do not begin from the host router, we
* can release USB3 bandwidth just once and not
* for each tunnel separately.
*/
first_tunnel = tunnel;
ret = tb_release_unused_usb3_bandwidth(tb,
first_tunnel->src_port, first_tunnel->dst_port);
if (ret) {
tb_port_warn(in,
"failed to release unused bandwidth\n");
break;
}
}
out = tunnel->dst_port;
ret = tb_available_bandwidth(tb, in, out, &estimated_up,
&estimated_down);
if (ret) {
tb_port_warn(in,
"failed to re-calculate estimated bandwidth\n");
break;
}
/*
* Estimated bandwidth includes:
* - already allocated bandwidth for the DP tunnel
* - available bandwidth along the path
* - bandwidth allocated for USB 3.x but not used.
*/
tb_port_dbg(in, "re-calculated estimated bandwidth %u/%u Mb/s\n",
estimated_up, estimated_down);
if (in->sw->config.depth < out->sw->config.depth)
estimated_bw = estimated_down;
else
estimated_bw = estimated_up;
if (usb4_dp_port_set_estimated_bandwidth(in, estimated_bw))
tb_port_warn(in, "failed to update estimated bandwidth\n");
}
if (first_tunnel)
tb_reclaim_usb3_bandwidth(tb, first_tunnel->src_port,
first_tunnel->dst_port);
tb_dbg(tb, "bandwidth estimation for group %u done\n", group->index);
}
static void tb_recalc_estimated_bandwidth(struct tb *tb)
{
struct tb_cm *tcm = tb_priv(tb);
int i;
tb_dbg(tb, "bandwidth consumption changed, re-calculating estimated bandwidth\n");
for (i = 0; i < ARRAY_SIZE(tcm->groups); i++) {
struct tb_bandwidth_group *group = &tcm->groups[i];
if (!list_empty(&group->ports))
tb_recalc_estimated_bandwidth_for_group(group);
}
tb_dbg(tb, "bandwidth re-calculation done\n");
}
static struct tb_port *tb_find_dp_out(struct tb *tb, struct tb_port *in)
{
struct tb_port *host_port, *port;
struct tb_cm *tcm = tb_priv(tb);
host_port = tb_route(in->sw) ?
tb_port_at(tb_route(in->sw), tb->root_switch) : NULL;
list_for_each_entry(port, &tcm->dp_resources, list) {
if (!tb_port_is_dpout(port))
continue;
if (tb_port_is_enabled(port)) {
tb_port_dbg(port, "DP OUT in use\n");
continue;
}
tb_port_dbg(port, "DP OUT available\n");
/*
* Keep the DP tunnel under the topology starting from
* the same host router downstream port.
*/
if (host_port && tb_route(port->sw)) {
struct tb_port *p;
p = tb_port_at(tb_route(port->sw), tb->root_switch);
if (p != host_port)
continue;
}
return port;
}
return NULL;
}
static void tb_tunnel_dp(struct tb *tb)
{
int available_up, available_down, ret, link_nr;
struct tb_cm *tcm = tb_priv(tb);
struct tb_port *port, *in, *out;
struct tb_tunnel *tunnel;
if (!tb_acpi_may_tunnel_dp()) {
tb_dbg(tb, "DP tunneling disabled, not creating tunnel\n");
return;
}
/*
* Find pair of inactive DP IN and DP OUT adapters and then
* establish a DP tunnel between them.
*/
tb_dbg(tb, "looking for DP IN <-> DP OUT pairs:\n");
in = NULL;
out = NULL;
list_for_each_entry(port, &tcm->dp_resources, list) {
if (!tb_port_is_dpin(port))
continue;
if (tb_port_is_enabled(port)) {
tb_port_dbg(port, "DP IN in use\n");
continue;
}
tb_port_dbg(port, "DP IN available\n");
out = tb_find_dp_out(tb, port);
if (out) {
in = port;
break;
}
}
if (!in) {
tb_dbg(tb, "no suitable DP IN adapter available, not tunneling\n");
return;
}
if (!out) {
tb_dbg(tb, "no suitable DP OUT adapter available, not tunneling\n");
return;
}
/*
* This is only applicable to links that are not bonded (so
* when Thunderbolt 1 hardware is involved somewhere in the
* topology). For these try to share the DP bandwidth between
* the two lanes.
*/
link_nr = 1;
list_for_each_entry(tunnel, &tcm->tunnel_list, list) {
if (tb_tunnel_is_dp(tunnel)) {
link_nr = 0;
break;
}
}
/*
* DP stream needs the domain to be active so runtime resume
* both ends of the tunnel.
*
* This should bring the routers in the middle active as well
* and keeps the domain from runtime suspending while the DP
* tunnel is active.
*/
pm_runtime_get_sync(&in->sw->dev);
pm_runtime_get_sync(&out->sw->dev);
if (tb_switch_alloc_dp_resource(in->sw, in)) {
tb_port_dbg(in, "no resource available for DP IN, not tunneling\n");
goto err_rpm_put;
}
if (!tb_attach_bandwidth_group(tcm, in, out))
goto err_dealloc_dp;
/* Make all unused USB3 bandwidth available for the new DP tunnel */
ret = tb_release_unused_usb3_bandwidth(tb, in, out);
if (ret) {
tb_warn(tb, "failed to release unused bandwidth\n");
goto err_detach_group;
}
ret = tb_available_bandwidth(tb, in, out, &available_up, &available_down);
if (ret)
goto err_reclaim_usb;
tb_dbg(tb, "available bandwidth for new DP tunnel %u/%u Mb/s\n",
available_up, available_down);
tunnel = tb_tunnel_alloc_dp(tb, in, out, link_nr, available_up,
available_down);
if (!tunnel) {
tb_port_dbg(out, "could not allocate DP tunnel\n");
goto err_reclaim_usb;
}
if (tb_tunnel_activate(tunnel)) {
tb_port_info(out, "DP tunnel activation failed, aborting\n");
goto err_free;
}
list_add_tail(&tunnel->list, &tcm->tunnel_list);
tb_reclaim_usb3_bandwidth(tb, in, out);
/* Update the domain with the new bandwidth estimation */
tb_recalc_estimated_bandwidth(tb);
/*
	 * If a DP tunnel exists, change the TMU mode of the host router's
	 * first-depth children to HiFi so that CL0s works.
*/
tb_increase_tmu_accuracy(tunnel);
return;
err_free:
tb_tunnel_free(tunnel);
err_reclaim_usb:
tb_reclaim_usb3_bandwidth(tb, in, out);
err_detach_group:
tb_detach_bandwidth_group(in);
err_dealloc_dp:
tb_switch_dealloc_dp_resource(in->sw, in);
err_rpm_put:
pm_runtime_mark_last_busy(&out->sw->dev);
pm_runtime_put_autosuspend(&out->sw->dev);
pm_runtime_mark_last_busy(&in->sw->dev);
pm_runtime_put_autosuspend(&in->sw->dev);
}
static void tb_dp_resource_unavailable(struct tb *tb, struct tb_port *port)
{
struct tb_port *in, *out;
struct tb_tunnel *tunnel;
if (tb_port_is_dpin(port)) {
tb_port_dbg(port, "DP IN resource unavailable\n");
in = port;
out = NULL;
} else {
tb_port_dbg(port, "DP OUT resource unavailable\n");
in = NULL;
out = port;
}
tunnel = tb_find_tunnel(tb, TB_TUNNEL_DP, in, out);
tb_deactivate_and_free_tunnel(tunnel);
list_del_init(&port->list);
/*
	 * See if there is another DP OUT port that can be used to
	 * create another tunnel.
*/
tb_recalc_estimated_bandwidth(tb);
tb_tunnel_dp(tb);
}
static void tb_dp_resource_available(struct tb *tb, struct tb_port *port)
{
struct tb_cm *tcm = tb_priv(tb);
struct tb_port *p;
if (tb_port_is_enabled(port))
return;
list_for_each_entry(p, &tcm->dp_resources, list) {
if (p == port)
return;
}
tb_port_dbg(port, "DP %s resource available\n",
tb_port_is_dpin(port) ? "IN" : "OUT");
list_add_tail(&port->list, &tcm->dp_resources);
/* Look for suitable DP IN <-> DP OUT pairs now */
tb_tunnel_dp(tb);
}
static void tb_disconnect_and_release_dp(struct tb *tb)
{
struct tb_cm *tcm = tb_priv(tb);
struct tb_tunnel *tunnel, *n;
/*
* Tear down all DP tunnels and release their resources. They
* will be re-established after resume based on plug events.
*/
list_for_each_entry_safe_reverse(tunnel, n, &tcm->tunnel_list, list) {
if (tb_tunnel_is_dp(tunnel))
tb_deactivate_and_free_tunnel(tunnel);
}
while (!list_empty(&tcm->dp_resources)) {
struct tb_port *port;
port = list_first_entry(&tcm->dp_resources,
struct tb_port, list);
list_del_init(&port->list);
}
}
static int tb_disconnect_pci(struct tb *tb, struct tb_switch *sw)
{
struct tb_tunnel *tunnel;
struct tb_port *up;
up = tb_switch_find_port(sw, TB_TYPE_PCIE_UP);
if (WARN_ON(!up))
return -ENODEV;
tunnel = tb_find_tunnel(tb, TB_TUNNEL_PCI, NULL, up);
if (WARN_ON(!tunnel))
return -ENODEV;
tb_switch_xhci_disconnect(sw);
tb_tunnel_deactivate(tunnel);
list_del(&tunnel->list);
tb_tunnel_free(tunnel);
return 0;
}
static int tb_tunnel_pci(struct tb *tb, struct tb_switch *sw)
{
struct tb_port *up, *down, *port;
struct tb_cm *tcm = tb_priv(tb);
struct tb_tunnel *tunnel;
up = tb_switch_find_port(sw, TB_TYPE_PCIE_UP);
if (!up)
return 0;
/*
* Look up available down port. Since we are chaining it should
* be found right above this switch.
*/
port = tb_switch_downstream_port(sw);
down = tb_find_pcie_down(tb_switch_parent(sw), port);
if (!down)
return 0;
tunnel = tb_tunnel_alloc_pci(tb, up, down);
if (!tunnel)
return -ENOMEM;
if (tb_tunnel_activate(tunnel)) {
tb_port_info(up,
"PCIe tunnel activation failed, aborting\n");
tb_tunnel_free(tunnel);
return -EIO;
}
/*
* PCIe L1 is needed to enable CL0s for Titan Ridge so enable it
* here.
*/
if (tb_switch_pcie_l1_enable(sw))
tb_sw_warn(sw, "failed to enable PCIe L1 for Titan Ridge\n");
if (tb_switch_xhci_connect(sw))
tb_sw_warn(sw, "failed to connect xHCI\n");
list_add_tail(&tunnel->list, &tcm->tunnel_list);
return 0;
}
static int tb_approve_xdomain_paths(struct tb *tb, struct tb_xdomain *xd,
int transmit_path, int transmit_ring,
int receive_path, int receive_ring)
{
struct tb_cm *tcm = tb_priv(tb);
struct tb_port *nhi_port, *dst_port;
struct tb_tunnel *tunnel;
struct tb_switch *sw;
int ret;
sw = tb_to_switch(xd->dev.parent);
dst_port = tb_port_at(xd->route, sw);
nhi_port = tb_switch_find_port(tb->root_switch, TB_TYPE_NHI);
mutex_lock(&tb->lock);
/*
* When tunneling DMA paths the link should not enter CL states
* so disable them now.
*/
tb_disable_clx(sw);
tunnel = tb_tunnel_alloc_dma(tb, nhi_port, dst_port, transmit_path,
transmit_ring, receive_path, receive_ring);
if (!tunnel) {
ret = -ENOMEM;
goto err_clx;
}
if (tb_tunnel_activate(tunnel)) {
tb_port_info(nhi_port,
"DMA tunnel activation failed, aborting\n");
ret = -EIO;
goto err_free;
}
list_add_tail(&tunnel->list, &tcm->tunnel_list);
mutex_unlock(&tb->lock);
return 0;
err_free:
tb_tunnel_free(tunnel);
err_clx:
tb_enable_clx(sw);
mutex_unlock(&tb->lock);
return ret;
}
static void __tb_disconnect_xdomain_paths(struct tb *tb, struct tb_xdomain *xd,
int transmit_path, int transmit_ring,
int receive_path, int receive_ring)
{
struct tb_cm *tcm = tb_priv(tb);
struct tb_port *nhi_port, *dst_port;
struct tb_tunnel *tunnel, *n;
struct tb_switch *sw;
sw = tb_to_switch(xd->dev.parent);
dst_port = tb_port_at(xd->route, sw);
nhi_port = tb_switch_find_port(tb->root_switch, TB_TYPE_NHI);
list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list) {
if (!tb_tunnel_is_dma(tunnel))
continue;
if (tunnel->src_port != nhi_port || tunnel->dst_port != dst_port)
continue;
if (tb_tunnel_match_dma(tunnel, transmit_path, transmit_ring,
receive_path, receive_ring))
tb_deactivate_and_free_tunnel(tunnel);
}
/*
* Try to re-enable CL states now, it is OK if this fails
* because we may still have another DMA tunnel active through
* the same host router USB4 downstream port.
*/
tb_enable_clx(sw);
}
static int tb_disconnect_xdomain_paths(struct tb *tb, struct tb_xdomain *xd,
int transmit_path, int transmit_ring,
int receive_path, int receive_ring)
{
if (!xd->is_unplugged) {
mutex_lock(&tb->lock);
__tb_disconnect_xdomain_paths(tb, xd, transmit_path,
transmit_ring, receive_path,
receive_ring);
mutex_unlock(&tb->lock);
}
return 0;
}
/* hotplug handling */
/*
* tb_handle_hotplug() - handle hotplug event
*
* Executes on tb->wq.
*/
static void tb_handle_hotplug(struct work_struct *work)
{
struct tb_hotplug_event *ev = container_of(work, typeof(*ev), work);
struct tb *tb = ev->tb;
struct tb_cm *tcm = tb_priv(tb);
struct tb_switch *sw;
struct tb_port *port;
/* Bring the domain back from sleep if it was suspended */
pm_runtime_get_sync(&tb->dev);
mutex_lock(&tb->lock);
if (!tcm->hotplug_active)
goto out; /* during init, suspend or shutdown */
sw = tb_switch_find_by_route(tb, ev->route);
if (!sw) {
tb_warn(tb,
"hotplug event from non existent switch %llx:%x (unplug: %d)\n",
ev->route, ev->port, ev->unplug);
goto out;
}
if (ev->port > sw->config.max_port_number) {
tb_warn(tb,
"hotplug event from non existent port %llx:%x (unplug: %d)\n",
ev->route, ev->port, ev->unplug);
goto put_sw;
}
port = &sw->ports[ev->port];
if (tb_is_upstream_port(port)) {
tb_dbg(tb, "hotplug event for upstream port %llx:%x (unplug: %d)\n",
ev->route, ev->port, ev->unplug);
goto put_sw;
}
pm_runtime_get_sync(&sw->dev);
if (ev->unplug) {
tb_retimer_remove_all(port);
if (tb_port_has_remote(port)) {
tb_port_dbg(port, "switch unplugged\n");
tb_sw_set_unplugged(port->remote->sw);
tb_free_invalid_tunnels(tb);
tb_remove_dp_resources(port->remote->sw);
tb_switch_tmu_disable(port->remote->sw);
tb_switch_unconfigure_link(port->remote->sw);
tb_switch_lane_bonding_disable(port->remote->sw);
tb_switch_remove(port->remote->sw);
port->remote = NULL;
if (port->dual_link_port)
port->dual_link_port->remote = NULL;
/* Maybe we can create another DP tunnel */
tb_recalc_estimated_bandwidth(tb);
tb_tunnel_dp(tb);
} else if (port->xdomain) {
struct tb_xdomain *xd = tb_xdomain_get(port->xdomain);
tb_port_dbg(port, "xdomain unplugged\n");
/*
* Service drivers are unbound during
* tb_xdomain_remove() so setting XDomain as
* unplugged here prevents deadlock if they call
* tb_xdomain_disable_paths(). We will tear down
* all the tunnels below.
*/
xd->is_unplugged = true;
tb_xdomain_remove(xd);
port->xdomain = NULL;
__tb_disconnect_xdomain_paths(tb, xd, -1, -1, -1, -1);
tb_xdomain_put(xd);
tb_port_unconfigure_xdomain(port);
} else if (tb_port_is_dpout(port) || tb_port_is_dpin(port)) {
tb_dp_resource_unavailable(tb, port);
} else if (!port->port) {
tb_sw_dbg(sw, "xHCI disconnect request\n");
tb_switch_xhci_disconnect(sw);
} else {
tb_port_dbg(port,
"got unplug event for disconnected port, ignoring\n");
}
} else if (port->remote) {
tb_port_dbg(port, "got plug event for connected port, ignoring\n");
} else if (!port->port && sw->authorized) {
tb_sw_dbg(sw, "xHCI connect request\n");
tb_switch_xhci_connect(sw);
} else {
if (tb_port_is_null(port)) {
tb_port_dbg(port, "hotplug: scanning\n");
tb_scan_port(port);
if (!port->remote)
tb_port_dbg(port, "hotplug: no switch found\n");
} else if (tb_port_is_dpout(port) || tb_port_is_dpin(port)) {
tb_dp_resource_available(tb, port);
}
}
pm_runtime_mark_last_busy(&sw->dev);
pm_runtime_put_autosuspend(&sw->dev);
put_sw:
tb_switch_put(sw);
out:
mutex_unlock(&tb->lock);
pm_runtime_mark_last_busy(&tb->dev);
pm_runtime_put_autosuspend(&tb->dev);
kfree(ev);
}
static int tb_alloc_dp_bandwidth(struct tb_tunnel *tunnel, int *requested_up,
int *requested_down)
{
int allocated_up, allocated_down, available_up, available_down, ret;
int requested_up_corrected, requested_down_corrected, granularity;
int max_up, max_down, max_up_rounded, max_down_rounded;
struct tb *tb = tunnel->tb;
struct tb_port *in, *out;
ret = tb_tunnel_allocated_bandwidth(tunnel, &allocated_up, &allocated_down);
if (ret)
return ret;
in = tunnel->src_port;
out = tunnel->dst_port;
tb_port_dbg(in, "bandwidth allocated currently %d/%d Mb/s\n",
allocated_up, allocated_down);
/*
* If we get a rounded-up request from the graphics side, say HBR2 x 4
* that is 17500 instead of 17280 (this is because of the
* granularity), we allow it too. Here the graphics has already
* negotiated with the DPRX the maximum possible rates (which is
* 17280 in this case).
*
* Since the link cannot go higher than 17280 we use that in our
* calculations but the DP IN adapter Allocated BW write must be
* the same value (17500) otherwise the adapter will mark it as
* failed for graphics.
*/
ret = tb_tunnel_maximum_bandwidth(tunnel, &max_up, &max_down);
if (ret)
return ret;
ret = usb4_dp_port_granularity(in);
if (ret < 0)
return ret;
granularity = ret;
max_up_rounded = roundup(max_up, granularity);
max_down_rounded = roundup(max_down, granularity);
/*
* This will "fix" the request down to the maximum supported
* rate * lanes if it is at the maximum rounded up level.
*/
requested_up_corrected = *requested_up;
if (requested_up_corrected == max_up_rounded)
requested_up_corrected = max_up;
else if (requested_up_corrected < 0)
requested_up_corrected = 0;
requested_down_corrected = *requested_down;
if (requested_down_corrected == max_down_rounded)
requested_down_corrected = max_down;
else if (requested_down_corrected < 0)
requested_down_corrected = 0;
tb_port_dbg(in, "corrected bandwidth request %d/%d Mb/s\n",
requested_up_corrected, requested_down_corrected);
if ((*requested_up >= 0 && requested_up_corrected > max_up_rounded) ||
(*requested_down >= 0 && requested_down_corrected > max_down_rounded)) {
tb_port_dbg(in, "bandwidth request too high (%d/%d Mb/s > %d/%d Mb/s)\n",
requested_up_corrected, requested_down_corrected,
max_up_rounded, max_down_rounded);
return -ENOBUFS;
}
if ((*requested_up >= 0 && requested_up_corrected <= allocated_up) ||
(*requested_down >= 0 && requested_down_corrected <= allocated_down)) {
/*
* If the requested bandwidth is less than or equal to what is
* currently allocated to that tunnel we simply change
* the reservation of the tunnel. Since all the tunnels
* going out from the same USB4 port are in the same
* group the released bandwidth will be taken into
* account for the other tunnels automatically below.
*/
return tb_tunnel_alloc_bandwidth(tunnel, requested_up,
requested_down);
}
/*
* More bandwidth is requested. Release all the potential
* bandwidth from USB3 first.
*/
ret = tb_release_unused_usb3_bandwidth(tb, in, out);
if (ret)
return ret;
/*
* Then go over all tunnels that cross the same USB4 ports (they
* are also in the same group but we use the same function here
* that we use with the normal bandwidth allocation).
*/
ret = tb_available_bandwidth(tb, in, out, &available_up, &available_down);
if (ret)
goto reclaim;
tb_port_dbg(in, "bandwidth available for allocation %d/%d Mb/s\n",
available_up, available_down);
if ((*requested_up >= 0 && available_up >= requested_up_corrected) ||
(*requested_down >= 0 && available_down >= requested_down_corrected)) {
ret = tb_tunnel_alloc_bandwidth(tunnel, requested_up,
requested_down);
} else {
ret = -ENOBUFS;
}
reclaim:
tb_reclaim_usb3_bandwidth(tb, in, out);
return ret;
}
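/*
 * Illustrative sketch (not part of the driver): the granularity handling
 * described in tb_alloc_dp_bandwidth() in numbers. Assuming a DP IN
 * granularity of 250 Mb/s, the HBR2 x 4 link maximum of 17280 Mb/s rounds
 * up to 17500 Mb/s, so a 17500 Mb/s request from graphics is accepted and
 * corrected back down for the internal calculations.
 */
#if 0
static int example_dp_request_correction(void)
{
	int granularity = 250;		/* assumed value for this example */
	int max_down = 17280;		/* HBR2 x 4 */
	int max_down_rounded = roundup(max_down, granularity);	/* 17500 */
	int requested_down = 17500;	/* rounded-up request from graphics */

	/* Same correction as done in tb_alloc_dp_bandwidth() */
	if (requested_down == max_down_rounded)
		requested_down = max_down;

	return requested_down;		/* 17280 */
}
#endif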
static void tb_handle_dp_bandwidth_request(struct work_struct *work)
{
struct tb_hotplug_event *ev = container_of(work, typeof(*ev), work);
int requested_bw, requested_up, requested_down, ret;
struct tb_port *in, *out;
struct tb_tunnel *tunnel;
struct tb *tb = ev->tb;
struct tb_cm *tcm = tb_priv(tb);
struct tb_switch *sw;
pm_runtime_get_sync(&tb->dev);
mutex_lock(&tb->lock);
if (!tcm->hotplug_active)
goto unlock;
sw = tb_switch_find_by_route(tb, ev->route);
if (!sw) {
tb_warn(tb, "bandwidth request from non-existent router %llx\n",
ev->route);
goto unlock;
}
in = &sw->ports[ev->port];
if (!tb_port_is_dpin(in)) {
tb_port_warn(in, "bandwidth request to non-DP IN adapter\n");
goto unlock;
}
tb_port_dbg(in, "handling bandwidth allocation request\n");
if (!usb4_dp_port_bandwidth_mode_enabled(in)) {
tb_port_warn(in, "bandwidth allocation mode not enabled\n");
goto unlock;
}
ret = usb4_dp_port_requested_bandwidth(in);
if (ret < 0) {
if (ret == -ENODATA)
tb_port_dbg(in, "no bandwidth request active\n");
else
tb_port_warn(in, "failed to read requested bandwidth\n");
goto unlock;
}
requested_bw = ret;
tb_port_dbg(in, "requested bandwidth %d Mb/s\n", requested_bw);
tunnel = tb_find_tunnel(tb, TB_TUNNEL_DP, in, NULL);
if (!tunnel) {
tb_port_warn(in, "failed to find tunnel\n");
goto unlock;
}
out = tunnel->dst_port;
if (in->sw->config.depth < out->sw->config.depth) {
requested_up = -1;
requested_down = requested_bw;
} else {
requested_up = requested_bw;
requested_down = -1;
}
ret = tb_alloc_dp_bandwidth(tunnel, &requested_up, &requested_down);
if (ret) {
if (ret == -ENOBUFS)
tb_port_warn(in, "not enough bandwidth available\n");
else
tb_port_warn(in, "failed to change bandwidth allocation\n");
} else {
tb_port_dbg(in, "bandwidth allocation changed to %d/%d Mb/s\n",
requested_up, requested_down);
/* Update other clients about the allocation change */
tb_recalc_estimated_bandwidth(tb);
}
unlock:
mutex_unlock(&tb->lock);
pm_runtime_mark_last_busy(&tb->dev);
pm_runtime_put_autosuspend(&tb->dev);
kfree(ev);
}
static void tb_queue_dp_bandwidth_request(struct tb *tb, u64 route, u8 port)
{
struct tb_hotplug_event *ev;
ev = kmalloc(sizeof(*ev), GFP_KERNEL);
if (!ev)
return;
ev->tb = tb;
ev->route = route;
ev->port = port;
INIT_WORK(&ev->work, tb_handle_dp_bandwidth_request);
queue_work(tb->wq, &ev->work);
}
static void tb_handle_notification(struct tb *tb, u64 route,
const struct cfg_error_pkg *error)
{
switch (error->error) {
case TB_CFG_ERROR_PCIE_WAKE:
case TB_CFG_ERROR_DP_CON_CHANGE:
case TB_CFG_ERROR_DPTX_DISCOVERY:
if (tb_cfg_ack_notification(tb->ctl, route, error))
tb_warn(tb, "could not ack notification on %llx\n",
route);
break;
case TB_CFG_ERROR_DP_BW:
if (tb_cfg_ack_notification(tb->ctl, route, error))
tb_warn(tb, "could not ack notification on %llx\n",
route);
tb_queue_dp_bandwidth_request(tb, route, error->port);
break;
default:
/* Ignore for now */
break;
}
}
/*
* tb_schedule_hotplug_handler() - callback function for the control channel
*
* Delegates to tb_handle_hotplug.
*/
static void tb_handle_event(struct tb *tb, enum tb_cfg_pkg_type type,
const void *buf, size_t size)
{
const struct cfg_event_pkg *pkg = buf;
u64 route = tb_cfg_get_route(&pkg->header);
switch (type) {
case TB_CFG_PKG_ERROR:
tb_handle_notification(tb, route, (const struct cfg_error_pkg *)buf);
return;
case TB_CFG_PKG_EVENT:
break;
default:
tb_warn(tb, "unexpected event %#x, ignoring\n", type);
return;
}
if (tb_cfg_ack_plug(tb->ctl, route, pkg->port, pkg->unplug)) {
tb_warn(tb, "could not ack plug event on %llx:%x\n", route,
pkg->port);
}
tb_queue_hotplug(tb, route, pkg->port, pkg->unplug);
}
static void tb_stop(struct tb *tb)
{
struct tb_cm *tcm = tb_priv(tb);
struct tb_tunnel *tunnel;
struct tb_tunnel *n;
cancel_delayed_work(&tcm->remove_work);
/* tunnels are only present after everything has been initialized */
list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list) {
/*
* DMA tunnels require the driver to be functional so we
* tear them down. Other protocol tunnels can be left
* intact.
*/
if (tb_tunnel_is_dma(tunnel))
tb_tunnel_deactivate(tunnel);
tb_tunnel_free(tunnel);
}
tb_switch_remove(tb->root_switch);
tcm->hotplug_active = false; /* signal tb_handle_hotplug to quit */
}
static int tb_scan_finalize_switch(struct device *dev, void *data)
{
if (tb_is_switch(dev)) {
struct tb_switch *sw = tb_to_switch(dev);
/*
* If we found that the switch was already setup by the
* boot firmware, mark it as authorized now before we
* send uevent to userspace.
*/
if (sw->boot)
sw->authorized = 1;
dev_set_uevent_suppress(dev, false);
kobject_uevent(&dev->kobj, KOBJ_ADD);
device_for_each_child(dev, NULL, tb_scan_finalize_switch);
}
return 0;
}
static int tb_start(struct tb *tb)
{
struct tb_cm *tcm = tb_priv(tb);
int ret;
tb->root_switch = tb_switch_alloc(tb, &tb->dev, 0);
if (IS_ERR(tb->root_switch))
return PTR_ERR(tb->root_switch);
/*
* ICM firmware upgrade needs running firmware and in native
* mode that is not available so disable firmware upgrade of the
* root switch.
*
* However, USB4 routers support NVM firmware upgrade if they
* implement the necessary router operations.
*/
tb->root_switch->no_nvm_upgrade = !tb_switch_is_usb4(tb->root_switch);
/* All USB4 routers support runtime PM */
tb->root_switch->rpm = tb_switch_is_usb4(tb->root_switch);
ret = tb_switch_configure(tb->root_switch);
if (ret) {
tb_switch_put(tb->root_switch);
return ret;
}
/* Announce the switch to the world */
ret = tb_switch_add(tb->root_switch);
if (ret) {
tb_switch_put(tb->root_switch);
return ret;
}
/*
* To support highest CLx state, we set host router's TMU to
* Normal mode.
*/
tb_switch_tmu_configure(tb->root_switch, TB_SWITCH_TMU_MODE_LOWRES);
/* Enable TMU if it is off */
tb_switch_tmu_enable(tb->root_switch);
/* Full scan to discover devices added before the driver was loaded. */
tb_scan_switch(tb->root_switch);
/* Find out tunnels created by the boot firmware */
tb_discover_tunnels(tb);
/* Add DP resources from the DP tunnels created by the boot firmware */
tb_discover_dp_resources(tb);
/*
* If the boot firmware did not create USB 3.x tunnels create them
* now for the whole topology.
*/
tb_create_usb3_tunnels(tb->root_switch);
/* Add DP IN resources for the root switch */
tb_add_dp_resources(tb->root_switch);
/* Make the discovered switches available to the userspace */
device_for_each_child(&tb->root_switch->dev, NULL,
tb_scan_finalize_switch);
/* Allow tb_handle_hotplug to progress events */
tcm->hotplug_active = true;
return 0;
}
static int tb_suspend_noirq(struct tb *tb)
{
struct tb_cm *tcm = tb_priv(tb);
tb_dbg(tb, "suspending...\n");
tb_disconnect_and_release_dp(tb);
tb_switch_suspend(tb->root_switch, false);
tcm->hotplug_active = false; /* signal tb_handle_hotplug to quit */
tb_dbg(tb, "suspend finished\n");
return 0;
}
static void tb_restore_children(struct tb_switch *sw)
{
struct tb_port *port;
/* No need to restore if the router is already unplugged */
if (sw->is_unplugged)
return;
if (tb_enable_clx(sw))
tb_sw_warn(sw, "failed to re-enable CL states\n");
if (tb_enable_tmu(sw))
tb_sw_warn(sw, "failed to restore TMU configuration\n");
tb_switch_configuration_valid(sw);
tb_switch_for_each_port(sw, port) {
if (!tb_port_has_remote(port) && !port->xdomain)
continue;
if (port->remote) {
tb_switch_lane_bonding_enable(port->remote->sw);
tb_switch_configure_link(port->remote->sw);
tb_restore_children(port->remote->sw);
} else if (port->xdomain) {
tb_port_configure_xdomain(port, port->xdomain);
}
}
}
static int tb_resume_noirq(struct tb *tb)
{
struct tb_cm *tcm = tb_priv(tb);
struct tb_tunnel *tunnel, *n;
unsigned int usb3_delay = 0;
LIST_HEAD(tunnels);
tb_dbg(tb, "resuming...\n");
/* remove any pci devices the firmware might have setup */
tb_switch_reset(tb->root_switch);
tb_switch_resume(tb->root_switch);
tb_free_invalid_tunnels(tb);
tb_free_unplugged_children(tb->root_switch);
tb_restore_children(tb->root_switch);
/*
* If we get here from suspend to disk the boot firmware or the
* restore kernel might have created tunnels of its own. Since
* we cannot be sure they are usable for us we find and tear
* them down.
*/
tb_switch_discover_tunnels(tb->root_switch, &tunnels, false);
list_for_each_entry_safe_reverse(tunnel, n, &tunnels, list) {
if (tb_tunnel_is_usb3(tunnel))
usb3_delay = 500;
tb_tunnel_deactivate(tunnel);
tb_tunnel_free(tunnel);
}
/* Re-create our tunnels now */
list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list) {
/* USB3 requires delay before it can be re-activated */
if (tb_tunnel_is_usb3(tunnel)) {
msleep(usb3_delay);
/* Only need to do it once */
usb3_delay = 0;
}
tb_tunnel_restart(tunnel);
}
if (!list_empty(&tcm->tunnel_list)) {
/*
* the pcie links need some time to get going.
* 100ms works for me...
*/
tb_dbg(tb, "tunnels restarted, sleeping for 100ms\n");
msleep(100);
}
/* Allow tb_handle_hotplug to progress events */
tcm->hotplug_active = true;
tb_dbg(tb, "resume finished\n");
return 0;
}
static int tb_free_unplugged_xdomains(struct tb_switch *sw)
{
struct tb_port *port;
int ret = 0;
tb_switch_for_each_port(sw, port) {
if (tb_is_upstream_port(port))
continue;
if (port->xdomain && port->xdomain->is_unplugged) {
tb_retimer_remove_all(port);
tb_xdomain_remove(port->xdomain);
tb_port_unconfigure_xdomain(port);
port->xdomain = NULL;
ret++;
} else if (port->remote) {
ret += tb_free_unplugged_xdomains(port->remote->sw);
}
}
return ret;
}
static int tb_freeze_noirq(struct tb *tb)
{
struct tb_cm *tcm = tb_priv(tb);
tcm->hotplug_active = false;
return 0;
}
static int tb_thaw_noirq(struct tb *tb)
{
struct tb_cm *tcm = tb_priv(tb);
tcm->hotplug_active = true;
return 0;
}
static void tb_complete(struct tb *tb)
{
/*
* Release any unplugged XDomains and, if another domain has been
* swapped in place of an unplugged XDomain, run another rescan.
*/
mutex_lock(&tb->lock);
if (tb_free_unplugged_xdomains(tb->root_switch))
tb_scan_switch(tb->root_switch);
mutex_unlock(&tb->lock);
}
static int tb_runtime_suspend(struct tb *tb)
{
struct tb_cm *tcm = tb_priv(tb);
mutex_lock(&tb->lock);
tb_switch_suspend(tb->root_switch, true);
tcm->hotplug_active = false;
mutex_unlock(&tb->lock);
return 0;
}
static void tb_remove_work(struct work_struct *work)
{
struct tb_cm *tcm = container_of(work, struct tb_cm, remove_work.work);
struct tb *tb = tcm_to_tb(tcm);
mutex_lock(&tb->lock);
if (tb->root_switch) {
tb_free_unplugged_children(tb->root_switch);
tb_free_unplugged_xdomains(tb->root_switch);
}
mutex_unlock(&tb->lock);
}
static int tb_runtime_resume(struct tb *tb)
{
struct tb_cm *tcm = tb_priv(tb);
struct tb_tunnel *tunnel, *n;
mutex_lock(&tb->lock);
tb_switch_resume(tb->root_switch);
tb_free_invalid_tunnels(tb);
tb_restore_children(tb->root_switch);
list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list)
tb_tunnel_restart(tunnel);
tcm->hotplug_active = true;
mutex_unlock(&tb->lock);
/*
* Schedule cleanup of any unplugged devices. Run this in a
* separate thread to avoid possible deadlock if the device
* removal runtime resumes the unplugged device.
*/
queue_delayed_work(tb->wq, &tcm->remove_work, msecs_to_jiffies(50));
return 0;
}
static const struct tb_cm_ops tb_cm_ops = {
.start = tb_start,
.stop = tb_stop,
.suspend_noirq = tb_suspend_noirq,
.resume_noirq = tb_resume_noirq,
.freeze_noirq = tb_freeze_noirq,
.thaw_noirq = tb_thaw_noirq,
.complete = tb_complete,
.runtime_suspend = tb_runtime_suspend,
.runtime_resume = tb_runtime_resume,
.handle_event = tb_handle_event,
.disapprove_switch = tb_disconnect_pci,
.approve_switch = tb_tunnel_pci,
.approve_xdomain_paths = tb_approve_xdomain_paths,
.disconnect_xdomain_paths = tb_disconnect_xdomain_paths,
};
/*
* During suspend the Thunderbolt controller is reset and all PCIe
* tunnels are lost. The NHI driver will try to reestablish all tunnels
* during resume. This adds device links between the tunneled PCIe
* downstream ports and the NHI so that the device core will make sure
* NHI is resumed first before the rest.
*/
static bool tb_apple_add_links(struct tb_nhi *nhi)
{
struct pci_dev *upstream, *pdev;
bool ret;
if (!x86_apple_machine)
return false;
switch (nhi->pdev->device) {
case PCI_DEVICE_ID_INTEL_LIGHT_RIDGE:
case PCI_DEVICE_ID_INTEL_CACTUS_RIDGE_4C:
case PCI_DEVICE_ID_INTEL_FALCON_RIDGE_2C_NHI:
case PCI_DEVICE_ID_INTEL_FALCON_RIDGE_4C_NHI:
break;
default:
return false;
}
upstream = pci_upstream_bridge(nhi->pdev);
while (upstream) {
if (!pci_is_pcie(upstream))
return false;
if (pci_pcie_type(upstream) == PCI_EXP_TYPE_UPSTREAM)
break;
upstream = pci_upstream_bridge(upstream);
}
if (!upstream)
return false;
/*
* For each hotplug downstream port, add a device link
* back to NHI so that PCIe tunnels can be re-established after
* sleep.
*/
ret = false;
for_each_pci_bridge(pdev, upstream->subordinate) {
const struct device_link *link;
if (!pci_is_pcie(pdev))
continue;
if (pci_pcie_type(pdev) != PCI_EXP_TYPE_DOWNSTREAM ||
!pdev->is_hotplug_bridge)
continue;
link = device_link_add(&pdev->dev, &nhi->pdev->dev,
DL_FLAG_AUTOREMOVE_SUPPLIER |
DL_FLAG_PM_RUNTIME);
if (link) {
dev_dbg(&nhi->pdev->dev, "created link from %s\n",
dev_name(&pdev->dev));
ret = true;
} else {
dev_warn(&nhi->pdev->dev, "device link creation from %s failed\n",
dev_name(&pdev->dev));
}
}
return ret;
}
struct tb *tb_probe(struct tb_nhi *nhi)
{
struct tb_cm *tcm;
struct tb *tb;
tb = tb_domain_alloc(nhi, TB_TIMEOUT, sizeof(*tcm));
if (!tb)
return NULL;
if (tb_acpi_may_tunnel_pcie())
tb->security_level = TB_SECURITY_USER;
else
tb->security_level = TB_SECURITY_NOPCIE;
tb->cm_ops = &tb_cm_ops;
tcm = tb_priv(tb);
INIT_LIST_HEAD(&tcm->tunnel_list);
INIT_LIST_HEAD(&tcm->dp_resources);
INIT_DELAYED_WORK(&tcm->remove_work, tb_remove_work);
tb_init_bandwidth_groups(tcm);
tb_dbg(tb, "using software connection manager\n");
/*
* Device links are needed to make sure we establish tunnels
* before the PCIe/USB stack is resumed so complain here if we
* found them missing.
*/
if (!tb_apple_add_links(nhi) && !tb_acpi_add_links(nhi))
tb_warn(tb, "device links to tunneled native ports are missing!\n");
return tb;
}
| linux-master | drivers/thunderbolt/tb.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Thunderbolt driver - switch/port utility functions
*
* Copyright (c) 2014 Andreas Noever <[email protected]>
* Copyright (C) 2018, Intel Corporation
*/
#include <linux/delay.h>
#include <linux/idr.h>
#include <linux/module.h>
#include <linux/nvmem-provider.h>
#include <linux/pm_runtime.h>
#include <linux/sched/signal.h>
#include <linux/sizes.h>
#include <linux/slab.h>
#include <linux/string_helpers.h>
#include "tb.h"
/* Switch NVM support */
struct nvm_auth_status {
struct list_head list;
uuid_t uuid;
u32 status;
};
/*
* Hold NVM authentication failure status per switch. This information
* needs to stay around even when the switch gets power cycled so we
* keep it separately.
*/
static LIST_HEAD(nvm_auth_status_cache);
static DEFINE_MUTEX(nvm_auth_status_lock);
static struct nvm_auth_status *__nvm_get_auth_status(const struct tb_switch *sw)
{
struct nvm_auth_status *st;
list_for_each_entry(st, &nvm_auth_status_cache, list) {
if (uuid_equal(&st->uuid, sw->uuid))
return st;
}
return NULL;
}
static void nvm_get_auth_status(const struct tb_switch *sw, u32 *status)
{
struct nvm_auth_status *st;
mutex_lock(&nvm_auth_status_lock);
st = __nvm_get_auth_status(sw);
mutex_unlock(&nvm_auth_status_lock);
*status = st ? st->status : 0;
}
static void nvm_set_auth_status(const struct tb_switch *sw, u32 status)
{
struct nvm_auth_status *st;
if (WARN_ON(!sw->uuid))
return;
mutex_lock(&nvm_auth_status_lock);
st = __nvm_get_auth_status(sw);
if (!st) {
st = kzalloc(sizeof(*st), GFP_KERNEL);
if (!st)
goto unlock;
memcpy(&st->uuid, sw->uuid, sizeof(st->uuid));
INIT_LIST_HEAD(&st->list);
list_add_tail(&st->list, &nvm_auth_status_cache);
}
st->status = status;
unlock:
mutex_unlock(&nvm_auth_status_lock);
}
static void nvm_clear_auth_status(const struct tb_switch *sw)
{
struct nvm_auth_status *st;
mutex_lock(&nvm_auth_status_lock);
st = __nvm_get_auth_status(sw);
if (st) {
list_del(&st->list);
kfree(st);
}
mutex_unlock(&nvm_auth_status_lock);
}
static int nvm_validate_and_write(struct tb_switch *sw)
{
unsigned int image_size;
const u8 *buf;
int ret;
ret = tb_nvm_validate(sw->nvm);
if (ret)
return ret;
ret = tb_nvm_write_headers(sw->nvm);
if (ret)
return ret;
buf = sw->nvm->buf_data_start;
image_size = sw->nvm->buf_data_size;
if (tb_switch_is_usb4(sw))
ret = usb4_switch_nvm_write(sw, 0, buf, image_size);
else
ret = dma_port_flash_write(sw->dma_port, 0, buf, image_size);
if (ret)
return ret;
sw->nvm->flushed = true;
return 0;
}
static int nvm_authenticate_host_dma_port(struct tb_switch *sw)
{
int ret = 0;
/*
* Root switch NVM upgrade requires that we disconnect the
* existing paths first (in case it is not in safe mode
* already).
*/
if (!sw->safe_mode) {
u32 status;
ret = tb_domain_disconnect_all_paths(sw->tb);
if (ret)
return ret;
/*
* The host controller goes away pretty soon after this if
* everything goes well, so getting a timeout is expected.
*/
ret = dma_port_flash_update_auth(sw->dma_port);
if (!ret || ret == -ETIMEDOUT)
return 0;
/*
* Any error from update auth operation requires power
* cycling of the host router.
*/
tb_sw_warn(sw, "failed to authenticate NVM, power cycling\n");
if (dma_port_flash_update_auth_status(sw->dma_port, &status) > 0)
nvm_set_auth_status(sw, status);
}
/*
* From safe mode we can get out by just power cycling the
* switch.
*/
dma_port_power_cycle(sw->dma_port);
return ret;
}
static int nvm_authenticate_device_dma_port(struct tb_switch *sw)
{
int ret, retries = 10;
ret = dma_port_flash_update_auth(sw->dma_port);
switch (ret) {
case 0:
case -ETIMEDOUT:
case -EACCES:
case -EINVAL:
/* Power cycle is required */
break;
default:
return ret;
}
/*
* Poll here for the authentication status. It takes some time
* for the device to respond (we get timeout for a while). Once
* we get a response the device needs to be power cycled in order
* for the new NVM to be taken into use.
*/
do {
u32 status;
ret = dma_port_flash_update_auth_status(sw->dma_port, &status);
if (ret < 0 && ret != -ETIMEDOUT)
return ret;
if (ret > 0) {
if (status) {
tb_sw_warn(sw, "failed to authenticate NVM\n");
nvm_set_auth_status(sw, status);
}
tb_sw_info(sw, "power cycling the switch now\n");
dma_port_power_cycle(sw->dma_port);
return 0;
}
msleep(500);
} while (--retries);
return -ETIMEDOUT;
}
static void nvm_authenticate_start_dma_port(struct tb_switch *sw)
{
struct pci_dev *root_port;
/*
* During host router NVM upgrade we should not allow root port to
* go into D3cold because some root ports cannot trigger PME
* itself. To be on the safe side keep the root port in D0 during
* the whole upgrade process.
*/
root_port = pcie_find_root_port(sw->tb->nhi->pdev);
if (root_port)
pm_runtime_get_noresume(&root_port->dev);
}
static void nvm_authenticate_complete_dma_port(struct tb_switch *sw)
{
struct pci_dev *root_port;
root_port = pcie_find_root_port(sw->tb->nhi->pdev);
if (root_port)
pm_runtime_put(&root_port->dev);
}
static inline bool nvm_readable(struct tb_switch *sw)
{
if (tb_switch_is_usb4(sw)) {
/*
* USB4 devices must support NVM operations but it is
* optional for hosts. Therefore we query the NVM sector
* size here and if it is supported assume NVM
* operations are implemented.
*/
return usb4_switch_nvm_sector_size(sw) > 0;
}
/* Thunderbolt 2 and 3 devices support NVM through DMA port */
return !!sw->dma_port;
}
static inline bool nvm_upgradeable(struct tb_switch *sw)
{
if (sw->no_nvm_upgrade)
return false;
return nvm_readable(sw);
}
static int nvm_authenticate(struct tb_switch *sw, bool auth_only)
{
int ret;
if (tb_switch_is_usb4(sw)) {
if (auth_only) {
ret = usb4_switch_nvm_set_offset(sw, 0);
if (ret)
return ret;
}
sw->nvm->authenticating = true;
return usb4_switch_nvm_authenticate(sw);
}
if (auth_only)
return -EOPNOTSUPP;
sw->nvm->authenticating = true;
if (!tb_route(sw)) {
nvm_authenticate_start_dma_port(sw);
ret = nvm_authenticate_host_dma_port(sw);
} else {
ret = nvm_authenticate_device_dma_port(sw);
}
return ret;
}
/**
* tb_switch_nvm_read() - Read router NVM
* @sw: Router whose NVM to read
* @address: Start address on the NVM
* @buf: Buffer where the read data is copied
* @size: Size of the buffer in bytes
*
* Reads from router NVM and returns the requested data in @buf. Locking
* is up to the caller. Returns %0 on success and negative errno in case
* of failure.
*/
int tb_switch_nvm_read(struct tb_switch *sw, unsigned int address, void *buf,
size_t size)
{
if (tb_switch_is_usb4(sw))
return usb4_switch_nvm_read(sw, address, buf, size);
return dma_port_flash_read(sw->dma_port, address, buf, size);
}
static int nvm_read(void *priv, unsigned int offset, void *val, size_t bytes)
{
struct tb_nvm *nvm = priv;
struct tb_switch *sw = tb_to_switch(nvm->dev);
int ret;
pm_runtime_get_sync(&sw->dev);
if (!mutex_trylock(&sw->tb->lock)) {
ret = restart_syscall();
goto out;
}
ret = tb_switch_nvm_read(sw, offset, val, bytes);
mutex_unlock(&sw->tb->lock);
out:
pm_runtime_mark_last_busy(&sw->dev);
pm_runtime_put_autosuspend(&sw->dev);
return ret;
}
static int nvm_write(void *priv, unsigned int offset, void *val, size_t bytes)
{
struct tb_nvm *nvm = priv;
struct tb_switch *sw = tb_to_switch(nvm->dev);
int ret;
if (!mutex_trylock(&sw->tb->lock))
return restart_syscall();
/*
* Since writing the NVM image might require some special steps,
* for example when CSS headers are written, we cache the image
* locally here and handle the special cases when the user asks
* us to authenticate the image.
*/
ret = tb_nvm_write_buf(nvm, offset, val, bytes);
mutex_unlock(&sw->tb->lock);
return ret;
}
static int tb_switch_nvm_add(struct tb_switch *sw)
{
struct tb_nvm *nvm;
int ret;
if (!nvm_readable(sw))
return 0;
nvm = tb_nvm_alloc(&sw->dev);
if (IS_ERR(nvm)) {
ret = PTR_ERR(nvm) == -EOPNOTSUPP ? 0 : PTR_ERR(nvm);
goto err_nvm;
}
ret = tb_nvm_read_version(nvm);
if (ret)
goto err_nvm;
/*
* If the switch is in safe-mode the only accessible portion of
* the NVM is the non-active one where userspace is expected to
* write new functional NVM.
*/
if (!sw->safe_mode) {
ret = tb_nvm_add_active(nvm, nvm_read);
if (ret)
goto err_nvm;
}
if (!sw->no_nvm_upgrade) {
ret = tb_nvm_add_non_active(nvm, nvm_write);
if (ret)
goto err_nvm;
}
sw->nvm = nvm;
return 0;
err_nvm:
tb_sw_dbg(sw, "NVM upgrade disabled\n");
sw->no_nvm_upgrade = true;
if (!IS_ERR(nvm))
tb_nvm_free(nvm);
return ret;
}
static void tb_switch_nvm_remove(struct tb_switch *sw)
{
struct tb_nvm *nvm;
nvm = sw->nvm;
sw->nvm = NULL;
if (!nvm)
return;
/* Remove authentication status in case the switch is unplugged */
if (!nvm->authenticating)
nvm_clear_auth_status(sw);
tb_nvm_free(nvm);
}
/* port utility functions */
static const char *tb_port_type(const struct tb_regs_port_header *port)
{
switch (port->type >> 16) {
case 0:
switch ((u8) port->type) {
case 0:
return "Inactive";
case 1:
return "Port";
case 2:
return "NHI";
default:
return "unknown";
}
case 0x2:
return "Ethernet";
case 0x8:
return "SATA";
case 0xe:
return "DP/HDMI";
case 0x10:
return "PCIe";
case 0x20:
return "USB";
default:
return "unknown";
}
}
static void tb_dump_port(struct tb *tb, const struct tb_port *port)
{
const struct tb_regs_port_header *regs = &port->config;
tb_dbg(tb,
" Port %d: %x:%x (Revision: %d, TB Version: %d, Type: %s (%#x))\n",
regs->port_number, regs->vendor_id, regs->device_id,
regs->revision, regs->thunderbolt_version, tb_port_type(regs),
regs->type);
tb_dbg(tb, " Max hop id (in/out): %d/%d\n",
regs->max_in_hop_id, regs->max_out_hop_id);
tb_dbg(tb, " Max counters: %d\n", regs->max_counters);
tb_dbg(tb, " NFC Credits: %#x\n", regs->nfc_credits);
tb_dbg(tb, " Credits (total/control): %u/%u\n", port->total_credits,
port->ctl_credits);
}
/**
* tb_port_state() - get connectedness state of a port
* @port: the port to check
*
* The port must have a TB_CAP_PHY (i.e. it should be a real port).
*
* Return: Returns an enum tb_port_state on success or an error code on failure.
*/
int tb_port_state(struct tb_port *port)
{
struct tb_cap_phy phy;
int res;
if (port->cap_phy == 0) {
tb_port_WARN(port, "does not have a PHY\n");
return -EINVAL;
}
res = tb_port_read(port, &phy, TB_CFG_PORT, port->cap_phy, 2);
if (res)
return res;
return phy.state;
}
/**
* tb_wait_for_port() - wait for a port to become ready
* @port: Port to wait
* @wait_if_unplugged: Wait also when port is unplugged
*
* Wait up to 1 second for a port to reach state TB_PORT_UP. If
* wait_if_unplugged is set then we also wait if the port is in state
* TB_PORT_UNPLUGGED (it takes a while for the device to be registered after
* switch resume). Otherwise we only wait if a device is registered but the link
* has not yet been established.
*
* Return: Returns an error code on failure. Returns 0 if the port is not
* connected or failed to reach state TB_PORT_UP within one second. Returns 1
* if the port is connected and in state TB_PORT_UP.
*/
int tb_wait_for_port(struct tb_port *port, bool wait_if_unplugged)
{
int retries = 10;
int state;
if (!port->cap_phy) {
tb_port_WARN(port, "does not have PHY\n");
return -EINVAL;
}
if (tb_is_upstream_port(port)) {
tb_port_WARN(port, "is the upstream port\n");
return -EINVAL;
}
while (retries--) {
state = tb_port_state(port);
switch (state) {
case TB_PORT_DISABLED:
tb_port_dbg(port, "is disabled (state: 0)\n");
return 0;
case TB_PORT_UNPLUGGED:
if (wait_if_unplugged) {
/* used during resume */
tb_port_dbg(port,
"is unplugged (state: 7), retrying...\n");
msleep(100);
break;
}
tb_port_dbg(port, "is unplugged (state: 7)\n");
return 0;
case TB_PORT_UP:
case TB_PORT_TX_CL0S:
case TB_PORT_RX_CL0S:
case TB_PORT_CL1:
case TB_PORT_CL2:
tb_port_dbg(port, "is connected, link is up (state: %d)\n", state);
return 1;
default:
if (state < 0)
return state;
/*
* After plug-in the state is TB_PORT_CONNECTING. Give it some
* time.
*/
tb_port_dbg(port,
"is connected, link is not up (state: %d), retrying...\n",
state);
msleep(100);
}
}
tb_port_warn(port,
"failed to reach state TB_PORT_UP. Ignoring port...\n");
return 0;
}
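/*
 * Usage sketch (illustrative only): how a caller might interpret the
 * return value of tb_wait_for_port() as documented above. The function
 * name is made up for the example.
 */
#if 0
static void example_wait_for_port(struct tb_port *port)
{
	int ret = tb_wait_for_port(port, false);

	if (ret < 0)
		tb_port_warn(port, "waiting for port failed: %d\n", ret);
	else if (ret == 0)
		tb_port_dbg(port, "nothing usable connected\n");
	else
		tb_port_dbg(port, "connected and link is up\n");
}
#endif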
/**
* tb_port_add_nfc_credits() - add/remove non flow controlled credits to port
* @port: Port to add/remove NFC credits
* @credits: Credits to add/remove
*
* Change the number of NFC credits allocated to @port by @credits. To remove
* NFC credits pass a negative amount of credits.
*
* Return: Returns 0 on success or an error code on failure.
*/
int tb_port_add_nfc_credits(struct tb_port *port, int credits)
{
u32 nfc_credits;
if (credits == 0 || port->sw->is_unplugged)
return 0;
/*
* USB4 restricts programming NFC buffers to lane adapters only
* so skip other ports.
*/
if (tb_switch_is_usb4(port->sw) && !tb_port_is_null(port))
return 0;
nfc_credits = port->config.nfc_credits & ADP_CS_4_NFC_BUFFERS_MASK;
if (credits < 0)
credits = max_t(int, -nfc_credits, credits);
nfc_credits += credits;
tb_port_dbg(port, "adding %d NFC credits to %lu", credits,
port->config.nfc_credits & ADP_CS_4_NFC_BUFFERS_MASK);
port->config.nfc_credits &= ~ADP_CS_4_NFC_BUFFERS_MASK;
port->config.nfc_credits |= nfc_credits;
return tb_port_write(port, &port->config.nfc_credits,
TB_CFG_PORT, ADP_CS_4, 1);
}
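/*
 * Illustrative sketch (not part of the driver): reserving NFC credits and
 * then releasing them again by passing a negative amount, as described in
 * the kernel-doc above. The credit count is arbitrary.
 */
#if 0
static int example_nfc_credits(struct tb_port *port)
{
	int ret;

	ret = tb_port_add_nfc_credits(port, 2);		/* reserve two credits */
	if (ret)
		return ret;
	return tb_port_add_nfc_credits(port, -2);	/* release them again */
}
#endif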
/**
* tb_port_clear_counter() - clear a counter in TB_CFG_COUNTER
* @port: Port whose counters to clear
* @counter: Counter index to clear
*
* Return: Returns 0 on success or an error code on failure.
*/
int tb_port_clear_counter(struct tb_port *port, int counter)
{
u32 zero[3] = { 0, 0, 0 };
tb_port_dbg(port, "clearing counter %d\n", counter);
return tb_port_write(port, zero, TB_CFG_COUNTERS, 3 * counter, 3);
}
/**
* tb_port_unlock() - Unlock downstream port
* @port: Port to unlock
*
* Needed for USB4 but can be called for any CIO/USB4 ports. Makes the
* downstream router accessible for CM.
*/
int tb_port_unlock(struct tb_port *port)
{
if (tb_switch_is_icm(port->sw))
return 0;
if (!tb_port_is_null(port))
return -EINVAL;
if (tb_switch_is_usb4(port->sw))
return usb4_port_unlock(port);
return 0;
}
static int __tb_port_enable(struct tb_port *port, bool enable)
{
int ret;
u32 phy;
if (!tb_port_is_null(port))
return -EINVAL;
ret = tb_port_read(port, &phy, TB_CFG_PORT,
port->cap_phy + LANE_ADP_CS_1, 1);
if (ret)
return ret;
if (enable)
phy &= ~LANE_ADP_CS_1_LD;
else
phy |= LANE_ADP_CS_1_LD;
ret = tb_port_write(port, &phy, TB_CFG_PORT,
port->cap_phy + LANE_ADP_CS_1, 1);
if (ret)
return ret;
tb_port_dbg(port, "lane %s\n", str_enabled_disabled(enable));
return 0;
}
/**
* tb_port_enable() - Enable lane adapter
* @port: Port to enable (can be %NULL)
*
* This is used for lane 0 and 1 adapters to enable them.
*/
int tb_port_enable(struct tb_port *port)
{
return __tb_port_enable(port, true);
}
/**
* tb_port_disable() - Disable lane adapter
* @port: Port to disable (can be %NULL)
*
* This is used for lane 0 and 1 adapters to disable them.
*/
int tb_port_disable(struct tb_port *port)
{
return __tb_port_enable(port, false);
}
/*
* tb_init_port() - initialize a port
*
* This is a helper method for tb_switch_alloc. Does not check or initialize
* any downstream switches.
*
* Return: Returns 0 on success or an error code on failure.
*/
static int tb_init_port(struct tb_port *port)
{
int res;
int cap;
INIT_LIST_HEAD(&port->list);
/* Control adapter does not have configuration space */
if (!port->port)
return 0;
res = tb_port_read(port, &port->config, TB_CFG_PORT, 0, 8);
if (res) {
if (res == -ENODEV) {
tb_dbg(port->sw->tb, " Port %d: not implemented\n",
port->port);
port->disabled = true;
return 0;
}
return res;
}
/* Port 0 is the switch itself and has no PHY. */
if (port->config.type == TB_TYPE_PORT) {
cap = tb_port_find_cap(port, TB_PORT_CAP_PHY);
if (cap > 0)
port->cap_phy = cap;
else
tb_port_WARN(port, "non switch port without a PHY\n");
cap = tb_port_find_cap(port, TB_PORT_CAP_USB4);
if (cap > 0)
port->cap_usb4 = cap;
/*
* For USB4 ports the buffers allocated for the control path
* can be read from the path config space. For legacy
* devices we use a hard-coded value.
*/
if (port->cap_usb4) {
struct tb_regs_hop hop;
if (!tb_port_read(port, &hop, TB_CFG_HOPS, 0, 2))
port->ctl_credits = hop.initial_credits;
}
if (!port->ctl_credits)
port->ctl_credits = 2;
} else {
cap = tb_port_find_cap(port, TB_PORT_CAP_ADAP);
if (cap > 0)
port->cap_adap = cap;
}
port->total_credits =
(port->config.nfc_credits & ADP_CS_4_TOTAL_BUFFERS_MASK) >>
ADP_CS_4_TOTAL_BUFFERS_SHIFT;
tb_dump_port(port->sw->tb, port);
return 0;
}
static int tb_port_alloc_hopid(struct tb_port *port, bool in, int min_hopid,
int max_hopid)
{
int port_max_hopid;
struct ida *ida;
if (in) {
port_max_hopid = port->config.max_in_hop_id;
ida = &port->in_hopids;
} else {
port_max_hopid = port->config.max_out_hop_id;
ida = &port->out_hopids;
}
/*
* NHI can use HopIDs 1-max. For other adapters HopIDs 0-7 are
* reserved.
*/
if (!tb_port_is_nhi(port) && min_hopid < TB_PATH_MIN_HOPID)
min_hopid = TB_PATH_MIN_HOPID;
if (max_hopid < 0 || max_hopid > port_max_hopid)
max_hopid = port_max_hopid;
return ida_simple_get(ida, min_hopid, max_hopid + 1, GFP_KERNEL);
}
/**
* tb_port_alloc_in_hopid() - Allocate input HopID from port
* @port: Port to allocate HopID for
* @min_hopid: Minimum acceptable input HopID
* @max_hopid: Maximum acceptable input HopID
*
* Return: HopID between @min_hopid and @max_hopid or negative errno in
* case of error.
*/
int tb_port_alloc_in_hopid(struct tb_port *port, int min_hopid, int max_hopid)
{
return tb_port_alloc_hopid(port, true, min_hopid, max_hopid);
}
/**
* tb_port_alloc_out_hopid() - Allocate output HopID from port
* @port: Port to allocate HopID for
* @min_hopid: Minimum acceptable output HopID
* @max_hopid: Maximum acceptable output HopID
*
* Return: HopID between @min_hopid and @max_hopid or negative errno in
* case of error.
*/
int tb_port_alloc_out_hopid(struct tb_port *port, int min_hopid, int max_hopid)
{
return tb_port_alloc_hopid(port, false, min_hopid, max_hopid);
}
/**
* tb_port_release_in_hopid() - Release allocated input HopID from port
* @port: Port whose HopID to release
* @hopid: HopID to release
*/
void tb_port_release_in_hopid(struct tb_port *port, int hopid)
{
ida_simple_remove(&port->in_hopids, hopid);
}
/**
* tb_port_release_out_hopid() - Release allocated output HopID from port
* @port: Port whose HopID to release
* @hopid: HopID to release
*/
void tb_port_release_out_hopid(struct tb_port *port, int hopid)
{
ida_simple_remove(&port->out_hopids, hopid);
}
static inline bool tb_switch_is_reachable(const struct tb_switch *parent,
const struct tb_switch *sw)
{
u64 mask = (1ULL << parent->config.depth * 8) - 1;
return (tb_route(parent) & mask) == (tb_route(sw) & mask);
}
/**
* tb_next_port_on_path() - Return next port for given port on a path
* @start: Start port of the walk
* @end: End port of the walk
* @prev: Previous port (%NULL if this is the first)
*
* This function can be used to walk from one port to another if they
* are connected through zero or more switches. If the @prev is dual
* link port, the function follows that link and returns another end on
* that same link.
*
* If the @end port has been reached, return %NULL.
*
* Domain tb->lock must be held when this function is called.
*/
struct tb_port *tb_next_port_on_path(struct tb_port *start, struct tb_port *end,
struct tb_port *prev)
{
struct tb_port *next;
if (!prev)
return start;
if (prev->sw == end->sw) {
if (prev == end)
return NULL;
return end;
}
if (tb_switch_is_reachable(prev->sw, end->sw)) {
next = tb_port_at(tb_route(end->sw), prev->sw);
/* Walk down the topology if next == prev */
if (prev->remote &&
(next == prev || next->dual_link_port == prev))
next = prev->remote;
} else {
if (tb_is_upstream_port(prev)) {
next = prev->remote;
} else {
next = tb_upstream_port(prev->sw);
/*
* Keep the same link if prev and next are both
* dual link ports.
*/
if (next->dual_link_port &&
next->link_nr != prev->link_nr) {
next = next->dual_link_port;
}
}
}
return next != prev ? next : NULL;
}
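/*
 * Usage sketch (illustrative only): walking all ports between two adapters
 * with tb_next_port_on_path(). The domain lock is assumed to be held by
 * the caller as required by the kernel-doc above.
 */
#if 0
static void example_walk_path(struct tb_port *src, struct tb_port *dst)
{
	struct tb_port *p = NULL;

	while ((p = tb_next_port_on_path(src, dst, p)) != NULL)
		tb_port_dbg(p, "port is on the path\n");
}
#endif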
/**
* tb_port_get_link_speed() - Get current link speed
* @port: Port to check (USB4 or CIO)
*
* Returns link speed in Gb/s or negative errno in case of failure.
*/
int tb_port_get_link_speed(struct tb_port *port)
{
u32 val, speed;
int ret;
if (!port->cap_phy)
return -EINVAL;
ret = tb_port_read(port, &val, TB_CFG_PORT,
port->cap_phy + LANE_ADP_CS_1, 1);
if (ret)
return ret;
speed = (val & LANE_ADP_CS_1_CURRENT_SPEED_MASK) >>
LANE_ADP_CS_1_CURRENT_SPEED_SHIFT;
switch (speed) {
case LANE_ADP_CS_1_CURRENT_SPEED_GEN4:
return 40;
case LANE_ADP_CS_1_CURRENT_SPEED_GEN3:
return 20;
default:
return 10;
}
}
/**
* tb_port_get_link_width() - Get current link width
* @port: Port to check (USB4 or CIO)
*
* Returns link width. Return the link width as encoded in &enum
* tb_link_width or negative errno in case of failure.
*/
int tb_port_get_link_width(struct tb_port *port)
{
u32 val;
int ret;
if (!port->cap_phy)
return -EINVAL;
ret = tb_port_read(port, &val, TB_CFG_PORT,
port->cap_phy + LANE_ADP_CS_1, 1);
if (ret)
return ret;
/* Matches the values in enum tb_link_width */
return (val & LANE_ADP_CS_1_CURRENT_WIDTH_MASK) >>
LANE_ADP_CS_1_CURRENT_WIDTH_SHIFT;
}
static bool tb_port_is_width_supported(struct tb_port *port,
unsigned int width_mask)
{
u32 phy, widths;
int ret;
if (!port->cap_phy)
return false;
ret = tb_port_read(port, &phy, TB_CFG_PORT,
port->cap_phy + LANE_ADP_CS_0, 1);
if (ret)
return false;
widths = (phy & LANE_ADP_CS_0_SUPPORTED_WIDTH_MASK) >>
LANE_ADP_CS_0_SUPPORTED_WIDTH_SHIFT;
return widths & width_mask;
}
static bool is_gen4_link(struct tb_port *port)
{
return tb_port_get_link_speed(port) > 20;
}
/**
* tb_port_set_link_width() - Set target link width of the lane adapter
* @port: Lane adapter
* @width: Target link width
*
* Sets the target link width of the lane adapter to @width. Does not
* enable/disable lane bonding. For that call tb_port_set_lane_bonding().
*
* Return: %0 in case of success and negative errno in case of error
*/
int tb_port_set_link_width(struct tb_port *port, enum tb_link_width width)
{
u32 val;
int ret;
if (!port->cap_phy)
return -EINVAL;
ret = tb_port_read(port, &val, TB_CFG_PORT,
port->cap_phy + LANE_ADP_CS_1, 1);
if (ret)
return ret;
val &= ~LANE_ADP_CS_1_TARGET_WIDTH_MASK;
switch (width) {
case TB_LINK_WIDTH_SINGLE:
/* Gen 4 link cannot be single */
if (is_gen4_link(port))
return -EOPNOTSUPP;
val |= LANE_ADP_CS_1_TARGET_WIDTH_SINGLE <<
LANE_ADP_CS_1_TARGET_WIDTH_SHIFT;
break;
case TB_LINK_WIDTH_DUAL:
val |= LANE_ADP_CS_1_TARGET_WIDTH_DUAL <<
LANE_ADP_CS_1_TARGET_WIDTH_SHIFT;
break;
default:
return -EINVAL;
}
return tb_port_write(port, &val, TB_CFG_PORT,
port->cap_phy + LANE_ADP_CS_1, 1);
}
/**
* tb_port_set_lane_bonding() - Enable/disable lane bonding
* @port: Lane adapter
* @bonding: enable/disable bonding
*
* Enables or disables lane bonding. This should be called after target
* link width has been set (tb_port_set_link_width()). Note in most
* cases one should use tb_port_lane_bonding_enable() instead to enable
* lane bonding.
*
* Return: %0 in case of success and negative errno in case of error
*/
static int tb_port_set_lane_bonding(struct tb_port *port, bool bonding)
{
u32 val;
int ret;
if (!port->cap_phy)
return -EINVAL;
ret = tb_port_read(port, &val, TB_CFG_PORT,
port->cap_phy + LANE_ADP_CS_1, 1);
if (ret)
return ret;
if (bonding)
val |= LANE_ADP_CS_1_LB;
else
val &= ~LANE_ADP_CS_1_LB;
return tb_port_write(port, &val, TB_CFG_PORT,
port->cap_phy + LANE_ADP_CS_1, 1);
}
/**
* tb_port_lane_bonding_enable() - Enable bonding on port
* @port: port to enable
*
* Enable bonding by setting the link width of the port and the other
* port in case of dual link port. Does not wait for the link to
* actually reach the bonded state so caller needs to call
* tb_port_wait_for_link_width() before enabling any paths through the
* link to make sure the link is in expected state.
*
* Return: %0 in case of success and negative errno in case of error
*/
int tb_port_lane_bonding_enable(struct tb_port *port)
{
enum tb_link_width width;
int ret;
/*
* Enable lane bonding for both links if not already enabled by
* for example the boot firmware.
*/
width = tb_port_get_link_width(port);
if (width == TB_LINK_WIDTH_SINGLE) {
ret = tb_port_set_link_width(port, TB_LINK_WIDTH_DUAL);
if (ret)
goto err_lane0;
}
width = tb_port_get_link_width(port->dual_link_port);
if (width == TB_LINK_WIDTH_SINGLE) {
ret = tb_port_set_link_width(port->dual_link_port,
TB_LINK_WIDTH_DUAL);
if (ret)
goto err_lane0;
}
/*
* Only set bonding if the link was not already bonded. This
* avoids the lane adapter re-entering the bonding state.
*/
if (width == TB_LINK_WIDTH_SINGLE) {
ret = tb_port_set_lane_bonding(port, true);
if (ret)
goto err_lane1;
}
/*
* When lane 0 bonding is set it will affect lane 1 too so
* update both.
*/
port->bonded = true;
port->dual_link_port->bonded = true;
return 0;
err_lane1:
tb_port_set_link_width(port->dual_link_port, TB_LINK_WIDTH_SINGLE);
err_lane0:
tb_port_set_link_width(port, TB_LINK_WIDTH_SINGLE);
return ret;
}
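/*
 * Usage sketch (illustrative only): the sequence implied by the kernel-doc
 * above -- enable bonding, wait for the link to actually reach dual width
 * and then re-read the credits. The 100 ms timeout is an assumption for
 * the example.
 */
#if 0
static int example_bond_link(struct tb_port *port)
{
	int ret;

	ret = tb_port_lane_bonding_enable(port);
	if (ret)
		return ret;

	ret = tb_port_wait_for_link_width(port, TB_LINK_WIDTH_DUAL, 100);
	if (ret) {
		tb_port_lane_bonding_disable(port);
		return ret;
	}

	return tb_port_update_credits(port);
}
#endif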
/**
* tb_port_lane_bonding_disable() - Disable bonding on port
* @port: port to disable
*
* Disable bonding by setting the link width of the port and the
* other port in case of dual link port.
*/
void tb_port_lane_bonding_disable(struct tb_port *port)
{
tb_port_set_lane_bonding(port, false);
tb_port_set_link_width(port->dual_link_port, TB_LINK_WIDTH_SINGLE);
tb_port_set_link_width(port, TB_LINK_WIDTH_SINGLE);
port->dual_link_port->bonded = false;
port->bonded = false;
}
/**
* tb_port_wait_for_link_width() - Wait until link reaches specific width
* @port: Port to wait for
* @width_mask: Expected link width mask
* @timeout_msec: Timeout in ms how long to wait
*
* Should be used after both ends of the link have been bonded (or
* bonding has been disabled) to wait until the link actually reaches
* the expected state. Returns %-ETIMEDOUT if the width was not reached
* within the given timeout, %0 if it did. Can be passed a mask of
* expected widths and succeeds if any of the widths is reached.
*/
int tb_port_wait_for_link_width(struct tb_port *port, unsigned int width_mask,
int timeout_msec)
{
ktime_t timeout = ktime_add_ms(ktime_get(), timeout_msec);
int ret;
/* Gen 4 link does not support single lane */
if ((width_mask & TB_LINK_WIDTH_SINGLE) && is_gen4_link(port))
return -EOPNOTSUPP;
do {
ret = tb_port_get_link_width(port);
if (ret < 0) {
/*
* Sometimes we get port locked error when
* polling the lanes so we can ignore it and
* retry.
*/
if (ret != -EACCES)
return ret;
} else if (ret & width_mask) {
return 0;
}
usleep_range(1000, 2000);
} while (ktime_before(ktime_get(), timeout));
return -ETIMEDOUT;
}
static int tb_port_do_update_credits(struct tb_port *port)
{
u32 nfc_credits;
int ret;
ret = tb_port_read(port, &nfc_credits, TB_CFG_PORT, ADP_CS_4, 1);
if (ret)
return ret;
if (nfc_credits != port->config.nfc_credits) {
u32 total;
total = (nfc_credits & ADP_CS_4_TOTAL_BUFFERS_MASK) >>
ADP_CS_4_TOTAL_BUFFERS_SHIFT;
tb_port_dbg(port, "total credits changed %u -> %u\n",
port->total_credits, total);
port->config.nfc_credits = nfc_credits;
port->total_credits = total;
}
return 0;
}
/**
* tb_port_update_credits() - Re-read port total credits
* @port: Port to update
*
* After the link is bonded (or bonding was disabled) the port total
* credits may change, so this function needs to be called to re-read
* the credits. Updates also the second lane adapter.
*/
int tb_port_update_credits(struct tb_port *port)
{
int ret;
ret = tb_port_do_update_credits(port);
if (ret)
return ret;
return tb_port_do_update_credits(port->dual_link_port);
}
static int tb_port_start_lane_initialization(struct tb_port *port)
{
int ret;
if (tb_switch_is_usb4(port->sw))
return 0;
ret = tb_lc_start_lane_initialization(port);
return ret == -EINVAL ? 0 : ret;
}
/*
* Returns true if the port had something (router, XDomain) connected
* before suspend.
*/
static bool tb_port_resume(struct tb_port *port)
{
bool has_remote = tb_port_has_remote(port);
if (port->usb4) {
usb4_port_device_resume(port->usb4);
} else if (!has_remote) {
/*
* For disconnected downstream lane adapters start lane
* initialization now so we detect future connects.
*
* For XDomain start the lane initialization now so the
* link gets re-established.
*
* This is only needed for non-USB4 ports.
*/
if (!tb_is_upstream_port(port) || port->xdomain)
tb_port_start_lane_initialization(port);
}
return has_remote || port->xdomain;
}
/**
* tb_port_is_enabled() - Is the adapter port enabled
* @port: Port to check
*/
bool tb_port_is_enabled(struct tb_port *port)
{
switch (port->config.type) {
case TB_TYPE_PCIE_UP:
case TB_TYPE_PCIE_DOWN:
return tb_pci_port_is_enabled(port);
case TB_TYPE_DP_HDMI_IN:
case TB_TYPE_DP_HDMI_OUT:
return tb_dp_port_is_enabled(port);
case TB_TYPE_USB3_UP:
case TB_TYPE_USB3_DOWN:
return tb_usb3_port_is_enabled(port);
default:
return false;
}
}
/**
* tb_usb3_port_is_enabled() - Is the USB3 adapter port enabled
* @port: USB3 adapter port to check
*/
bool tb_usb3_port_is_enabled(struct tb_port *port)
{
u32 data;
if (tb_port_read(port, &data, TB_CFG_PORT,
port->cap_adap + ADP_USB3_CS_0, 1))
return false;
return !!(data & ADP_USB3_CS_0_PE);
}
/**
* tb_usb3_port_enable() - Enable USB3 adapter port
* @port: USB3 adapter port to enable
* @enable: Enable/disable the USB3 adapter
*/
int tb_usb3_port_enable(struct tb_port *port, bool enable)
{
u32 word = enable ? (ADP_USB3_CS_0_PE | ADP_USB3_CS_0_V)
: ADP_USB3_CS_0_V;
if (!port->cap_adap)
return -ENXIO;
return tb_port_write(port, &word, TB_CFG_PORT,
port->cap_adap + ADP_USB3_CS_0, 1);
}
/**
* tb_pci_port_is_enabled() - Is the PCIe adapter port enabled
* @port: PCIe port to check
*/
bool tb_pci_port_is_enabled(struct tb_port *port)
{
u32 data;
if (tb_port_read(port, &data, TB_CFG_PORT,
port->cap_adap + ADP_PCIE_CS_0, 1))
return false;
return !!(data & ADP_PCIE_CS_0_PE);
}
/**
* tb_pci_port_enable() - Enable PCIe adapter port
* @port: PCIe port to enable
* @enable: Enable/disable the PCIe adapter
*/
int tb_pci_port_enable(struct tb_port *port, bool enable)
{
u32 word = enable ? ADP_PCIE_CS_0_PE : 0x0;
if (!port->cap_adap)
return -ENXIO;
return tb_port_write(port, &word, TB_CFG_PORT,
port->cap_adap + ADP_PCIE_CS_0, 1);
}
/**
* tb_dp_port_hpd_is_active() - Is HPD already active
* @port: DP out port to check
*
* Checks if the DP OUT adapter port has HDP bit already set.
*/
int tb_dp_port_hpd_is_active(struct tb_port *port)
{
u32 data;
int ret;
ret = tb_port_read(port, &data, TB_CFG_PORT,
port->cap_adap + ADP_DP_CS_2, 1);
if (ret)
return ret;
return !!(data & ADP_DP_CS_2_HDP);
}
/**
* tb_dp_port_hpd_clear() - Clear HPD from DP IN port
* @port: Port to clear HPD
*
* If the DP IN port has HDP set, this function can be used to clear it.
*/
int tb_dp_port_hpd_clear(struct tb_port *port)
{
u32 data;
int ret;
ret = tb_port_read(port, &data, TB_CFG_PORT,
port->cap_adap + ADP_DP_CS_3, 1);
if (ret)
return ret;
data |= ADP_DP_CS_3_HDPC;
return tb_port_write(port, &data, TB_CFG_PORT,
port->cap_adap + ADP_DP_CS_3, 1);
}
/**
* tb_dp_port_set_hops() - Set video/aux Hop IDs for DP port
* @port: DP IN/OUT port to set hops
* @video: Video Hop ID
* @aux_tx: AUX TX Hop ID
* @aux_rx: AUX RX Hop ID
*
* Programs specified Hop IDs for DP IN/OUT port. Can be called for USB4
* router DP adapters too but does not program the values as the fields
* are read-only.
*/
int tb_dp_port_set_hops(struct tb_port *port, unsigned int video,
unsigned int aux_tx, unsigned int aux_rx)
{
u32 data[2];
int ret;
if (tb_switch_is_usb4(port->sw))
return 0;
ret = tb_port_read(port, data, TB_CFG_PORT,
port->cap_adap + ADP_DP_CS_0, ARRAY_SIZE(data));
if (ret)
return ret;
data[0] &= ~ADP_DP_CS_0_VIDEO_HOPID_MASK;
data[1] &= ~ADP_DP_CS_1_AUX_TX_HOPID_MASK;
data[1] &= ~ADP_DP_CS_1_AUX_RX_HOPID_MASK;
data[0] |= (video << ADP_DP_CS_0_VIDEO_HOPID_SHIFT) &
ADP_DP_CS_0_VIDEO_HOPID_MASK;
data[1] |= aux_tx & ADP_DP_CS_1_AUX_TX_HOPID_MASK;
data[1] |= (aux_rx << ADP_DP_CS_1_AUX_RX_HOPID_SHIFT) &
ADP_DP_CS_1_AUX_RX_HOPID_MASK;
return tb_port_write(port, data, TB_CFG_PORT,
port->cap_adap + ADP_DP_CS_0, ARRAY_SIZE(data));
}
/**
* tb_dp_port_is_enabled() - Is DP adapter port enabled
* @port: DP adapter port to check
*/
bool tb_dp_port_is_enabled(struct tb_port *port)
{
u32 data[2];
if (tb_port_read(port, data, TB_CFG_PORT, port->cap_adap + ADP_DP_CS_0,
ARRAY_SIZE(data)))
return false;
return !!(data[0] & (ADP_DP_CS_0_VE | ADP_DP_CS_0_AE));
}
/**
* tb_dp_port_enable() - Enables/disables DP paths of a port
* @port: DP IN/OUT port
* @enable: Enable/disable DP path
*
* Once Hop IDs are programmed DP paths can be enabled or disabled by
* calling this function.
*/
int tb_dp_port_enable(struct tb_port *port, bool enable)
{
u32 data[2];
int ret;
ret = tb_port_read(port, data, TB_CFG_PORT,
port->cap_adap + ADP_DP_CS_0, ARRAY_SIZE(data));
if (ret)
return ret;
if (enable)
data[0] |= ADP_DP_CS_0_VE | ADP_DP_CS_0_AE;
else
data[0] &= ~(ADP_DP_CS_0_VE | ADP_DP_CS_0_AE);
return tb_port_write(port, data, TB_CFG_PORT,
port->cap_adap + ADP_DP_CS_0, ARRAY_SIZE(data));
}
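/*
 * Illustrative sketch (not part of the driver): the order implied by the
 * kernel-doc above -- program the Hop IDs first, then enable the video
 * and AUX paths. The Hop ID values are made up for the example.
 */
#if 0
static int example_enable_dp_adapter(struct tb_port *port)
{
	int ret;

	ret = tb_dp_port_set_hops(port, 8, 9, 10);	/* video, AUX TX, AUX RX */
	if (ret)
		return ret;
	return tb_dp_port_enable(port, true);
}
#endif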
/* switch utility functions */
static const char *tb_switch_generation_name(const struct tb_switch *sw)
{
switch (sw->generation) {
case 1:
return "Thunderbolt 1";
case 2:
return "Thunderbolt 2";
case 3:
return "Thunderbolt 3";
case 4:
return "USB4";
default:
return "Unknown";
}
}
static void tb_dump_switch(const struct tb *tb, const struct tb_switch *sw)
{
const struct tb_regs_switch_header *regs = &sw->config;
tb_dbg(tb, " %s Switch: %x:%x (Revision: %d, TB Version: %d)\n",
tb_switch_generation_name(sw), regs->vendor_id, regs->device_id,
regs->revision, regs->thunderbolt_version);
tb_dbg(tb, " Max Port Number: %d\n", regs->max_port_number);
tb_dbg(tb, " Config:\n");
tb_dbg(tb,
" Upstream Port Number: %d Depth: %d Route String: %#llx Enabled: %d, PlugEventsDelay: %dms\n",
regs->upstream_port_number, regs->depth,
(((u64) regs->route_hi) << 32) | regs->route_lo,
regs->enabled, regs->plug_events_delay);
tb_dbg(tb, " unknown1: %#x unknown4: %#x\n",
regs->__unknown1, regs->__unknown4);
}
/**
* tb_switch_reset() - reconfigure route, enable and send TB_CFG_PKG_RESET
* @sw: Switch to reset
*
* Return: Returns 0 on success or an error code on failure.
*/
int tb_switch_reset(struct tb_switch *sw)
{
struct tb_cfg_result res;
if (sw->generation > 1)
return 0;
tb_sw_dbg(sw, "resetting switch\n");
res.err = tb_sw_write(sw, ((u32 *) &sw->config) + 2,
TB_CFG_SWITCH, 2, 2);
if (res.err)
return res.err;
res = tb_cfg_reset(sw->tb->ctl, tb_route(sw));
if (res.err > 0)
return -EIO;
return res.err;
}
/**
* tb_switch_wait_for_bit() - Wait for specified value of bits in offset
* @sw: Router to read the offset value from
* @offset: Offset in the router config space to read from
* @bit: Bit mask in the offset to wait for
* @value: Value of the bits to wait for
* @timeout_msec: Timeout in ms how long to wait
*
* Wait till the specified bits in specified offset reach specified value.
* Returns %0 in case of success, %-ETIMEDOUT if the @value was not reached
* within the given timeout or a negative errno in case of failure.
*/
int tb_switch_wait_for_bit(struct tb_switch *sw, u32 offset, u32 bit,
u32 value, int timeout_msec)
{
ktime_t timeout = ktime_add_ms(ktime_get(), timeout_msec);
do {
u32 val;
int ret;
ret = tb_sw_read(sw, &val, TB_CFG_SWITCH, offset, 1);
if (ret)
return ret;
if ((val & bit) == value)
return 0;
usleep_range(50, 100);
} while (ktime_before(ktime_get(), timeout));
return -ETIMEDOUT;
}
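/*
 * Example (illustrative sketch): polling a register bit with
 * tb_switch_wait_for_bit(). The register and bit names below follow the
 * ones used elsewhere in this driver and should be treated as assumptions
 * here, not as part of this file.
 *
 *	ret = tb_switch_wait_for_bit(sw, ROUTER_CS_6, ROUTER_CS_6_SLPR,
 *				     ROUTER_CS_6_SLPR, 500);
 *	if (ret == -ETIMEDOUT)
 *		tb_sw_warn(sw, "timeout waiting for sleep ready\n");
 */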
/*
* tb_plug_events_active() - enable/disable plug events on a switch
*
* Also configures a sane plug_events_delay of 255ms.
*
* Return: Returns 0 on success or an error code on failure.
*/
static int tb_plug_events_active(struct tb_switch *sw, bool active)
{
u32 data;
int res;
if (tb_switch_is_icm(sw) || tb_switch_is_usb4(sw))
return 0;
sw->config.plug_events_delay = 0xff;
res = tb_sw_write(sw, ((u32 *) &sw->config) + 4, TB_CFG_SWITCH, 4, 1);
if (res)
return res;
res = tb_sw_read(sw, &data, TB_CFG_SWITCH, sw->cap_plug_events + 1, 1);
if (res)
return res;
if (active) {
data = data & 0xFFFFFF83;
switch (sw->config.device_id) {
case PCI_DEVICE_ID_INTEL_LIGHT_RIDGE:
case PCI_DEVICE_ID_INTEL_EAGLE_RIDGE:
case PCI_DEVICE_ID_INTEL_PORT_RIDGE:
break;
default:
/*
* Skip Alpine Ridge, it needs to have vendor
* specific USB hotplug event enabled for the
* internal xHCI to work.
*/
if (!tb_switch_is_alpine_ridge(sw))
data |= TB_PLUG_EVENTS_USB_DISABLE;
}
} else {
data = data | 0x7c;
}
return tb_sw_write(sw, &data, TB_CFG_SWITCH,
sw->cap_plug_events + 1, 1);
}
static ssize_t authorized_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct tb_switch *sw = tb_to_switch(dev);
return sysfs_emit(buf, "%u\n", sw->authorized);
}
static int disapprove_switch(struct device *dev, void *not_used)
{
char *envp[] = { "AUTHORIZED=0", NULL };
struct tb_switch *sw;
sw = tb_to_switch(dev);
if (sw && sw->authorized) {
int ret;
/* First children */
ret = device_for_each_child_reverse(&sw->dev, NULL, disapprove_switch);
if (ret)
return ret;
ret = tb_domain_disapprove_switch(sw->tb, sw);
if (ret)
return ret;
sw->authorized = 0;
kobject_uevent_env(&sw->dev.kobj, KOBJ_CHANGE, envp);
}
return 0;
}
static int tb_switch_set_authorized(struct tb_switch *sw, unsigned int val)
{
char envp_string[13];
int ret = -EINVAL;
char *envp[] = { envp_string, NULL };
if (!mutex_trylock(&sw->tb->lock))
return restart_syscall();
if (!!sw->authorized == !!val)
goto unlock;
switch (val) {
/* Disapprove switch */
case 0:
if (tb_route(sw)) {
ret = disapprove_switch(&sw->dev, NULL);
goto unlock;
}
break;
/* Approve switch */
case 1:
if (sw->key)
ret = tb_domain_approve_switch_key(sw->tb, sw);
else
ret = tb_domain_approve_switch(sw->tb, sw);
break;
/* Challenge switch */
case 2:
if (sw->key)
ret = tb_domain_challenge_switch_key(sw->tb, sw);
break;
default:
break;
}
if (!ret) {
sw->authorized = val;
/*
* Notify status change to the userspace, informing the new
* value of /sys/bus/thunderbolt/devices/.../authorized.
*/
sprintf(envp_string, "AUTHORIZED=%u", sw->authorized);
kobject_uevent_env(&sw->dev.kobj, KOBJ_CHANGE, envp);
}
unlock:
mutex_unlock(&sw->tb->lock);
return ret;
}
static ssize_t authorized_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
struct tb_switch *sw = tb_to_switch(dev);
unsigned int val;
ssize_t ret;
ret = kstrtouint(buf, 0, &val);
if (ret)
return ret;
if (val > 2)
return -EINVAL;
pm_runtime_get_sync(&sw->dev);
ret = tb_switch_set_authorized(sw, val);
pm_runtime_mark_last_busy(&sw->dev);
pm_runtime_put_autosuspend(&sw->dev);
return ret ? ret : count;
}
static DEVICE_ATTR_RW(authorized);
static ssize_t boot_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct tb_switch *sw = tb_to_switch(dev);
return sysfs_emit(buf, "%u\n", sw->boot);
}
static DEVICE_ATTR_RO(boot);
static ssize_t device_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct tb_switch *sw = tb_to_switch(dev);
return sysfs_emit(buf, "%#x\n", sw->device);
}
static DEVICE_ATTR_RO(device);
static ssize_t
device_name_show(struct device *dev, struct device_attribute *attr, char *buf)
{
struct tb_switch *sw = tb_to_switch(dev);
return sysfs_emit(buf, "%s\n", sw->device_name ?: "");
}
static DEVICE_ATTR_RO(device_name);
static ssize_t
generation_show(struct device *dev, struct device_attribute *attr, char *buf)
{
struct tb_switch *sw = tb_to_switch(dev);
return sysfs_emit(buf, "%u\n", sw->generation);
}
static DEVICE_ATTR_RO(generation);
static ssize_t key_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct tb_switch *sw = tb_to_switch(dev);
ssize_t ret;
if (!mutex_trylock(&sw->tb->lock))
return restart_syscall();
if (sw->key)
ret = sysfs_emit(buf, "%*phN\n", TB_SWITCH_KEY_SIZE, sw->key);
else
ret = sysfs_emit(buf, "\n");
mutex_unlock(&sw->tb->lock);
return ret;
}
static ssize_t key_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
struct tb_switch *sw = tb_to_switch(dev);
u8 key[TB_SWITCH_KEY_SIZE];
ssize_t ret = count;
bool clear = false;
if (!strcmp(buf, "\n"))
clear = true;
else if (hex2bin(key, buf, sizeof(key)))
return -EINVAL;
if (!mutex_trylock(&sw->tb->lock))
return restart_syscall();
if (sw->authorized) {
ret = -EBUSY;
} else {
kfree(sw->key);
if (clear) {
sw->key = NULL;
} else {
sw->key = kmemdup(key, sizeof(key), GFP_KERNEL);
if (!sw->key)
ret = -ENOMEM;
}
}
mutex_unlock(&sw->tb->lock);
return ret;
}
static DEVICE_ATTR(key, 0600, key_show, key_store);
static ssize_t speed_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct tb_switch *sw = tb_to_switch(dev);
return sysfs_emit(buf, "%u.0 Gb/s\n", sw->link_speed);
}
/*
* Currently all lanes must run at the same speed but we expose here
* both directions to allow possible asymmetric links in the future.
*/
static DEVICE_ATTR(rx_speed, 0444, speed_show, NULL);
static DEVICE_ATTR(tx_speed, 0444, speed_show, NULL);
static ssize_t rx_lanes_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct tb_switch *sw = tb_to_switch(dev);
unsigned int width;
switch (sw->link_width) {
case TB_LINK_WIDTH_SINGLE:
case TB_LINK_WIDTH_ASYM_TX:
width = 1;
break;
case TB_LINK_WIDTH_DUAL:
width = 2;
break;
case TB_LINK_WIDTH_ASYM_RX:
width = 3;
break;
default:
WARN_ON_ONCE(1);
return -EINVAL;
}
return sysfs_emit(buf, "%u\n", width);
}
static DEVICE_ATTR(rx_lanes, 0444, rx_lanes_show, NULL);
static ssize_t tx_lanes_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct tb_switch *sw = tb_to_switch(dev);
unsigned int width;
switch (sw->link_width) {
case TB_LINK_WIDTH_SINGLE:
case TB_LINK_WIDTH_ASYM_RX:
width = 1;
break;
case TB_LINK_WIDTH_DUAL:
width = 2;
break;
case TB_LINK_WIDTH_ASYM_TX:
width = 3;
break;
default:
WARN_ON_ONCE(1);
return -EINVAL;
}
return sysfs_emit(buf, "%u\n", width);
}
static DEVICE_ATTR(tx_lanes, 0444, tx_lanes_show, NULL);
static ssize_t nvm_authenticate_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct tb_switch *sw = tb_to_switch(dev);
u32 status;
nvm_get_auth_status(sw, &status);
return sysfs_emit(buf, "%#x\n", status);
}
static ssize_t nvm_authenticate_sysfs(struct device *dev, const char *buf,
bool disconnect)
{
struct tb_switch *sw = tb_to_switch(dev);
int val, ret;
pm_runtime_get_sync(&sw->dev);
if (!mutex_trylock(&sw->tb->lock)) {
ret = restart_syscall();
goto exit_rpm;
}
if (sw->no_nvm_upgrade) {
ret = -EOPNOTSUPP;
goto exit_unlock;
}
/* If NVMem devices are not yet added */
if (!sw->nvm) {
ret = -EAGAIN;
goto exit_unlock;
}
ret = kstrtoint(buf, 10, &val);
if (ret)
goto exit_unlock;
/* Always clear the authentication status */
nvm_clear_auth_status(sw);
if (val > 0) {
if (val == AUTHENTICATE_ONLY) {
if (disconnect)
ret = -EINVAL;
else
ret = nvm_authenticate(sw, true);
} else {
if (!sw->nvm->flushed) {
if (!sw->nvm->buf) {
ret = -EINVAL;
goto exit_unlock;
}
ret = nvm_validate_and_write(sw);
if (ret || val == WRITE_ONLY)
goto exit_unlock;
}
if (val == WRITE_AND_AUTHENTICATE) {
if (disconnect)
ret = tb_lc_force_power(sw);
else
ret = nvm_authenticate(sw, false);
}
}
}
exit_unlock:
mutex_unlock(&sw->tb->lock);
exit_rpm:
pm_runtime_mark_last_busy(&sw->dev);
pm_runtime_put_autosuspend(&sw->dev);
return ret;
}
static ssize_t nvm_authenticate_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t count)
{
int ret = nvm_authenticate_sysfs(dev, buf, false);
if (ret)
return ret;
return count;
}
static DEVICE_ATTR_RW(nvm_authenticate);
static ssize_t nvm_authenticate_on_disconnect_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
return nvm_authenticate_show(dev, attr, buf);
}
static ssize_t nvm_authenticate_on_disconnect_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t count)
{
int ret;
ret = nvm_authenticate_sysfs(dev, buf, true);
return ret ? ret : count;
}
static DEVICE_ATTR_RW(nvm_authenticate_on_disconnect);
static ssize_t nvm_version_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct tb_switch *sw = tb_to_switch(dev);
int ret;
if (!mutex_trylock(&sw->tb->lock))
return restart_syscall();
if (sw->safe_mode)
ret = -ENODATA;
else if (!sw->nvm)
ret = -EAGAIN;
else
ret = sysfs_emit(buf, "%x.%x\n", sw->nvm->major, sw->nvm->minor);
mutex_unlock(&sw->tb->lock);
return ret;
}
static DEVICE_ATTR_RO(nvm_version);
static ssize_t vendor_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct tb_switch *sw = tb_to_switch(dev);
return sysfs_emit(buf, "%#x\n", sw->vendor);
}
static DEVICE_ATTR_RO(vendor);
static ssize_t
vendor_name_show(struct device *dev, struct device_attribute *attr, char *buf)
{
struct tb_switch *sw = tb_to_switch(dev);
return sysfs_emit(buf, "%s\n", sw->vendor_name ?: "");
}
static DEVICE_ATTR_RO(vendor_name);
static ssize_t unique_id_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct tb_switch *sw = tb_to_switch(dev);
return sysfs_emit(buf, "%pUb\n", sw->uuid);
}
static DEVICE_ATTR_RO(unique_id);
static struct attribute *switch_attrs[] = {
&dev_attr_authorized.attr,
&dev_attr_boot.attr,
&dev_attr_device.attr,
&dev_attr_device_name.attr,
&dev_attr_generation.attr,
&dev_attr_key.attr,
&dev_attr_nvm_authenticate.attr,
&dev_attr_nvm_authenticate_on_disconnect.attr,
&dev_attr_nvm_version.attr,
&dev_attr_rx_speed.attr,
&dev_attr_rx_lanes.attr,
&dev_attr_tx_speed.attr,
&dev_attr_tx_lanes.attr,
&dev_attr_vendor.attr,
&dev_attr_vendor_name.attr,
&dev_attr_unique_id.attr,
NULL,
};
static umode_t switch_attr_is_visible(struct kobject *kobj,
struct attribute *attr, int n)
{
struct device *dev = kobj_to_dev(kobj);
struct tb_switch *sw = tb_to_switch(dev);
if (attr == &dev_attr_authorized.attr) {
if (sw->tb->security_level == TB_SECURITY_NOPCIE ||
sw->tb->security_level == TB_SECURITY_DPONLY)
return 0;
} else if (attr == &dev_attr_device.attr) {
if (!sw->device)
return 0;
} else if (attr == &dev_attr_device_name.attr) {
if (!sw->device_name)
return 0;
} else if (attr == &dev_attr_vendor.attr) {
if (!sw->vendor)
return 0;
} else if (attr == &dev_attr_vendor_name.attr) {
if (!sw->vendor_name)
return 0;
} else if (attr == &dev_attr_key.attr) {
if (tb_route(sw) &&
sw->tb->security_level == TB_SECURITY_SECURE &&
sw->security_level == TB_SECURITY_SECURE)
return attr->mode;
return 0;
} else if (attr == &dev_attr_rx_speed.attr ||
attr == &dev_attr_rx_lanes.attr ||
attr == &dev_attr_tx_speed.attr ||
attr == &dev_attr_tx_lanes.attr) {
if (tb_route(sw))
return attr->mode;
return 0;
} else if (attr == &dev_attr_nvm_authenticate.attr) {
if (nvm_upgradeable(sw))
return attr->mode;
return 0;
} else if (attr == &dev_attr_nvm_version.attr) {
if (nvm_readable(sw))
return attr->mode;
return 0;
} else if (attr == &dev_attr_boot.attr) {
if (tb_route(sw))
return attr->mode;
return 0;
} else if (attr == &dev_attr_nvm_authenticate_on_disconnect.attr) {
if (sw->quirks & QUIRK_FORCE_POWER_LINK_CONTROLLER)
return attr->mode;
return 0;
}
return sw->safe_mode ? 0 : attr->mode;
}
static const struct attribute_group switch_group = {
.is_visible = switch_attr_is_visible,
.attrs = switch_attrs,
};
static const struct attribute_group *switch_groups[] = {
&switch_group,
NULL,
};
static void tb_switch_release(struct device *dev)
{
struct tb_switch *sw = tb_to_switch(dev);
struct tb_port *port;
dma_port_free(sw->dma_port);
tb_switch_for_each_port(sw, port) {
ida_destroy(&port->in_hopids);
ida_destroy(&port->out_hopids);
}
kfree(sw->uuid);
kfree(sw->device_name);
kfree(sw->vendor_name);
kfree(sw->ports);
kfree(sw->drom);
kfree(sw->key);
kfree(sw);
}
static int tb_switch_uevent(const struct device *dev, struct kobj_uevent_env *env)
{
const struct tb_switch *sw = tb_to_switch(dev);
const char *type;
if (tb_switch_is_usb4(sw)) {
if (add_uevent_var(env, "USB4_VERSION=%u.0",
usb4_switch_version(sw)))
return -ENOMEM;
}
if (!tb_route(sw)) {
type = "host";
} else {
const struct tb_port *port;
bool hub = false;
/* Device is hub if it has any downstream ports */
tb_switch_for_each_port(sw, port) {
if (!port->disabled && !tb_is_upstream_port(port) &&
tb_port_is_null(port)) {
hub = true;
break;
}
}
type = hub ? "hub" : "device";
}
if (add_uevent_var(env, "USB4_TYPE=%s", type))
return -ENOMEM;
return 0;
}
/*
* Currently only need to provide the callbacks. Everything else is handled
* in the connection manager.
*/
static int __maybe_unused tb_switch_runtime_suspend(struct device *dev)
{
struct tb_switch *sw = tb_to_switch(dev);
const struct tb_cm_ops *cm_ops = sw->tb->cm_ops;
if (cm_ops->runtime_suspend_switch)
return cm_ops->runtime_suspend_switch(sw);
return 0;
}
static int __maybe_unused tb_switch_runtime_resume(struct device *dev)
{
struct tb_switch *sw = tb_to_switch(dev);
const struct tb_cm_ops *cm_ops = sw->tb->cm_ops;
if (cm_ops->runtime_resume_switch)
return cm_ops->runtime_resume_switch(sw);
return 0;
}
static const struct dev_pm_ops tb_switch_pm_ops = {
SET_RUNTIME_PM_OPS(tb_switch_runtime_suspend, tb_switch_runtime_resume,
NULL)
};
struct device_type tb_switch_type = {
.name = "thunderbolt_device",
.release = tb_switch_release,
.uevent = tb_switch_uevent,
.pm = &tb_switch_pm_ops,
};
static int tb_switch_get_generation(struct tb_switch *sw)
{
if (tb_switch_is_usb4(sw))
return 4;
if (sw->config.vendor_id == PCI_VENDOR_ID_INTEL) {
switch (sw->config.device_id) {
case PCI_DEVICE_ID_INTEL_LIGHT_RIDGE:
case PCI_DEVICE_ID_INTEL_EAGLE_RIDGE:
case PCI_DEVICE_ID_INTEL_LIGHT_PEAK:
case PCI_DEVICE_ID_INTEL_CACTUS_RIDGE_2C:
case PCI_DEVICE_ID_INTEL_CACTUS_RIDGE_4C:
case PCI_DEVICE_ID_INTEL_PORT_RIDGE:
case PCI_DEVICE_ID_INTEL_REDWOOD_RIDGE_2C_BRIDGE:
case PCI_DEVICE_ID_INTEL_REDWOOD_RIDGE_4C_BRIDGE:
return 1;
case PCI_DEVICE_ID_INTEL_WIN_RIDGE_2C_BRIDGE:
case PCI_DEVICE_ID_INTEL_FALCON_RIDGE_2C_BRIDGE:
case PCI_DEVICE_ID_INTEL_FALCON_RIDGE_4C_BRIDGE:
return 2;
case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_LP_BRIDGE:
case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_2C_BRIDGE:
case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_4C_BRIDGE:
case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_2C_BRIDGE:
case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_4C_BRIDGE:
case PCI_DEVICE_ID_INTEL_TITAN_RIDGE_2C_BRIDGE:
case PCI_DEVICE_ID_INTEL_TITAN_RIDGE_4C_BRIDGE:
case PCI_DEVICE_ID_INTEL_TITAN_RIDGE_DD_BRIDGE:
case PCI_DEVICE_ID_INTEL_ICL_NHI0:
case PCI_DEVICE_ID_INTEL_ICL_NHI1:
return 3;
}
}
/*
* For unknown switches assume generation to be 1 to be on the
* safe side.
*/
tb_sw_warn(sw, "unsupported switch device id %#x\n",
sw->config.device_id);
return 1;
}
static bool tb_switch_exceeds_max_depth(const struct tb_switch *sw, int depth)
{
int max_depth;
if (tb_switch_is_usb4(sw) ||
(sw->tb->root_switch && tb_switch_is_usb4(sw->tb->root_switch)))
max_depth = USB4_SWITCH_MAX_DEPTH;
else
max_depth = TB_SWITCH_MAX_DEPTH;
return depth > max_depth;
}
/**
* tb_switch_alloc() - allocate a switch
* @tb: Pointer to the owning domain
* @parent: Parent device for this switch
* @route: Route string for this switch
*
* Allocates and initializes a switch. Will not upload configuration to
* the switch. For that you need to call tb_switch_configure()
* separately. The returned switch should be released by calling
* tb_switch_put().
*
* Return: Pointer to the allocated switch or ERR_PTR() in case of
* failure.
*/
struct tb_switch *tb_switch_alloc(struct tb *tb, struct device *parent,
u64 route)
{
struct tb_switch *sw;
int upstream_port;
int i, ret, depth;
/* Unlock the downstream port so we can access the switch below */
if (route) {
struct tb_switch *parent_sw = tb_to_switch(parent);
struct tb_port *down;
down = tb_port_at(route, parent_sw);
tb_port_unlock(down);
}
depth = tb_route_length(route);
upstream_port = tb_cfg_get_upstream_port(tb->ctl, route);
if (upstream_port < 0)
return ERR_PTR(upstream_port);
sw = kzalloc(sizeof(*sw), GFP_KERNEL);
if (!sw)
return ERR_PTR(-ENOMEM);
sw->tb = tb;
ret = tb_cfg_read(tb->ctl, &sw->config, route, 0, TB_CFG_SWITCH, 0, 5);
if (ret)
goto err_free_sw_ports;
sw->generation = tb_switch_get_generation(sw);
tb_dbg(tb, "current switch config:\n");
tb_dump_switch(tb, sw);
/* configure switch */
sw->config.upstream_port_number = upstream_port;
sw->config.depth = depth;
sw->config.route_hi = upper_32_bits(route);
sw->config.route_lo = lower_32_bits(route);
sw->config.enabled = 0;
/* Make sure we do not exceed maximum topology limit */
if (tb_switch_exceeds_max_depth(sw, depth)) {
ret = -EADDRNOTAVAIL;
goto err_free_sw_ports;
}
/* initialize ports */
sw->ports = kcalloc(sw->config.max_port_number + 1, sizeof(*sw->ports),
GFP_KERNEL);
if (!sw->ports) {
ret = -ENOMEM;
goto err_free_sw_ports;
}
for (i = 0; i <= sw->config.max_port_number; i++) {
/* minimum setup for tb_find_cap and tb_drom_read to work */
sw->ports[i].sw = sw;
sw->ports[i].port = i;
/* Control port does not need HopID allocation */
if (i) {
ida_init(&sw->ports[i].in_hopids);
ida_init(&sw->ports[i].out_hopids);
}
}
ret = tb_switch_find_vse_cap(sw, TB_VSE_CAP_PLUG_EVENTS);
if (ret > 0)
sw->cap_plug_events = ret;
ret = tb_switch_find_vse_cap(sw, TB_VSE_CAP_TIME2);
if (ret > 0)
sw->cap_vsec_tmu = ret;
ret = tb_switch_find_vse_cap(sw, TB_VSE_CAP_LINK_CONTROLLER);
if (ret > 0)
sw->cap_lc = ret;
ret = tb_switch_find_vse_cap(sw, TB_VSE_CAP_CP_LP);
if (ret > 0)
sw->cap_lp = ret;
/* Root switch is always authorized */
if (!route)
sw->authorized = true;
device_initialize(&sw->dev);
sw->dev.parent = parent;
sw->dev.bus = &tb_bus_type;
sw->dev.type = &tb_switch_type;
sw->dev.groups = switch_groups;
dev_set_name(&sw->dev, "%u-%llx", tb->index, tb_route(sw));
return sw;
err_free_sw_ports:
kfree(sw->ports);
kfree(sw);
return ERR_PTR(ret);
}
/**
* tb_switch_alloc_safe_mode() - allocate a switch that is in safe mode
* @tb: Pointer to the owning domain
* @parent: Parent device for this switch
* @route: Route string for this switch
*
* This creates a switch in safe mode. This means the switch pretty much
* lacks all capabilities except DMA configuration port before it is
* flashed with a valid NVM firmware.
*
* The returned switch must be released by calling tb_switch_put().
*
* Return: Pointer to the allocated switch or ERR_PTR() in case of failure
*/
struct tb_switch *
tb_switch_alloc_safe_mode(struct tb *tb, struct device *parent, u64 route)
{
struct tb_switch *sw;
sw = kzalloc(sizeof(*sw), GFP_KERNEL);
if (!sw)
return ERR_PTR(-ENOMEM);
sw->tb = tb;
sw->config.depth = tb_route_length(route);
sw->config.route_hi = upper_32_bits(route);
sw->config.route_lo = lower_32_bits(route);
sw->safe_mode = true;
device_initialize(&sw->dev);
sw->dev.parent = parent;
sw->dev.bus = &tb_bus_type;
sw->dev.type = &tb_switch_type;
sw->dev.groups = switch_groups;
dev_set_name(&sw->dev, "%u-%llx", tb->index, tb_route(sw));
return sw;
}
/**
* tb_switch_configure() - Uploads configuration to the switch
* @sw: Switch to configure
*
* Call this function before the switch is added to the system. It will
 * upload the configuration to the switch and make it available for the
 * connection manager to use. Can be called again after resume from low
 * power states to re-initialize the switch.
*
* Return: %0 in case of success and negative errno in case of failure
*/
int tb_switch_configure(struct tb_switch *sw)
{
struct tb *tb = sw->tb;
u64 route;
int ret;
route = tb_route(sw);
tb_dbg(tb, "%s Switch at %#llx (depth: %d, up port: %d)\n",
sw->config.enabled ? "restoring" : "initializing", route,
tb_route_length(route), sw->config.upstream_port_number);
sw->config.enabled = 1;
if (tb_switch_is_usb4(sw)) {
/*
* For USB4 devices, we need to program the CM version
* accordingly so that it knows to expose all the
* additional capabilities. Program it according to USB4
 * version to avoid changing the behaviour of existing (v1) routers.
*/
if (usb4_switch_version(sw) < 2)
sw->config.cmuv = ROUTER_CS_4_CMUV_V1;
else
sw->config.cmuv = ROUTER_CS_4_CMUV_V2;
sw->config.plug_events_delay = 0xa;
/* Enumerate the switch */
ret = tb_sw_write(sw, (u32 *)&sw->config + 1, TB_CFG_SWITCH,
ROUTER_CS_1, 4);
if (ret)
return ret;
ret = usb4_switch_setup(sw);
} else {
if (sw->config.vendor_id != PCI_VENDOR_ID_INTEL)
tb_sw_warn(sw, "unknown switch vendor id %#x\n",
sw->config.vendor_id);
if (!sw->cap_plug_events) {
tb_sw_warn(sw, "cannot find TB_VSE_CAP_PLUG_EVENTS aborting\n");
return -ENODEV;
}
/* Enumerate the switch */
ret = tb_sw_write(sw, (u32 *)&sw->config + 1, TB_CFG_SWITCH,
ROUTER_CS_1, 3);
}
if (ret)
return ret;
return tb_plug_events_active(sw, true);
}
/**
* tb_switch_configuration_valid() - Set the tunneling configuration to be valid
* @sw: Router to configure
*
* Needs to be called before any tunnels can be setup through the
* router. Can be called to any router.
*
 * Returns %0 on success and negative errno otherwise.
*/
int tb_switch_configuration_valid(struct tb_switch *sw)
{
if (tb_switch_is_usb4(sw))
return usb4_switch_configuration_valid(sw);
return 0;
}
static int tb_switch_set_uuid(struct tb_switch *sw)
{
bool uid = false;
u32 uuid[4];
int ret;
if (sw->uuid)
return 0;
if (tb_switch_is_usb4(sw)) {
ret = usb4_switch_read_uid(sw, &sw->uid);
if (ret)
return ret;
uid = true;
} else {
/*
* The newer controllers include fused UUID as part of
* link controller specific registers
*/
ret = tb_lc_read_uuid(sw, uuid);
if (ret) {
if (ret != -EINVAL)
return ret;
uid = true;
}
}
if (uid) {
/*
* ICM generates UUID based on UID and fills the upper
* two words with ones. This is not strictly following
* UUID format but we want to be compatible with it so
* we do the same here.
*/
uuid[0] = sw->uid & 0xffffffff;
uuid[1] = (sw->uid >> 32) & 0xffffffff;
uuid[2] = 0xffffffff;
uuid[3] = 0xffffffff;
}
sw->uuid = kmemdup(uuid, sizeof(uuid), GFP_KERNEL);
if (!sw->uuid)
return -ENOMEM;
return 0;
}
static int tb_switch_add_dma_port(struct tb_switch *sw)
{
u32 status;
int ret;
switch (sw->generation) {
case 2:
/* Only root switch can be upgraded */
if (tb_route(sw))
return 0;
fallthrough;
case 3:
case 4:
ret = tb_switch_set_uuid(sw);
if (ret)
return ret;
break;
default:
/*
* DMA port is the only thing available when the switch
* is in safe mode.
*/
if (!sw->safe_mode)
return 0;
break;
}
if (sw->no_nvm_upgrade)
return 0;
if (tb_switch_is_usb4(sw)) {
ret = usb4_switch_nvm_authenticate_status(sw, &status);
if (ret)
return ret;
if (status) {
tb_sw_info(sw, "switch flash authentication failed\n");
nvm_set_auth_status(sw, status);
}
return 0;
}
/* Root switch DMA port requires running firmware */
if (!tb_route(sw) && !tb_switch_is_icm(sw))
return 0;
sw->dma_port = dma_port_alloc(sw);
if (!sw->dma_port)
return 0;
/*
* If there is status already set then authentication failed
* when the dma_port_flash_update_auth() returned. Power cycling
 * is not needed (it was done already) so the only thing we do here
* is to unblock runtime PM of the root port.
*/
nvm_get_auth_status(sw, &status);
if (status) {
if (!tb_route(sw))
nvm_authenticate_complete_dma_port(sw);
return 0;
}
/*
* Check status of the previous flash authentication. If there
* is one we need to power cycle the switch in any case to make
* it functional again.
*/
ret = dma_port_flash_update_auth_status(sw->dma_port, &status);
if (ret <= 0)
return ret;
/* Now we can allow root port to suspend again */
if (!tb_route(sw))
nvm_authenticate_complete_dma_port(sw);
if (status) {
tb_sw_info(sw, "switch flash authentication failed\n");
nvm_set_auth_status(sw, status);
}
tb_sw_info(sw, "power cycling the switch now\n");
dma_port_power_cycle(sw->dma_port);
/*
* We return error here which causes the switch adding failure.
* It should appear back after power cycle is complete.
*/
return -ESHUTDOWN;
}
static void tb_switch_default_link_ports(struct tb_switch *sw)
{
int i;
for (i = 1; i <= sw->config.max_port_number; i++) {
struct tb_port *port = &sw->ports[i];
struct tb_port *subordinate;
if (!tb_port_is_null(port))
continue;
/* Check for the subordinate port */
if (i == sw->config.max_port_number ||
!tb_port_is_null(&sw->ports[i + 1]))
continue;
/* Link them if not already done so (by DROM) */
subordinate = &sw->ports[i + 1];
if (!port->dual_link_port && !subordinate->dual_link_port) {
port->link_nr = 0;
port->dual_link_port = subordinate;
subordinate->link_nr = 1;
subordinate->dual_link_port = port;
tb_sw_dbg(sw, "linked ports %d <-> %d\n",
port->port, subordinate->port);
}
}
}
static bool tb_switch_lane_bonding_possible(struct tb_switch *sw)
{
const struct tb_port *up = tb_upstream_port(sw);
if (!up->dual_link_port || !up->dual_link_port->remote)
return false;
if (tb_switch_is_usb4(sw))
return usb4_switch_lane_bonding_possible(sw);
return tb_lc_lane_bonding_possible(sw);
}
static int tb_switch_update_link_attributes(struct tb_switch *sw)
{
struct tb_port *up;
bool change = false;
int ret;
if (!tb_route(sw) || tb_switch_is_icm(sw))
return 0;
up = tb_upstream_port(sw);
ret = tb_port_get_link_speed(up);
if (ret < 0)
return ret;
if (sw->link_speed != ret)
change = true;
sw->link_speed = ret;
ret = tb_port_get_link_width(up);
if (ret < 0)
return ret;
if (sw->link_width != ret)
change = true;
sw->link_width = ret;
/* Notify userspace that there is possible link attribute change */
if (device_is_registered(&sw->dev) && change)
kobject_uevent(&sw->dev.kobj, KOBJ_CHANGE);
return 0;
}
/**
* tb_switch_lane_bonding_enable() - Enable lane bonding
* @sw: Switch to enable lane bonding
*
* Connection manager can call this function to enable lane bonding of a
* switch. If conditions are correct and both switches support the feature,
* lanes are bonded. It is safe to call this to any switch.
*/
int tb_switch_lane_bonding_enable(struct tb_switch *sw)
{
struct tb_port *up, *down;
u64 route = tb_route(sw);
unsigned int width_mask;
int ret;
if (!route)
return 0;
if (!tb_switch_lane_bonding_possible(sw))
return 0;
up = tb_upstream_port(sw);
down = tb_switch_downstream_port(sw);
if (!tb_port_is_width_supported(up, TB_LINK_WIDTH_DUAL) ||
!tb_port_is_width_supported(down, TB_LINK_WIDTH_DUAL))
return 0;
ret = tb_port_lane_bonding_enable(up);
if (ret) {
tb_port_warn(up, "failed to enable lane bonding\n");
return ret;
}
ret = tb_port_lane_bonding_enable(down);
if (ret) {
tb_port_warn(down, "failed to enable lane bonding\n");
tb_port_lane_bonding_disable(up);
return ret;
}
	/* Any of these widths means the link is bonded */
width_mask = TB_LINK_WIDTH_DUAL | TB_LINK_WIDTH_ASYM_TX |
TB_LINK_WIDTH_ASYM_RX;
ret = tb_port_wait_for_link_width(down, width_mask, 100);
if (ret) {
tb_port_warn(down, "timeout enabling lane bonding\n");
return ret;
}
tb_port_update_credits(down);
tb_port_update_credits(up);
tb_switch_update_link_attributes(sw);
tb_sw_dbg(sw, "lane bonding enabled\n");
return ret;
}
/**
* tb_switch_lane_bonding_disable() - Disable lane bonding
* @sw: Switch whose lane bonding to disable
*
* Disables lane bonding between @sw and parent. This can be called even
* if lanes were not bonded originally.
*/
void tb_switch_lane_bonding_disable(struct tb_switch *sw)
{
struct tb_port *up, *down;
int ret;
if (!tb_route(sw))
return;
up = tb_upstream_port(sw);
if (!up->bonded)
return;
down = tb_switch_downstream_port(sw);
tb_port_lane_bonding_disable(up);
tb_port_lane_bonding_disable(down);
/*
* It is fine if we get other errors as the router might have
* been unplugged.
*/
ret = tb_port_wait_for_link_width(down, TB_LINK_WIDTH_SINGLE, 100);
if (ret == -ETIMEDOUT)
tb_sw_warn(sw, "timeout disabling lane bonding\n");
tb_port_update_credits(down);
tb_port_update_credits(up);
tb_switch_update_link_attributes(sw);
tb_sw_dbg(sw, "lane bonding disabled\n");
}
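/*
 * Example (illustrative sketch): a connection manager would typically
 * enable lane bonding right after the downstream router has been added
 * and then mark the link configured before setting up tunnels.
 *
 *	if (tb_switch_lane_bonding_enable(sw))
 *		tb_sw_warn(sw, "failed to enable lane bonding\n");
 *	tb_switch_configure_link(sw);
 */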
/**
* tb_switch_configure_link() - Set link configured
* @sw: Switch whose link is configured
*
* Sets the link upstream from @sw configured (from both ends) so that
* it will not be disconnected when the domain exits sleep. Can be
* called for any switch.
*
* It is recommended that this is called after lane bonding is enabled.
*
* Returns %0 on success and negative errno in case of error.
*/
int tb_switch_configure_link(struct tb_switch *sw)
{
struct tb_port *up, *down;
int ret;
if (!tb_route(sw) || tb_switch_is_icm(sw))
return 0;
up = tb_upstream_port(sw);
if (tb_switch_is_usb4(up->sw))
ret = usb4_port_configure(up);
else
ret = tb_lc_configure_port(up);
if (ret)
return ret;
down = up->remote;
if (tb_switch_is_usb4(down->sw))
return usb4_port_configure(down);
return tb_lc_configure_port(down);
}
/**
* tb_switch_unconfigure_link() - Unconfigure link
* @sw: Switch whose link is unconfigured
*
* Sets the link unconfigured so the @sw will be disconnected if the
 * domain exits sleep.
*/
void tb_switch_unconfigure_link(struct tb_switch *sw)
{
struct tb_port *up, *down;
if (sw->is_unplugged)
return;
if (!tb_route(sw) || tb_switch_is_icm(sw))
return;
up = tb_upstream_port(sw);
if (tb_switch_is_usb4(up->sw))
usb4_port_unconfigure(up);
else
tb_lc_unconfigure_port(up);
down = up->remote;
if (tb_switch_is_usb4(down->sw))
usb4_port_unconfigure(down);
else
tb_lc_unconfigure_port(down);
}
static void tb_switch_credits_init(struct tb_switch *sw)
{
if (tb_switch_is_icm(sw))
return;
if (!tb_switch_is_usb4(sw))
return;
if (usb4_switch_credits_init(sw))
tb_sw_info(sw, "failed to determine preferred buffer allocation, using defaults\n");
}
static int tb_switch_port_hotplug_enable(struct tb_switch *sw)
{
struct tb_port *port;
if (tb_switch_is_icm(sw))
return 0;
tb_switch_for_each_port(sw, port) {
int res;
if (!port->cap_usb4)
continue;
res = usb4_port_hotplug_enable(port);
if (res)
return res;
}
return 0;
}
/**
* tb_switch_add() - Add a switch to the domain
* @sw: Switch to add
*
 * This is the last step in adding a switch to the domain. It reads
 * identification information from the DROM and initializes the ports so
 * that they can be used to connect other switches. The switch will be
* exposed to the userspace when this function successfully returns. To
* remove and release the switch, call tb_switch_remove().
*
* Return: %0 in case of success and negative errno in case of failure
*/
int tb_switch_add(struct tb_switch *sw)
{
int i, ret;
/*
* Initialize DMA control port now before we read DROM. Recent
* host controllers have more complete DROM on NVM that includes
* vendor and model identification strings which we then expose
* to the userspace. NVM can be accessed through DMA
* configuration based mailbox.
*/
ret = tb_switch_add_dma_port(sw);
if (ret) {
dev_err(&sw->dev, "failed to add DMA port\n");
return ret;
}
if (!sw->safe_mode) {
tb_switch_credits_init(sw);
/* read drom */
ret = tb_drom_read(sw);
if (ret)
dev_warn(&sw->dev, "reading DROM failed: %d\n", ret);
tb_sw_dbg(sw, "uid: %#llx\n", sw->uid);
ret = tb_switch_set_uuid(sw);
if (ret) {
dev_err(&sw->dev, "failed to set UUID\n");
return ret;
}
for (i = 0; i <= sw->config.max_port_number; i++) {
if (sw->ports[i].disabled) {
tb_port_dbg(&sw->ports[i], "disabled by eeprom\n");
continue;
}
ret = tb_init_port(&sw->ports[i]);
if (ret) {
dev_err(&sw->dev, "failed to initialize port %d\n", i);
return ret;
}
}
tb_check_quirks(sw);
tb_switch_default_link_ports(sw);
ret = tb_switch_update_link_attributes(sw);
if (ret)
return ret;
ret = tb_switch_clx_init(sw);
if (ret)
return ret;
ret = tb_switch_tmu_init(sw);
if (ret)
return ret;
}
ret = tb_switch_port_hotplug_enable(sw);
if (ret)
return ret;
ret = device_add(&sw->dev);
if (ret) {
dev_err(&sw->dev, "failed to add device: %d\n", ret);
return ret;
}
if (tb_route(sw)) {
dev_info(&sw->dev, "new device found, vendor=%#x device=%#x\n",
sw->vendor, sw->device);
if (sw->vendor_name && sw->device_name)
dev_info(&sw->dev, "%s %s\n", sw->vendor_name,
sw->device_name);
}
ret = usb4_switch_add_ports(sw);
if (ret) {
dev_err(&sw->dev, "failed to add USB4 ports\n");
goto err_del;
}
ret = tb_switch_nvm_add(sw);
if (ret) {
dev_err(&sw->dev, "failed to add NVM devices\n");
goto err_ports;
}
/*
* Thunderbolt routers do not generate wakeups themselves but
* they forward wakeups from tunneled protocols, so enable it
* here.
*/
device_init_wakeup(&sw->dev, true);
pm_runtime_set_active(&sw->dev);
if (sw->rpm) {
pm_runtime_set_autosuspend_delay(&sw->dev, TB_AUTOSUSPEND_DELAY);
pm_runtime_use_autosuspend(&sw->dev);
pm_runtime_mark_last_busy(&sw->dev);
pm_runtime_enable(&sw->dev);
pm_request_autosuspend(&sw->dev);
}
tb_switch_debugfs_init(sw);
return 0;
err_ports:
usb4_switch_remove_ports(sw);
err_del:
device_del(&sw->dev);
return ret;
}
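/*
 * Example (illustrative sketch of the switch lifecycle as driven by a
 * connection manager; error handling trimmed for brevity):
 *
 *	sw = tb_switch_alloc(tb, &parent_sw->dev, route);
 *	if (IS_ERR(sw))
 *		return PTR_ERR(sw);
 *
 *	ret = tb_switch_configure(sw);
 *	if (ret) {
 *		tb_switch_put(sw);
 *		return ret;
 *	}
 *
 *	ret = tb_switch_add(sw);
 *	if (ret)
 *		tb_switch_put(sw);
 *	...
 *	tb_switch_remove(sw);	on unplug
 */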
/**
* tb_switch_remove() - Remove and release a switch
* @sw: Switch to remove
*
* This will remove the switch from the domain and release it after last
* reference count drops to zero. If there are switches connected below
* this switch, they will be removed as well.
*/
void tb_switch_remove(struct tb_switch *sw)
{
struct tb_port *port;
tb_switch_debugfs_remove(sw);
if (sw->rpm) {
pm_runtime_get_sync(&sw->dev);
pm_runtime_disable(&sw->dev);
}
/* port 0 is the switch itself and never has a remote */
tb_switch_for_each_port(sw, port) {
if (tb_port_has_remote(port)) {
tb_switch_remove(port->remote->sw);
port->remote = NULL;
} else if (port->xdomain) {
tb_xdomain_remove(port->xdomain);
port->xdomain = NULL;
}
/* Remove any downstream retimers */
tb_retimer_remove_all(port);
}
if (!sw->is_unplugged)
tb_plug_events_active(sw, false);
tb_switch_nvm_remove(sw);
usb4_switch_remove_ports(sw);
if (tb_route(sw))
dev_info(&sw->dev, "device disconnected\n");
device_unregister(&sw->dev);
}
/**
* tb_sw_set_unplugged() - set is_unplugged on switch and downstream switches
* @sw: Router to mark unplugged
*/
void tb_sw_set_unplugged(struct tb_switch *sw)
{
struct tb_port *port;
if (sw == sw->tb->root_switch) {
tb_sw_WARN(sw, "cannot unplug root switch\n");
return;
}
if (sw->is_unplugged) {
tb_sw_WARN(sw, "is_unplugged already set\n");
return;
}
sw->is_unplugged = true;
tb_switch_for_each_port(sw, port) {
if (tb_port_has_remote(port))
tb_sw_set_unplugged(port->remote->sw);
else if (port->xdomain)
port->xdomain->is_unplugged = true;
}
}
static int tb_switch_set_wake(struct tb_switch *sw, unsigned int flags)
{
if (flags)
tb_sw_dbg(sw, "enabling wakeup: %#x\n", flags);
else
tb_sw_dbg(sw, "disabling wakeup\n");
if (tb_switch_is_usb4(sw))
return usb4_switch_set_wake(sw, flags);
return tb_lc_set_wake(sw, flags);
}
int tb_switch_resume(struct tb_switch *sw)
{
struct tb_port *port;
int err;
tb_sw_dbg(sw, "resuming switch\n");
/*
* Check for UID of the connected switches except for root
* switch which we assume cannot be removed.
*/
if (tb_route(sw)) {
u64 uid;
/*
* Check first that we can still read the switch config
* space. It may be that there is now another domain
* connected.
*/
err = tb_cfg_get_upstream_port(sw->tb->ctl, tb_route(sw));
if (err < 0) {
tb_sw_info(sw, "switch not present anymore\n");
return err;
}
/* We don't have any way to confirm this was the same device */
if (!sw->uid)
return -ENODEV;
if (tb_switch_is_usb4(sw))
err = usb4_switch_read_uid(sw, &uid);
else
err = tb_drom_read_uid_only(sw, &uid);
if (err) {
tb_sw_warn(sw, "uid read failed\n");
return err;
}
if (sw->uid != uid) {
tb_sw_info(sw,
"changed while suspended (uid %#llx -> %#llx)\n",
sw->uid, uid);
return -ENODEV;
}
}
err = tb_switch_configure(sw);
if (err)
return err;
/* Disable wakes */
tb_switch_set_wake(sw, 0);
err = tb_switch_tmu_init(sw);
if (err)
return err;
/* check for surviving downstream switches */
tb_switch_for_each_port(sw, port) {
if (!tb_port_is_null(port))
continue;
if (!tb_port_resume(port))
continue;
if (tb_wait_for_port(port, true) <= 0) {
tb_port_warn(port,
"lost during suspend, disconnecting\n");
if (tb_port_has_remote(port))
tb_sw_set_unplugged(port->remote->sw);
else if (port->xdomain)
port->xdomain->is_unplugged = true;
} else {
/*
* Always unlock the port so the downstream
* switch/domain is accessible.
*/
if (tb_port_unlock(port))
tb_port_warn(port, "failed to unlock port\n");
if (port->remote && tb_switch_resume(port->remote->sw)) {
tb_port_warn(port,
"lost during suspend, disconnecting\n");
tb_sw_set_unplugged(port->remote->sw);
}
}
}
return 0;
}
/**
* tb_switch_suspend() - Put a switch to sleep
* @sw: Switch to suspend
* @runtime: Is this runtime suspend or system sleep
*
* Suspends router and all its children. Enables wakes according to
 * value of @runtime and then sets the sleep bit for the router. If @sw is
 * the host router, the domain is ready to go to sleep once this function
* returns.
*/
void tb_switch_suspend(struct tb_switch *sw, bool runtime)
{
unsigned int flags = 0;
struct tb_port *port;
int err;
tb_sw_dbg(sw, "suspending switch\n");
/*
* Actually only needed for Titan Ridge but for simplicity can be
* done for USB4 device too as CLx is re-enabled at resume.
*/
tb_switch_clx_disable(sw);
err = tb_plug_events_active(sw, false);
if (err)
return;
tb_switch_for_each_port(sw, port) {
if (tb_port_has_remote(port))
tb_switch_suspend(port->remote->sw, runtime);
}
if (runtime) {
/* Trigger wake when something is plugged in/out */
flags |= TB_WAKE_ON_CONNECT | TB_WAKE_ON_DISCONNECT;
flags |= TB_WAKE_ON_USB4;
flags |= TB_WAKE_ON_USB3 | TB_WAKE_ON_PCIE | TB_WAKE_ON_DP;
} else if (device_may_wakeup(&sw->dev)) {
flags |= TB_WAKE_ON_USB4 | TB_WAKE_ON_USB3 | TB_WAKE_ON_PCIE;
}
tb_switch_set_wake(sw, flags);
if (tb_switch_is_usb4(sw))
usb4_switch_set_sleep(sw);
else
tb_lc_set_sleep(sw);
}
/**
* tb_switch_query_dp_resource() - Query availability of DP resource
* @sw: Switch whose DP resource is queried
* @in: DP IN port
*
* Queries availability of DP resource for DP tunneling using switch
* specific means. Returns %true if resource is available.
*/
bool tb_switch_query_dp_resource(struct tb_switch *sw, struct tb_port *in)
{
if (tb_switch_is_usb4(sw))
return usb4_switch_query_dp_resource(sw, in);
return tb_lc_dp_sink_query(sw, in);
}
/**
* tb_switch_alloc_dp_resource() - Allocate available DP resource
* @sw: Switch whose DP resource is allocated
* @in: DP IN port
*
* Allocates DP resource for DP tunneling. The resource must be
* available for this to succeed (see tb_switch_query_dp_resource()).
 * Returns %0 on success and negative errno otherwise.
*/
int tb_switch_alloc_dp_resource(struct tb_switch *sw, struct tb_port *in)
{
int ret;
if (tb_switch_is_usb4(sw))
ret = usb4_switch_alloc_dp_resource(sw, in);
else
ret = tb_lc_dp_sink_alloc(sw, in);
if (ret)
tb_sw_warn(sw, "failed to allocate DP resource for port %d\n",
in->port);
else
tb_sw_dbg(sw, "allocated DP resource for port %d\n", in->port);
return ret;
}
/**
* tb_switch_dealloc_dp_resource() - De-allocate DP resource
* @sw: Switch whose DP resource is de-allocated
* @in: DP IN port
*
* De-allocates DP resource that was previously allocated for DP
* tunneling.
*/
void tb_switch_dealloc_dp_resource(struct tb_switch *sw, struct tb_port *in)
{
int ret;
if (tb_switch_is_usb4(sw))
ret = usb4_switch_dealloc_dp_resource(sw, in);
else
ret = tb_lc_dp_sink_dealloc(sw, in);
if (ret)
tb_sw_warn(sw, "failed to de-allocate DP resource for port %d\n",
in->port);
else
tb_sw_dbg(sw, "released DP resource for port %d\n", in->port);
}
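/*
 * Example (illustrative sketch): typical DP resource handling when setting
 * up a DP tunnel, where @in is the DP IN adapter of @sw.
 *
 *	if (!tb_switch_query_dp_resource(sw, in))
 *		return -EBUSY;
 *
 *	ret = tb_switch_alloc_dp_resource(sw, in);
 *	if (ret)
 *		return ret;
 *	...
 *	tb_switch_dealloc_dp_resource(sw, in);	when the tunnel is torn down
 */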
struct tb_sw_lookup {
struct tb *tb;
u8 link;
u8 depth;
const uuid_t *uuid;
u64 route;
};
static int tb_switch_match(struct device *dev, const void *data)
{
struct tb_switch *sw = tb_to_switch(dev);
const struct tb_sw_lookup *lookup = data;
if (!sw)
return 0;
if (sw->tb != lookup->tb)
return 0;
if (lookup->uuid)
return !memcmp(sw->uuid, lookup->uuid, sizeof(*lookup->uuid));
if (lookup->route) {
return sw->config.route_lo == lower_32_bits(lookup->route) &&
sw->config.route_hi == upper_32_bits(lookup->route);
}
/* Root switch is matched only by depth */
if (!lookup->depth)
return !sw->depth;
return sw->link == lookup->link && sw->depth == lookup->depth;
}
/**
* tb_switch_find_by_link_depth() - Find switch by link and depth
* @tb: Domain the switch belongs
* @link: Link number the switch is connected
* @depth: Depth of the switch in link
*
* Returned switch has reference count increased so the caller needs to
* call tb_switch_put() when done with the switch.
*/
struct tb_switch *tb_switch_find_by_link_depth(struct tb *tb, u8 link, u8 depth)
{
struct tb_sw_lookup lookup;
struct device *dev;
memset(&lookup, 0, sizeof(lookup));
lookup.tb = tb;
lookup.link = link;
lookup.depth = depth;
dev = bus_find_device(&tb_bus_type, NULL, &lookup, tb_switch_match);
if (dev)
return tb_to_switch(dev);
return NULL;
}
/**
* tb_switch_find_by_uuid() - Find switch by UUID
* @tb: Domain the switch belongs
* @uuid: UUID to look for
*
* Returned switch has reference count increased so the caller needs to
* call tb_switch_put() when done with the switch.
*/
struct tb_switch *tb_switch_find_by_uuid(struct tb *tb, const uuid_t *uuid)
{
struct tb_sw_lookup lookup;
struct device *dev;
memset(&lookup, 0, sizeof(lookup));
lookup.tb = tb;
lookup.uuid = uuid;
dev = bus_find_device(&tb_bus_type, NULL, &lookup, tb_switch_match);
if (dev)
return tb_to_switch(dev);
return NULL;
}
/**
* tb_switch_find_by_route() - Find switch by route string
* @tb: Domain the switch belongs
* @route: Route string to look for
*
* Returned switch has reference count increased so the caller needs to
* call tb_switch_put() when done with the switch.
*/
struct tb_switch *tb_switch_find_by_route(struct tb *tb, u64 route)
{
struct tb_sw_lookup lookup;
struct device *dev;
if (!route)
return tb_switch_get(tb->root_switch);
memset(&lookup, 0, sizeof(lookup));
lookup.tb = tb;
lookup.route = route;
dev = bus_find_device(&tb_bus_type, NULL, &lookup, tb_switch_match);
if (dev)
return tb_to_switch(dev);
return NULL;
}
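/*
 * Example (illustrative sketch): the lookup helpers return a switch with
 * an elevated reference count, so the caller must balance it with
 * tb_switch_put().
 *
 *	sw = tb_switch_find_by_route(tb, route);
 *	if (sw) {
 *		...
 *		tb_switch_put(sw);
 *	}
 */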
/**
* tb_switch_find_port() - return the first port of @type on @sw or NULL
* @sw: Switch to find the port from
* @type: Port type to look for
*/
struct tb_port *tb_switch_find_port(struct tb_switch *sw,
enum tb_port_type type)
{
struct tb_port *port;
tb_switch_for_each_port(sw, port) {
if (port->config.type == type)
return port;
}
return NULL;
}
/*
 * Can be used to read/write a specified PCIe bridge for any Thunderbolt 3
* device. For now used only for Titan Ridge.
*/
static int tb_switch_pcie_bridge_write(struct tb_switch *sw, unsigned int bridge,
unsigned int pcie_offset, u32 value)
{
u32 offset, command, val;
int ret;
if (sw->generation != 3)
return -EOPNOTSUPP;
offset = sw->cap_plug_events + TB_PLUG_EVENTS_PCIE_WR_DATA;
ret = tb_sw_write(sw, &value, TB_CFG_SWITCH, offset, 1);
if (ret)
return ret;
command = pcie_offset & TB_PLUG_EVENTS_PCIE_CMD_DW_OFFSET_MASK;
command |= BIT(bridge + TB_PLUG_EVENTS_PCIE_CMD_BR_SHIFT);
command |= TB_PLUG_EVENTS_PCIE_CMD_RD_WR_MASK;
command |= TB_PLUG_EVENTS_PCIE_CMD_COMMAND_VAL
<< TB_PLUG_EVENTS_PCIE_CMD_COMMAND_SHIFT;
command |= TB_PLUG_EVENTS_PCIE_CMD_REQ_ACK_MASK;
offset = sw->cap_plug_events + TB_PLUG_EVENTS_PCIE_CMD;
ret = tb_sw_write(sw, &command, TB_CFG_SWITCH, offset, 1);
if (ret)
return ret;
ret = tb_switch_wait_for_bit(sw, offset,
TB_PLUG_EVENTS_PCIE_CMD_REQ_ACK_MASK, 0, 100);
if (ret)
return ret;
ret = tb_sw_read(sw, &val, TB_CFG_SWITCH, offset, 1);
if (ret)
return ret;
if (val & TB_PLUG_EVENTS_PCIE_CMD_TIMEOUT_MASK)
return -ETIMEDOUT;
return 0;
}
/**
* tb_switch_pcie_l1_enable() - Enable PCIe link to enter L1 state
* @sw: Router to enable PCIe L1
*
 * For a Titan Ridge switch to enter the CLx state, its PCIe bridges must
 * enable entry to the PCIe L1 state. Shall be called after the upstream
 * PCIe tunnel has been configured. Due to an Intel platform limitation,
 * this shall only be called for the first hop switch.
*/
int tb_switch_pcie_l1_enable(struct tb_switch *sw)
{
struct tb_switch *parent = tb_switch_parent(sw);
int ret;
if (!tb_route(sw))
return 0;
if (!tb_switch_is_titan_ridge(sw))
return 0;
/* Enable PCIe L1 enable only for first hop router (depth = 1) */
if (tb_route(parent))
return 0;
/* Write to downstream PCIe bridge #5 aka Dn4 */
ret = tb_switch_pcie_bridge_write(sw, 5, 0x143, 0x0c7806b1);
if (ret)
return ret;
/* Write to Upstream PCIe bridge #0 aka Up0 */
return tb_switch_pcie_bridge_write(sw, 0, 0x143, 0x0c5806b1);
}
/**
* tb_switch_xhci_connect() - Connect internal xHCI
* @sw: Router whose xHCI to connect
*
 * Can be called for any router. For Alpine Ridge and Titan Ridge this
 * performs the special flows that make the internal xHCI functional for
 * any device connected to the Type-C port. Call only after the PCIe
 * tunnel has been established. The function only does the connect if it
 * has not been done already, so it can be called several times for the
 * same router.
*/
int tb_switch_xhci_connect(struct tb_switch *sw)
{
struct tb_port *port1, *port3;
int ret;
if (sw->generation != 3)
return 0;
port1 = &sw->ports[1];
port3 = &sw->ports[3];
if (tb_switch_is_alpine_ridge(sw)) {
bool usb_port1, usb_port3, xhci_port1, xhci_port3;
usb_port1 = tb_lc_is_usb_plugged(port1);
usb_port3 = tb_lc_is_usb_plugged(port3);
xhci_port1 = tb_lc_is_xhci_connected(port1);
xhci_port3 = tb_lc_is_xhci_connected(port3);
/* Figure out correct USB port to connect */
if (usb_port1 && !xhci_port1) {
ret = tb_lc_xhci_connect(port1);
if (ret)
return ret;
}
if (usb_port3 && !xhci_port3)
return tb_lc_xhci_connect(port3);
} else if (tb_switch_is_titan_ridge(sw)) {
ret = tb_lc_xhci_connect(port1);
if (ret)
return ret;
return tb_lc_xhci_connect(port3);
}
return 0;
}
/**
* tb_switch_xhci_disconnect() - Disconnect internal xHCI
* @sw: Router whose xHCI to disconnect
*
* The opposite of tb_switch_xhci_connect(). Disconnects xHCI on both
* ports.
*/
void tb_switch_xhci_disconnect(struct tb_switch *sw)
{
if (sw->generation == 3) {
struct tb_port *port1 = &sw->ports[1];
struct tb_port *port3 = &sw->ports[3];
tb_lc_xhci_disconnect(port1);
tb_port_dbg(port1, "disconnected xHCI\n");
tb_lc_xhci_disconnect(port3);
tb_port_dbg(port3, "disconnected xHCI\n");
}
}
| linux-master | drivers/thunderbolt/switch.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Thunderbolt XDomain property support
*
* Copyright (C) 2017, Intel Corporation
* Authors: Michael Jamet <[email protected]>
* Mika Westerberg <[email protected]>
*/
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/uuid.h>
#include <linux/thunderbolt.h>
struct tb_property_entry {
u32 key_hi;
u32 key_lo;
u16 length;
u8 reserved;
u8 type;
u32 value;
};
struct tb_property_rootdir_entry {
u32 magic;
u32 length;
struct tb_property_entry entries[];
};
struct tb_property_dir_entry {
u32 uuid[4];
struct tb_property_entry entries[];
};
#define TB_PROPERTY_ROOTDIR_MAGIC 0x55584401
static struct tb_property_dir *__tb_property_parse_dir(const u32 *block,
size_t block_len, unsigned int dir_offset, size_t dir_len,
bool is_root);
static inline void parse_dwdata(void *dst, const void *src, size_t dwords)
{
be32_to_cpu_array(dst, src, dwords);
}
static inline void format_dwdata(void *dst, const void *src, size_t dwords)
{
cpu_to_be32_array(dst, src, dwords);
}
static bool tb_property_entry_valid(const struct tb_property_entry *entry,
size_t block_len)
{
switch (entry->type) {
case TB_PROPERTY_TYPE_DIRECTORY:
case TB_PROPERTY_TYPE_DATA:
case TB_PROPERTY_TYPE_TEXT:
if (entry->length > block_len)
return false;
if (entry->value + entry->length > block_len)
return false;
break;
case TB_PROPERTY_TYPE_VALUE:
if (entry->length != 1)
return false;
break;
}
return true;
}
static bool tb_property_key_valid(const char *key)
{
return key && strlen(key) <= TB_PROPERTY_KEY_SIZE;
}
static struct tb_property *
tb_property_alloc(const char *key, enum tb_property_type type)
{
struct tb_property *property;
property = kzalloc(sizeof(*property), GFP_KERNEL);
if (!property)
return NULL;
strcpy(property->key, key);
property->type = type;
INIT_LIST_HEAD(&property->list);
return property;
}
static struct tb_property *tb_property_parse(const u32 *block, size_t block_len,
const struct tb_property_entry *entry)
{
char key[TB_PROPERTY_KEY_SIZE + 1];
struct tb_property *property;
struct tb_property_dir *dir;
if (!tb_property_entry_valid(entry, block_len))
return NULL;
parse_dwdata(key, entry, 2);
key[TB_PROPERTY_KEY_SIZE] = '\0';
property = tb_property_alloc(key, entry->type);
if (!property)
return NULL;
property->length = entry->length;
switch (property->type) {
case TB_PROPERTY_TYPE_DIRECTORY:
dir = __tb_property_parse_dir(block, block_len, entry->value,
entry->length, false);
if (!dir) {
kfree(property);
return NULL;
}
property->value.dir = dir;
break;
case TB_PROPERTY_TYPE_DATA:
property->value.data = kcalloc(property->length, sizeof(u32),
GFP_KERNEL);
if (!property->value.data) {
kfree(property);
return NULL;
}
parse_dwdata(property->value.data, block + entry->value,
entry->length);
break;
case TB_PROPERTY_TYPE_TEXT:
property->value.text = kcalloc(property->length, sizeof(u32),
GFP_KERNEL);
if (!property->value.text) {
kfree(property);
return NULL;
}
parse_dwdata(property->value.text, block + entry->value,
entry->length);
/* Force null termination */
property->value.text[property->length * 4 - 1] = '\0';
break;
case TB_PROPERTY_TYPE_VALUE:
property->value.immediate = entry->value;
break;
default:
property->type = TB_PROPERTY_TYPE_UNKNOWN;
break;
}
return property;
}
static struct tb_property_dir *__tb_property_parse_dir(const u32 *block,
size_t block_len, unsigned int dir_offset, size_t dir_len, bool is_root)
{
const struct tb_property_entry *entries;
size_t i, content_len, nentries;
unsigned int content_offset;
struct tb_property_dir *dir;
dir = kzalloc(sizeof(*dir), GFP_KERNEL);
if (!dir)
return NULL;
if (is_root) {
content_offset = dir_offset + 2;
content_len = dir_len;
} else {
dir->uuid = kmemdup(&block[dir_offset], sizeof(*dir->uuid),
GFP_KERNEL);
if (!dir->uuid) {
tb_property_free_dir(dir);
return NULL;
}
content_offset = dir_offset + 4;
content_len = dir_len - 4; /* Length includes UUID */
}
entries = (const struct tb_property_entry *)&block[content_offset];
nentries = content_len / (sizeof(*entries) / 4);
INIT_LIST_HEAD(&dir->properties);
for (i = 0; i < nentries; i++) {
struct tb_property *property;
property = tb_property_parse(block, block_len, &entries[i]);
if (!property) {
tb_property_free_dir(dir);
return NULL;
}
list_add_tail(&property->list, &dir->properties);
}
return dir;
}
/**
* tb_property_parse_dir() - Parses properties from given property block
* @block: Property block to parse
* @block_len: Number of dword elements in the property block
*
 * This function parses the XDomain properties data block into a format that
* can be traversed using the helper functions provided by this module.
* Upon success returns the parsed directory. In case of error returns
* %NULL. The resulting &struct tb_property_dir needs to be released by
* calling tb_property_free_dir() when not needed anymore.
*
* The @block is expected to be root directory.
*/
struct tb_property_dir *tb_property_parse_dir(const u32 *block,
size_t block_len)
{
const struct tb_property_rootdir_entry *rootdir =
(const struct tb_property_rootdir_entry *)block;
if (rootdir->magic != TB_PROPERTY_ROOTDIR_MAGIC)
return NULL;
if (rootdir->length > block_len)
return NULL;
return __tb_property_parse_dir(block, block_len, 0, rootdir->length,
true);
}
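/*
 * Example (illustrative sketch): parsing a received XDomain property
 * block. Here @block and @block_len are assumed to come from the XDomain
 * protocol code.
 *
 *	dir = tb_property_parse_dir(block, block_len);
 *	if (!dir)
 *		return -EINVAL;
 *	...walk dir->properties...
 *	tb_property_free_dir(dir);
 */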
/**
* tb_property_create_dir() - Creates new property directory
* @uuid: UUID used to identify the particular directory
*
* Creates new, empty property directory. If @uuid is %NULL then the
* directory is assumed to be root directory.
*/
struct tb_property_dir *tb_property_create_dir(const uuid_t *uuid)
{
struct tb_property_dir *dir;
dir = kzalloc(sizeof(*dir), GFP_KERNEL);
if (!dir)
return NULL;
INIT_LIST_HEAD(&dir->properties);
if (uuid) {
dir->uuid = kmemdup(uuid, sizeof(*dir->uuid), GFP_KERNEL);
if (!dir->uuid) {
kfree(dir);
return NULL;
}
}
return dir;
}
EXPORT_SYMBOL_GPL(tb_property_create_dir);
static void tb_property_free(struct tb_property *property)
{
switch (property->type) {
case TB_PROPERTY_TYPE_DIRECTORY:
tb_property_free_dir(property->value.dir);
break;
case TB_PROPERTY_TYPE_DATA:
kfree(property->value.data);
break;
case TB_PROPERTY_TYPE_TEXT:
kfree(property->value.text);
break;
default:
break;
}
kfree(property);
}
/**
* tb_property_free_dir() - Release memory allocated for property directory
* @dir: Directory to release
*
* This will release all the memory the directory occupies including all
* descendants. It is OK to pass %NULL @dir, then the function does
* nothing.
*/
void tb_property_free_dir(struct tb_property_dir *dir)
{
struct tb_property *property, *tmp;
if (!dir)
return;
list_for_each_entry_safe(property, tmp, &dir->properties, list) {
list_del(&property->list);
tb_property_free(property);
}
kfree(dir->uuid);
kfree(dir);
}
EXPORT_SYMBOL_GPL(tb_property_free_dir);
static size_t tb_property_dir_length(const struct tb_property_dir *dir,
bool recurse, size_t *data_len)
{
const struct tb_property *property;
size_t len = 0;
if (dir->uuid)
len += sizeof(*dir->uuid) / 4;
else
len += sizeof(struct tb_property_rootdir_entry) / 4;
list_for_each_entry(property, &dir->properties, list) {
len += sizeof(struct tb_property_entry) / 4;
switch (property->type) {
case TB_PROPERTY_TYPE_DIRECTORY:
if (recurse) {
len += tb_property_dir_length(
property->value.dir, recurse, data_len);
}
/* Reserve dword padding after each directory */
if (data_len)
*data_len += 1;
break;
case TB_PROPERTY_TYPE_DATA:
case TB_PROPERTY_TYPE_TEXT:
if (data_len)
*data_len += property->length;
break;
default:
break;
}
}
return len;
}
static ssize_t __tb_property_format_dir(const struct tb_property_dir *dir,
u32 *block, unsigned int start_offset, size_t block_len)
{
unsigned int data_offset, dir_end;
const struct tb_property *property;
struct tb_property_entry *entry;
size_t dir_len, data_len = 0;
int ret;
/*
	 * The structure of the property block looks like the following. Leaf
* data/text is included right after the directory and each
* directory follows each other (even nested ones).
*
* +----------+ <-- start_offset
* | header | <-- root directory header
* +----------+ ---
* | entry 0 | -^--------------------.
* +----------+ | |
* | entry 1 | -|--------------------|--.
* +----------+ | | |
* | entry 2 | -|-----------------. | |
* +----------+ | | | |
* : : | dir_len | | |
* . . | | | |
* : : | | | |
* +----------+ | | | |
* | entry n | v | | |
* +----------+ <-- data_offset | | |
* | data 0 | <------------------|--' |
* +----------+ | |
* | data 1 | <------------------|-----'
* +----------+ |
* | 00000000 | padding |
* +----------+ <-- dir_end <------'
* | UUID | <-- directory UUID (child directory)
* +----------+
* | entry 0 |
* +----------+
* | entry 1 |
* +----------+
* : :
* . .
* : :
* +----------+
* | entry n |
* +----------+
* | data 0 |
* +----------+
*
* We use dir_end to hold pointer to the end of the directory. It
* will increase as we add directories and each directory should be
* added starting from previous dir_end.
*/
dir_len = tb_property_dir_length(dir, false, &data_len);
data_offset = start_offset + dir_len;
dir_end = start_offset + data_len + dir_len;
if (data_offset > dir_end)
return -EINVAL;
if (dir_end > block_len)
return -EINVAL;
/* Write headers first */
if (dir->uuid) {
struct tb_property_dir_entry *pe;
pe = (struct tb_property_dir_entry *)&block[start_offset];
memcpy(pe->uuid, dir->uuid, sizeof(pe->uuid));
entry = pe->entries;
} else {
struct tb_property_rootdir_entry *re;
re = (struct tb_property_rootdir_entry *)&block[start_offset];
re->magic = TB_PROPERTY_ROOTDIR_MAGIC;
re->length = dir_len - sizeof(*re) / 4;
entry = re->entries;
}
list_for_each_entry(property, &dir->properties, list) {
const struct tb_property_dir *child;
format_dwdata(entry, property->key, 2);
entry->type = property->type;
switch (property->type) {
case TB_PROPERTY_TYPE_DIRECTORY:
child = property->value.dir;
ret = __tb_property_format_dir(child, block, dir_end,
block_len);
if (ret < 0)
return ret;
entry->length = tb_property_dir_length(child, false,
NULL);
entry->value = dir_end;
dir_end = ret;
break;
case TB_PROPERTY_TYPE_DATA:
format_dwdata(&block[data_offset], property->value.data,
property->length);
entry->length = property->length;
entry->value = data_offset;
data_offset += entry->length;
break;
case TB_PROPERTY_TYPE_TEXT:
format_dwdata(&block[data_offset], property->value.text,
property->length);
entry->length = property->length;
entry->value = data_offset;
data_offset += entry->length;
break;
case TB_PROPERTY_TYPE_VALUE:
entry->length = property->length;
entry->value = property->value.immediate;
break;
default:
break;
}
entry++;
}
return dir_end;
}
/**
* tb_property_format_dir() - Formats directory to the packed XDomain format
* @dir: Directory to format
* @block: Property block where the packed data is placed
* @block_len: Length of the property block
*
* This function formats the directory to the packed format that can then
* be sent over the Thunderbolt fabric to the receiving host. Returns %0
* in case of success and negative errno on failure. Passing %NULL in
* @block returns the number of dwords the formatted block takes.
*/
ssize_t tb_property_format_dir(const struct tb_property_dir *dir, u32 *block,
size_t block_len)
{
ssize_t ret;
if (!block) {
size_t dir_len, data_len = 0;
dir_len = tb_property_dir_length(dir, true, &data_len);
return dir_len + data_len;
}
ret = __tb_property_format_dir(dir, block, 0, block_len);
return ret < 0 ? ret : 0;
}
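/*
 * Example (sketch, variable names illustrative, error handling omitted):
 * the typical two-pass usage is to size the block first by passing %NULL
 * and then format into a buffer of that many dwords.
 *
 *	ssize_t len = tb_property_format_dir(dir, NULL, 0);
 *	u32 *block = kcalloc(len, sizeof(u32), GFP_KERNEL);
 *
 *	if (block)
 *		ret = tb_property_format_dir(dir, block, len);
 */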
/**
* tb_property_copy_dir() - Take a deep copy of directory
* @dir: Directory to copy
*
* This function takes a deep copy of @dir and returns back the copy. In
* case of error returns %NULL. The resulting directory needs to be
* released by calling tb_property_free_dir().
*/
struct tb_property_dir *tb_property_copy_dir(const struct tb_property_dir *dir)
{
struct tb_property *property, *p = NULL;
struct tb_property_dir *d;
if (!dir)
return NULL;
d = tb_property_create_dir(dir->uuid);
if (!d)
return NULL;
list_for_each_entry(property, &dir->properties, list) {
p = tb_property_alloc(property->key, property->type);
if (!p)
goto err_free;
p->length = property->length;
switch (property->type) {
case TB_PROPERTY_TYPE_DIRECTORY:
p->value.dir = tb_property_copy_dir(property->value.dir);
if (!p->value.dir)
goto err_free;
break;
case TB_PROPERTY_TYPE_DATA:
p->value.data = kmemdup(property->value.data,
property->length * 4,
GFP_KERNEL);
if (!p->value.data)
goto err_free;
break;
case TB_PROPERTY_TYPE_TEXT:
p->value.text = kzalloc(p->length * 4, GFP_KERNEL);
if (!p->value.text)
goto err_free;
strcpy(p->value.text, property->value.text);
break;
case TB_PROPERTY_TYPE_VALUE:
p->value.immediate = property->value.immediate;
break;
default:
break;
}
list_add_tail(&p->list, &d->properties);
}
return d;
err_free:
kfree(p);
tb_property_free_dir(d);
return NULL;
}
/**
* tb_property_add_immediate() - Add immediate property to directory
* @parent: Directory to add the property
* @key: Key for the property
* @value: Immediate value to store with the property
*/
int tb_property_add_immediate(struct tb_property_dir *parent, const char *key,
u32 value)
{
struct tb_property *property;
if (!tb_property_key_valid(key))
return -EINVAL;
property = tb_property_alloc(key, TB_PROPERTY_TYPE_VALUE);
if (!property)
return -ENOMEM;
property->length = 1;
property->value.immediate = value;
list_add_tail(&property->list, &parent->properties);
return 0;
}
EXPORT_SYMBOL_GPL(tb_property_add_immediate);
/**
* tb_property_add_data() - Adds arbitrary data property to directory
* @parent: Directory to add the property
* @key: Key for the property
* @buf: Data buffer to add
* @buflen: Number of bytes in the data buffer
*
* Function takes a copy of @buf and adds it to the directory.
*/
int tb_property_add_data(struct tb_property_dir *parent, const char *key,
const void *buf, size_t buflen)
{
/* Need to pad to dword boundary */
size_t size = round_up(buflen, 4);
struct tb_property *property;
if (!tb_property_key_valid(key))
return -EINVAL;
property = tb_property_alloc(key, TB_PROPERTY_TYPE_DATA);
if (!property)
return -ENOMEM;
property->length = size / 4;
property->value.data = kzalloc(size, GFP_KERNEL);
if (!property->value.data) {
kfree(property);
return -ENOMEM;
}
memcpy(property->value.data, buf, buflen);
list_add_tail(&property->list, &parent->properties);
return 0;
}
EXPORT_SYMBOL_GPL(tb_property_add_data);
/**
* tb_property_add_text() - Adds string property to directory
* @parent: Directory to add the property
* @key: Key for the property
* @text: String to add
*
* Function takes a copy of @text and adds it to the directory.
*/
int tb_property_add_text(struct tb_property_dir *parent, const char *key,
const char *text)
{
/* Need to pad to dword boundary */
size_t size = round_up(strlen(text) + 1, 4);
struct tb_property *property;
if (!tb_property_key_valid(key))
return -EINVAL;
property = tb_property_alloc(key, TB_PROPERTY_TYPE_TEXT);
if (!property)
return -ENOMEM;
property->length = size / 4;
property->value.text = kzalloc(size, GFP_KERNEL);
if (!property->value.text) {
kfree(property);
return -ENOMEM;
}
strcpy(property->value.text, text);
list_add_tail(&property->list, &parent->properties);
return 0;
}
EXPORT_SYMBOL_GPL(tb_property_add_text);
/**
* tb_property_add_dir() - Adds a directory to the parent directory
* @parent: Directory to add the property
* @key: Key for the property
* @dir: Directory to add
*/
int tb_property_add_dir(struct tb_property_dir *parent, const char *key,
struct tb_property_dir *dir)
{
struct tb_property *property;
if (!tb_property_key_valid(key))
return -EINVAL;
property = tb_property_alloc(key, TB_PROPERTY_TYPE_DIRECTORY);
if (!property)
return -ENOMEM;
property->value.dir = dir;
list_add_tail(&property->list, &parent->properties);
return 0;
}
EXPORT_SYMBOL_GPL(tb_property_add_dir);
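/*
 * Example (illustrative sketch only, the key names are made up): building
 * a small property directory with the helpers above and releasing it
 * again with tb_property_free_dir().
 *
 *	struct tb_property_dir *dir = tb_property_create_dir(NULL);
 *
 *	if (dir) {
 *		tb_property_add_immediate(dir, "maxhopid", 15);
 *		tb_property_add_text(dir, "deviceid", "Example device");
 *	}
 *	...
 *	tb_property_free_dir(dir);
 */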
/**
* tb_property_remove() - Removes property from a parent directory
* @property: Property to remove
*
* Note that the memory for @property is released as well, so it is not
* allowed to touch the object after a call to this function.
*/
void tb_property_remove(struct tb_property *property)
{
list_del(&property->list);
kfree(property);
}
EXPORT_SYMBOL_GPL(tb_property_remove);
/**
* tb_property_find() - Find a property from a directory
* @dir: Directory where the property is searched
* @key: Key to look for
* @type: Type of the property
*
* Finds and returns property from the given directory. Does not recurse
* into sub-directories. Returns %NULL if the property was not found.
*/
struct tb_property *tb_property_find(struct tb_property_dir *dir,
const char *key, enum tb_property_type type)
{
struct tb_property *property;
list_for_each_entry(property, &dir->properties, list) {
if (property->type == type && !strcmp(property->key, key))
return property;
}
return NULL;
}
EXPORT_SYMBOL_GPL(tb_property_find);
/**
* tb_property_get_next() - Get next property from directory
* @dir: Directory holding properties
* @prev: Previous property in the directory (%NULL returns the first)
*/
struct tb_property *tb_property_get_next(struct tb_property_dir *dir,
struct tb_property *prev)
{
if (prev) {
if (list_is_last(&prev->list, &dir->properties))
return NULL;
return list_next_entry(prev, list);
}
return list_first_entry_or_null(&dir->properties, struct tb_property,
list);
}
EXPORT_SYMBOL_GPL(tb_property_get_next);
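/*
 * Example (sketch): walking every property of a directory with
 * tb_property_get_next(). Passing %NULL returns the first property and
 * the loop stops when %NULL is returned again.
 *
 *	struct tb_property *p = NULL;
 *
 *	while ((p = tb_property_get_next(dir, p)))
 *		handle_property(p);	// handle_property() is hypothetical
 */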
| linux-master | drivers/thunderbolt/property.c |
// SPDX-License-Identifier: GPL-2.0
/*
* CLx support
*
* Copyright (C) 2020 - 2023, Intel Corporation
* Authors: Gil Fine <[email protected]>
* Mika Westerberg <[email protected]>
*/
#include <linux/module.h>
#include "tb.h"
static bool clx_enabled = true;
module_param_named(clx, clx_enabled, bool, 0444);
MODULE_PARM_DESC(clx, "allow low power states on the high-speed lanes (default: true)");
static const char *clx_name(unsigned int clx)
{
switch (clx) {
case TB_CL0S | TB_CL1 | TB_CL2:
return "CL0s/CL1/CL2";
case TB_CL1 | TB_CL2:
return "CL1/CL2";
case TB_CL0S | TB_CL2:
return "CL0s/CL2";
case TB_CL0S | TB_CL1:
return "CL0s/CL1";
case TB_CL0S:
return "CL0s";
case 0:
return "disabled";
default:
return "unknown";
}
}
static int tb_port_pm_secondary_set(struct tb_port *port, bool secondary)
{
u32 phy;
int ret;
ret = tb_port_read(port, &phy, TB_CFG_PORT,
port->cap_phy + LANE_ADP_CS_1, 1);
if (ret)
return ret;
if (secondary)
phy |= LANE_ADP_CS_1_PMS;
else
phy &= ~LANE_ADP_CS_1_PMS;
return tb_port_write(port, &phy, TB_CFG_PORT,
port->cap_phy + LANE_ADP_CS_1, 1);
}
static int tb_port_pm_secondary_enable(struct tb_port *port)
{
return tb_port_pm_secondary_set(port, true);
}
static int tb_port_pm_secondary_disable(struct tb_port *port)
{
return tb_port_pm_secondary_set(port, false);
}
/* Called for USB4 or Titan Ridge routers only */
static bool tb_port_clx_supported(struct tb_port *port, unsigned int clx)
{
u32 val, mask = 0;
bool ret;
/* Don't enable CLx in case of two single-lane links */
if (!port->bonded && port->dual_link_port)
return false;
/* Don't enable CLx in case of inter-domain link */
if (port->xdomain)
return false;
if (tb_switch_is_usb4(port->sw)) {
if (!usb4_port_clx_supported(port))
return false;
} else if (!tb_lc_is_clx_supported(port)) {
return false;
}
if (clx & TB_CL0S)
mask |= LANE_ADP_CS_0_CL0S_SUPPORT;
if (clx & TB_CL1)
mask |= LANE_ADP_CS_0_CL1_SUPPORT;
if (clx & TB_CL2)
mask |= LANE_ADP_CS_0_CL2_SUPPORT;
ret = tb_port_read(port, &val, TB_CFG_PORT,
port->cap_phy + LANE_ADP_CS_0, 1);
if (ret)
return false;
return !!(val & mask);
}
static int tb_port_clx_set(struct tb_port *port, unsigned int clx, bool enable)
{
u32 phy, mask = 0;
int ret;
if (clx & TB_CL0S)
mask |= LANE_ADP_CS_1_CL0S_ENABLE;
if (clx & TB_CL1)
mask |= LANE_ADP_CS_1_CL1_ENABLE;
if (clx & TB_CL2)
mask |= LANE_ADP_CS_1_CL2_ENABLE;
if (!mask)
return -EOPNOTSUPP;
ret = tb_port_read(port, &phy, TB_CFG_PORT,
port->cap_phy + LANE_ADP_CS_1, 1);
if (ret)
return ret;
if (enable)
phy |= mask;
else
phy &= ~mask;
return tb_port_write(port, &phy, TB_CFG_PORT,
port->cap_phy + LANE_ADP_CS_1, 1);
}
static int tb_port_clx_disable(struct tb_port *port, unsigned int clx)
{
return tb_port_clx_set(port, clx, false);
}
static int tb_port_clx_enable(struct tb_port *port, unsigned int clx)
{
return tb_port_clx_set(port, clx, true);
}
static int tb_port_clx(struct tb_port *port)
{
u32 val;
int ret;
if (!tb_port_clx_supported(port, TB_CL0S | TB_CL1 | TB_CL2))
return 0;
ret = tb_port_read(port, &val, TB_CFG_PORT,
port->cap_phy + LANE_ADP_CS_1, 1);
if (ret)
return ret;
if (val & LANE_ADP_CS_1_CL0S_ENABLE)
ret |= TB_CL0S;
if (val & LANE_ADP_CS_1_CL1_ENABLE)
ret |= TB_CL1;
if (val & LANE_ADP_CS_1_CL2_ENABLE)
ret |= TB_CL2;
return ret;
}
/**
* tb_port_clx_is_enabled() - Is given CL state enabled
* @port: USB4 port to check
* @clx: Mask of CL states to check
*
* Returns true if any of the given CL states is enabled for @port.
*/
bool tb_port_clx_is_enabled(struct tb_port *port, unsigned int clx)
{
return !!(tb_port_clx(port) & clx);
}
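/*
 * Example (sketch): since the CL states form a bitmask, checking whether
 * either CL0s or CL1 is active on a lane adapter looks like:
 *
 *	if (tb_port_clx_is_enabled(port, TB_CL0S | TB_CL1))
 *		...
 */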
/**
* tb_switch_clx_init() - Initialize router CL states
* @sw: Router
*
* Can be called for any router. Initializes the current CL state by
* reading it from the hardware.
*
* Returns %0 in case of success and negative errno in case of failure.
*/
int tb_switch_clx_init(struct tb_switch *sw)
{
struct tb_port *up, *down;
unsigned int clx, tmp;
if (tb_switch_is_icm(sw))
return 0;
if (!tb_route(sw))
return 0;
if (!tb_switch_clx_is_supported(sw))
return 0;
up = tb_upstream_port(sw);
down = tb_switch_downstream_port(sw);
clx = tb_port_clx(up);
tmp = tb_port_clx(down);
if (clx != tmp)
tb_sw_warn(sw, "CLx: inconsistent configuration %#x != %#x\n",
clx, tmp);
tb_sw_dbg(sw, "CLx: current mode: %s\n", clx_name(clx));
sw->clx = clx;
return 0;
}
static int tb_switch_pm_secondary_resolve(struct tb_switch *sw)
{
struct tb_port *up, *down;
int ret;
if (!tb_route(sw))
return 0;
up = tb_upstream_port(sw);
down = tb_switch_downstream_port(sw);
ret = tb_port_pm_secondary_enable(up);
if (ret)
return ret;
return tb_port_pm_secondary_disable(down);
}
static int tb_switch_mask_clx_objections(struct tb_switch *sw)
{
int up_port = sw->config.upstream_port_number;
u32 offset, val[2], mask_obj, unmask_obj;
int ret, i;
/* Of the pre-USB4 devices only Titan Ridge supports CLx states */
if (!tb_switch_is_titan_ridge(sw))
return 0;
if (!tb_route(sw))
return 0;
/*
* In Titan Ridge there are only 2 dual-lane Thunderbolt ports:
* Port A consists of lane adapters 1,2 and
* Port B consists of lane adapters 3,4
* If upstream port is A, (lanes are 1,2), we mask objections from
* port B (lanes 3,4) and unmask objections from Port A and vice-versa.
*/
if (up_port == 1) {
mask_obj = TB_LOW_PWR_C0_PORT_B_MASK;
unmask_obj = TB_LOW_PWR_C1_PORT_A_MASK;
offset = TB_LOW_PWR_C1_CL1;
} else {
mask_obj = TB_LOW_PWR_C1_PORT_A_MASK;
unmask_obj = TB_LOW_PWR_C0_PORT_B_MASK;
offset = TB_LOW_PWR_C3_CL1;
}
ret = tb_sw_read(sw, &val, TB_CFG_SWITCH,
sw->cap_lp + offset, ARRAY_SIZE(val));
if (ret)
return ret;
for (i = 0; i < ARRAY_SIZE(val); i++) {
val[i] |= mask_obj;
val[i] &= ~unmask_obj;
}
return tb_sw_write(sw, &val, TB_CFG_SWITCH,
sw->cap_lp + offset, ARRAY_SIZE(val));
}
/**
* tb_switch_clx_is_supported() - Is CLx supported on this type of router
* @sw: The router to check CLx support for
*/
bool tb_switch_clx_is_supported(const struct tb_switch *sw)
{
if (!clx_enabled)
return false;
if (sw->quirks & QUIRK_NO_CLX)
return false;
/*
* CLx is not enabled and validated on Intel USB4 platforms
* before Alder Lake.
*/
if (tb_switch_is_tiger_lake(sw))
return false;
return tb_switch_is_usb4(sw) || tb_switch_is_titan_ridge(sw);
}
static bool validate_mask(unsigned int clx)
{
/* Previous states need to be enabled */
if (clx & TB_CL1)
return (clx & TB_CL0S) == TB_CL0S;
return true;
}
/**
* tb_switch_clx_enable() - Enable CLx on upstream port of specified router
* @sw: Router to enable CLx for
* @clx: The CLx state to enable
*
* CLx is enabled only if both sides of the link support CLx, the link is not
* configured as two single-lane links, and the link is not an inter-domain
* link. The complete set of conditions is described in CM Guide 1.0
* section 8.1.
*
* Returns %0 on success or an error code on failure.
*/
int tb_switch_clx_enable(struct tb_switch *sw, unsigned int clx)
{
bool up_clx_support, down_clx_support;
struct tb_switch *parent_sw;
struct tb_port *up, *down;
int ret;
if (!clx || sw->clx == clx)
return 0;
if (!validate_mask(clx))
return -EINVAL;
parent_sw = tb_switch_parent(sw);
if (!parent_sw)
return 0;
if (!tb_switch_clx_is_supported(parent_sw) ||
!tb_switch_clx_is_supported(sw))
return 0;
/* Only support CL2 for v2 routers */
if ((clx & TB_CL2) &&
(usb4_switch_version(parent_sw) < 2 ||
usb4_switch_version(sw) < 2))
return -EOPNOTSUPP;
ret = tb_switch_pm_secondary_resolve(sw);
if (ret)
return ret;
up = tb_upstream_port(sw);
down = tb_switch_downstream_port(sw);
up_clx_support = tb_port_clx_supported(up, clx);
down_clx_support = tb_port_clx_supported(down, clx);
tb_port_dbg(up, "CLx: %s %ssupported\n", clx_name(clx),
up_clx_support ? "" : "not ");
tb_port_dbg(down, "CLx: %s %ssupported\n", clx_name(clx),
down_clx_support ? "" : "not ");
if (!up_clx_support || !down_clx_support)
return -EOPNOTSUPP;
ret = tb_port_clx_enable(up, clx);
if (ret)
return ret;
ret = tb_port_clx_enable(down, clx);
if (ret) {
tb_port_clx_disable(up, clx);
return ret;
}
ret = tb_switch_mask_clx_objections(sw);
if (ret) {
tb_port_clx_disable(up, clx);
tb_port_clx_disable(down, clx);
return ret;
}
sw->clx |= clx;
tb_sw_dbg(sw, "CLx: %s enabled\n", clx_name(clx));
return 0;
}
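/*
 * Example (sketch, not necessarily how the connection manager calls
 * this): a caller would typically request a set of CL states and treat
 * -EOPNOTSUPP as "run without CLx" rather than as a hard failure.
 *
 *	ret = tb_switch_clx_enable(sw, TB_CL0S | TB_CL1);
 *	if (ret && ret != -EOPNOTSUPP)
 *		return ret;
 */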
/**
* tb_switch_clx_disable() - Disable CLx on upstream port of specified router
* @sw: Router to disable CLx for
*
* Disables all CL states of the given router. Can be called on any
* router and does nothing if the CL states were not enabled already.
*
* Returns the CL states that were disabled or negative errno in case of
* failure.
*/
int tb_switch_clx_disable(struct tb_switch *sw)
{
unsigned int clx = sw->clx;
struct tb_port *up, *down;
int ret;
if (!tb_switch_clx_is_supported(sw))
return 0;
if (!clx)
return 0;
up = tb_upstream_port(sw);
down = tb_switch_downstream_port(sw);
ret = tb_port_clx_disable(up, clx);
if (ret)
return ret;
ret = tb_port_clx_disable(down, clx);
if (ret)
return ret;
sw->clx = 0;
tb_sw_dbg(sw, "CLx: %s disabled\n", clx_name(clx));
return clx;
}
| linux-master | drivers/thunderbolt/clx.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Thunderbolt driver - eeprom access
*
* Copyright (c) 2014 Andreas Noever <[email protected]>
* Copyright (C) 2018, Intel Corporation
*/
#include <linux/crc32.h>
#include <linux/delay.h>
#include <linux/property.h>
#include <linux/slab.h>
#include "tb.h"
/*
* tb_eeprom_ctl_write() - write control word
*/
static int tb_eeprom_ctl_write(struct tb_switch *sw, struct tb_eeprom_ctl *ctl)
{
return tb_sw_write(sw, ctl, TB_CFG_SWITCH, sw->cap_plug_events + ROUTER_CS_4, 1);
}
/*
* tb_eeprom_ctl_read() - read control word
*/
static int tb_eeprom_ctl_read(struct tb_switch *sw, struct tb_eeprom_ctl *ctl)
{
return tb_sw_read(sw, ctl, TB_CFG_SWITCH, sw->cap_plug_events + ROUTER_CS_4, 1);
}
enum tb_eeprom_transfer {
TB_EEPROM_IN,
TB_EEPROM_OUT,
};
/*
* tb_eeprom_active - enable rom access
*
* WARNING: Always disable access after usage. Otherwise the controller will
* fail to reprobe.
*/
static int tb_eeprom_active(struct tb_switch *sw, bool enable)
{
struct tb_eeprom_ctl ctl;
int res = tb_eeprom_ctl_read(sw, &ctl);
if (res)
return res;
if (enable) {
ctl.bit_banging_enable = 1;
res = tb_eeprom_ctl_write(sw, &ctl);
if (res)
return res;
ctl.fl_cs = 0;
return tb_eeprom_ctl_write(sw, &ctl);
} else {
ctl.fl_cs = 1;
res = tb_eeprom_ctl_write(sw, &ctl);
if (res)
return res;
ctl.bit_banging_enable = 0;
return tb_eeprom_ctl_write(sw, &ctl);
}
}
/*
* tb_eeprom_transfer - transfer one bit
*
* If TB_EEPROM_IN is passed, then the bit can be retrieved from ctl->fl_do.
* If TB_EEPROM_OUT is passed, then ctl->fl_di will be written.
*/
static int tb_eeprom_transfer(struct tb_switch *sw, struct tb_eeprom_ctl *ctl,
enum tb_eeprom_transfer direction)
{
int res;
if (direction == TB_EEPROM_OUT) {
res = tb_eeprom_ctl_write(sw, ctl);
if (res)
return res;
}
ctl->fl_sk = 1;
res = tb_eeprom_ctl_write(sw, ctl);
if (res)
return res;
if (direction == TB_EEPROM_IN) {
res = tb_eeprom_ctl_read(sw, ctl);
if (res)
return res;
}
ctl->fl_sk = 0;
return tb_eeprom_ctl_write(sw, ctl);
}
/*
* tb_eeprom_out - write one byte to the bus
*/
static int tb_eeprom_out(struct tb_switch *sw, u8 val)
{
struct tb_eeprom_ctl ctl;
int i;
int res = tb_eeprom_ctl_read(sw, &ctl);
if (res)
return res;
for (i = 0; i < 8; i++) {
ctl.fl_di = val & 0x80;
res = tb_eeprom_transfer(sw, &ctl, TB_EEPROM_OUT);
if (res)
return res;
val <<= 1;
}
return 0;
}
/*
* tb_eeprom_in - read one byte from the bus
*/
static int tb_eeprom_in(struct tb_switch *sw, u8 *val)
{
struct tb_eeprom_ctl ctl;
int i;
int res = tb_eeprom_ctl_read(sw, &ctl);
if (res)
return res;
*val = 0;
for (i = 0; i < 8; i++) {
*val <<= 1;
res = tb_eeprom_transfer(sw, &ctl, TB_EEPROM_IN);
if (res)
return res;
*val |= ctl.fl_do;
}
return 0;
}
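/*
 * Note: both helpers above transfer bytes MSB first, i.e. bit 7 is the
 * first bit clocked out in tb_eeprom_out() and the first bit shifted in
 * by tb_eeprom_in().
 */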
/*
* tb_eeprom_get_drom_offset - get drom offset within eeprom
*/
static int tb_eeprom_get_drom_offset(struct tb_switch *sw, u16 *offset)
{
struct tb_cap_plug_events cap;
int res;
if (!sw->cap_plug_events) {
tb_sw_warn(sw, "no TB_CAP_PLUG_EVENTS, cannot read eeprom\n");
return -ENODEV;
}
res = tb_sw_read(sw, &cap, TB_CFG_SWITCH, sw->cap_plug_events,
sizeof(cap) / 4);
if (res)
return res;
if (!cap.eeprom_ctl.present || cap.eeprom_ctl.not_present) {
tb_sw_warn(sw, "no NVM\n");
return -ENODEV;
}
if (cap.drom_offset > 0xffff) {
tb_sw_warn(sw, "drom offset is larger than 0xffff: %#x\n",
cap.drom_offset);
return -ENXIO;
}
*offset = cap.drom_offset;
return 0;
}
/*
* tb_eeprom_read_n - read count bytes from offset into val
*/
static int tb_eeprom_read_n(struct tb_switch *sw, u16 offset, u8 *val,
size_t count)
{
u16 drom_offset;
int i, res;
res = tb_eeprom_get_drom_offset(sw, &drom_offset);
if (res)
return res;
offset += drom_offset;
res = tb_eeprom_active(sw, true);
if (res)
return res;
res = tb_eeprom_out(sw, 3);
if (res)
return res;
res = tb_eeprom_out(sw, offset >> 8);
if (res)
return res;
res = tb_eeprom_out(sw, offset);
if (res)
return res;
for (i = 0; i < count; i++) {
res = tb_eeprom_in(sw, val + i);
if (res)
return res;
}
return tb_eeprom_active(sw, false);
}
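/*
 * Note (assumption, not taken from a specification): the value 3 written
 * first above appears to be the EEPROM read opcode, followed by the
 * 16-bit address sent MSB first.
 */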
static u8 tb_crc8(u8 *data, int len)
{
int i, j;
u8 val = 0xff;
for (i = 0; i < len; i++) {
val ^= data[i];
for (j = 0; j < 8; j++)
val = (val << 1) ^ ((val & 0x80) ? 7 : 0);
}
return val;
}
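/*
 * This is a bitwise CRC-8 with polynomial 0x07 (x^8 + x^2 + x + 1) and
 * initial value 0xff. Example (see tb_drom_read_uid_only() below): the
 * DROM UID checksum is computed over the eight UID bytes,
 *
 *	crc = tb_crc8(data + 1, 8);	// data[0] holds the expected CRC
 */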
static u32 tb_crc32(void *data, size_t len)
{
return ~__crc32c_le(~0, data, len);
}
#define TB_DROM_DATA_START 13
#define TB_DROM_HEADER_SIZE 22
#define USB4_DROM_HEADER_SIZE 16
struct tb_drom_header {
/* BYTE 0 */
u8 uid_crc8; /* checksum for uid */
/* BYTES 1-8 */
u64 uid;
/* BYTES 9-12 */
u32 data_crc32; /* checksum for data_len bytes starting at byte 13 */
/* BYTE 13 */
u8 device_rom_revision; /* should be <= 1 */
u16 data_len:12;
u8 reserved:4;
/* BYTES 16-21 - Only for TBT DROM, nonexistent in USB4 DROM */
u16 vendor_id;
u16 model_id;
u8 model_rev;
u8 eeprom_rev;
} __packed;
enum tb_drom_entry_type {
/* force unsigned to prevent "one-bit signed bitfield" warning */
TB_DROM_ENTRY_GENERIC = 0U,
TB_DROM_ENTRY_PORT,
};
struct tb_drom_entry_header {
u8 len;
u8 index:6;
bool port_disabled:1; /* only valid if type is TB_DROM_ENTRY_PORT */
enum tb_drom_entry_type type:1;
} __packed;
struct tb_drom_entry_generic {
struct tb_drom_entry_header header;
u8 data[];
} __packed;
struct tb_drom_entry_port {
/* BYTES 0-1 */
struct tb_drom_entry_header header;
/* BYTE 2 */
u8 dual_link_port_rid:4;
u8 link_nr:1;
u8 unknown1:2;
bool has_dual_link_port:1;
/* BYTE 3 */
u8 dual_link_port_nr:6;
u8 unknown2:2;
/* BYTES 4 - 5 TODO decode */
u8 micro2:4;
u8 micro1:4;
u8 micro3;
/* BYTES 6-7, TODO: verify (find hardware that has these set) */
u8 peer_port_rid:4;
u8 unknown3:3;
bool has_peer_port:1;
u8 peer_port_nr:6;
u8 unknown4:2;
} __packed;
/* USB4 product descriptor */
struct tb_drom_entry_desc {
struct tb_drom_entry_header header;
u16 bcdUSBSpec;
u16 idVendor;
u16 idProduct;
u16 bcdProductFWRevision;
u32 TID;
u8 productHWRevision;
};
/**
* tb_drom_read_uid_only() - Read UID directly from DROM
* @sw: Router whose UID to read
* @uid: UID is placed here
*
* Does not use the cached copy in sw->drom. Used during resume to check switch
* identity.
*/
int tb_drom_read_uid_only(struct tb_switch *sw, u64 *uid)
{
u8 data[9];
u8 crc;
int res;
/* read uid */
res = tb_eeprom_read_n(sw, 0, data, 9);
if (res)
return res;
crc = tb_crc8(data + 1, 8);
if (crc != data[0]) {
tb_sw_warn(sw, "uid crc8 mismatch (expected: %#x, got: %#x)\n",
data[0], crc);
return -EIO;
}
*uid = *(u64 *)(data+1);
return 0;
}
static int tb_drom_parse_entry_generic(struct tb_switch *sw,
struct tb_drom_entry_header *header)
{
const struct tb_drom_entry_generic *entry =
(const struct tb_drom_entry_generic *)header;
switch (header->index) {
case 1:
/* Length includes 2 bytes header so remove it before copy */
sw->vendor_name = kstrndup(entry->data,
header->len - sizeof(*header), GFP_KERNEL);
if (!sw->vendor_name)
return -ENOMEM;
break;
case 2:
sw->device_name = kstrndup(entry->data,
header->len - sizeof(*header), GFP_KERNEL);
if (!sw->device_name)
return -ENOMEM;
break;
case 9: {
const struct tb_drom_entry_desc *desc =
(const struct tb_drom_entry_desc *)entry;
if (!sw->vendor && !sw->device) {
sw->vendor = desc->idVendor;
sw->device = desc->idProduct;
}
break;
}
}
return 0;
}
static int tb_drom_parse_entry_port(struct tb_switch *sw,
struct tb_drom_entry_header *header)
{
struct tb_port *port;
int res;
enum tb_port_type type;
/*
* Some DROMs list more ports than the controller actually has
* so we skip those but allow the parser to continue.
*/
if (header->index > sw->config.max_port_number) {
dev_info_once(&sw->dev, "ignoring unnecessary extra entries in DROM\n");
return 0;
}
port = &sw->ports[header->index];
port->disabled = header->port_disabled;
if (port->disabled)
return 0;
res = tb_port_read(port, &type, TB_CFG_PORT, 2, 1);
if (res)
return res;
type &= 0xffffff;
if (type == TB_TYPE_PORT) {
struct tb_drom_entry_port *entry = (void *) header;
if (header->len != sizeof(*entry)) {
tb_sw_warn(sw,
"port entry has size %#x (expected %#zx)\n",
header->len, sizeof(struct tb_drom_entry_port));
return -EIO;
}
port->link_nr = entry->link_nr;
if (entry->has_dual_link_port)
port->dual_link_port =
&port->sw->ports[entry->dual_link_port_nr];
}
return 0;
}
/*
* tb_drom_parse_entries - parse the linked list of drom entries
*
* Drom must have been copied to sw->drom.
*/
static int tb_drom_parse_entries(struct tb_switch *sw, size_t header_size)
{
struct tb_drom_header *header = (void *) sw->drom;
u16 pos = header_size;
u16 drom_size = header->data_len + TB_DROM_DATA_START;
int res;
while (pos < drom_size) {
struct tb_drom_entry_header *entry = (void *) (sw->drom + pos);
if (pos + 1 == drom_size || pos + entry->len > drom_size
|| !entry->len) {
tb_sw_warn(sw, "DROM buffer overrun\n");
return -EIO;
}
switch (entry->type) {
case TB_DROM_ENTRY_GENERIC:
res = tb_drom_parse_entry_generic(sw, entry);
break;
case TB_DROM_ENTRY_PORT:
res = tb_drom_parse_entry_port(sw, entry);
break;
}
if (res)
return res;
pos += entry->len;
}
return 0;
}
/*
* tb_drom_copy_efi - copy drom supplied by EFI to sw->drom if present
*/
static int tb_drom_copy_efi(struct tb_switch *sw, u16 *size)
{
struct device *dev = &sw->tb->nhi->pdev->dev;
int len, res;
len = device_property_count_u8(dev, "ThunderboltDROM");
if (len < 0 || len < sizeof(struct tb_drom_header))
return -EINVAL;
sw->drom = kmalloc(len, GFP_KERNEL);
if (!sw->drom)
return -ENOMEM;
res = device_property_read_u8_array(dev, "ThunderboltDROM", sw->drom,
len);
if (res)
goto err;
*size = ((struct tb_drom_header *)sw->drom)->data_len +
TB_DROM_DATA_START;
if (*size > len)
goto err;
return 0;
err:
kfree(sw->drom);
sw->drom = NULL;
return -EINVAL;
}
static int tb_drom_copy_nvm(struct tb_switch *sw, u16 *size)
{
u16 drom_offset;
int ret;
if (!sw->dma_port)
return -ENODEV;
ret = tb_eeprom_get_drom_offset(sw, &drom_offset);
if (ret)
return ret;
if (!drom_offset)
return -ENODEV;
ret = dma_port_flash_read(sw->dma_port, drom_offset + 14, size,
sizeof(*size));
if (ret)
return ret;
/* Size includes CRC8 + UID + CRC32 */
*size += 1 + 8 + 4;
sw->drom = kzalloc(*size, GFP_KERNEL);
if (!sw->drom)
return -ENOMEM;
ret = dma_port_flash_read(sw->dma_port, drom_offset, sw->drom, *size);
if (ret)
goto err_free;
/*
* Read UID from the minimal DROM because the one in NVM is just
* a placeholder.
*/
tb_drom_read_uid_only(sw, &sw->uid);
return 0;
err_free:
kfree(sw->drom);
sw->drom = NULL;
return ret;
}
static int usb4_copy_drom(struct tb_switch *sw, u16 *size)
{
int ret;
ret = usb4_switch_drom_read(sw, 14, size, sizeof(*size));
if (ret)
return ret;
/* Size includes CRC8 + UID + CRC32 */
*size += 1 + 8 + 4;
sw->drom = kzalloc(*size, GFP_KERNEL);
if (!sw->drom)
return -ENOMEM;
ret = usb4_switch_drom_read(sw, 0, sw->drom, *size);
if (ret) {
kfree(sw->drom);
sw->drom = NULL;
}
return ret;
}
static int tb_drom_bit_bang(struct tb_switch *sw, u16 *size)
{
int ret;
ret = tb_eeprom_read_n(sw, 14, (u8 *)size, 2);
if (ret)
return ret;
*size &= 0x3ff;
*size += TB_DROM_DATA_START;
tb_sw_dbg(sw, "reading DROM (length: %#x)\n", *size);
if (*size < sizeof(struct tb_drom_header)) {
tb_sw_warn(sw, "DROM too small, aborting\n");
return -EIO;
}
sw->drom = kzalloc(*size, GFP_KERNEL);
if (!sw->drom)
return -ENOMEM;
ret = tb_eeprom_read_n(sw, 0, sw->drom, *size);
if (ret)
goto err;
return 0;
err:
kfree(sw->drom);
sw->drom = NULL;
return ret;
}
static int tb_drom_parse_v1(struct tb_switch *sw)
{
const struct tb_drom_header *header =
(const struct tb_drom_header *)sw->drom;
u32 crc;
crc = tb_crc8((u8 *) &header->uid, 8);
if (crc != header->uid_crc8) {
tb_sw_warn(sw,
"DROM UID CRC8 mismatch (expected: %#x, got: %#x)\n",
header->uid_crc8, crc);
return -EIO;
}
if (!sw->uid)
sw->uid = header->uid;
sw->vendor = header->vendor_id;
sw->device = header->model_id;
crc = tb_crc32(sw->drom + TB_DROM_DATA_START, header->data_len);
if (crc != header->data_crc32) {
tb_sw_warn(sw,
"DROM data CRC32 mismatch (expected: %#x, got: %#x), continuing\n",
header->data_crc32, crc);
}
return tb_drom_parse_entries(sw, TB_DROM_HEADER_SIZE);
}
static int usb4_drom_parse(struct tb_switch *sw)
{
const struct tb_drom_header *header =
(const struct tb_drom_header *)sw->drom;
u32 crc;
crc = tb_crc32(sw->drom + TB_DROM_DATA_START, header->data_len);
if (crc != header->data_crc32) {
tb_sw_warn(sw,
"DROM data CRC32 mismatch (expected: %#x, got: %#x), continuing\n",
header->data_crc32, crc);
}
return tb_drom_parse_entries(sw, USB4_DROM_HEADER_SIZE);
}
static int tb_drom_parse(struct tb_switch *sw, u16 size)
{
const struct tb_drom_header *header = (const void *)sw->drom;
int ret;
if (header->data_len + TB_DROM_DATA_START != size) {
tb_sw_warn(sw, "DROM size mismatch\n");
ret = -EIO;
goto err;
}
tb_sw_dbg(sw, "DROM version: %d\n", header->device_rom_revision);
switch (header->device_rom_revision) {
case 3:
ret = usb4_drom_parse(sw);
break;
default:
tb_sw_warn(sw, "DROM device_rom_revision %#x unknown\n",
header->device_rom_revision);
fallthrough;
case 1:
ret = tb_drom_parse_v1(sw);
break;
}
if (ret) {
tb_sw_warn(sw, "parsing DROM failed\n");
goto err;
}
return 0;
err:
kfree(sw->drom);
sw->drom = NULL;
return ret;
}
static int tb_drom_host_read(struct tb_switch *sw)
{
u16 size;
if (tb_switch_is_usb4(sw)) {
usb4_switch_read_uid(sw, &sw->uid);
if (!usb4_copy_drom(sw, &size))
return tb_drom_parse(sw, size);
} else {
if (!tb_drom_copy_efi(sw, &size))
return tb_drom_parse(sw, size);
if (!tb_drom_copy_nvm(sw, &size))
return tb_drom_parse(sw, size);
tb_drom_read_uid_only(sw, &sw->uid);
}
return 0;
}
static int tb_drom_device_read(struct tb_switch *sw)
{
u16 size;
int ret;
if (tb_switch_is_usb4(sw)) {
usb4_switch_read_uid(sw, &sw->uid);
ret = usb4_copy_drom(sw, &size);
} else {
ret = tb_drom_bit_bang(sw, &size);
}
if (ret)
return ret;
return tb_drom_parse(sw, size);
}
/**
* tb_drom_read() - Copy DROM to sw->drom and parse it
* @sw: Router whose DROM to read and parse
*
* This function reads router DROM and if successful parses the entries and
* populates the fields in @sw accordingly. Can be called for any router
* generation.
*
* Returns %0 in case of success and negative errno otherwise.
*/
int tb_drom_read(struct tb_switch *sw)
{
if (sw->drom)
return 0;
if (!tb_route(sw))
return tb_drom_host_read(sw);
return tb_drom_device_read(sw);
}
| linux-master | drivers/thunderbolt/eeprom.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Thunderbolt link controller support
*
* Copyright (C) 2019, Intel Corporation
* Author: Mika Westerberg <[email protected]>
*/
#include "tb.h"
/**
* tb_lc_read_uuid() - Read switch UUID from link controller common register
* @sw: Switch whose UUID is read
* @uuid: UUID is placed here
*/
int tb_lc_read_uuid(struct tb_switch *sw, u32 *uuid)
{
if (!sw->cap_lc)
return -EINVAL;
return tb_sw_read(sw, uuid, TB_CFG_SWITCH, sw->cap_lc + TB_LC_FUSE, 4);
}
static int read_lc_desc(struct tb_switch *sw, u32 *desc)
{
if (!sw->cap_lc)
return -EINVAL;
return tb_sw_read(sw, desc, TB_CFG_SWITCH, sw->cap_lc + TB_LC_DESC, 1);
}
static int find_port_lc_cap(struct tb_port *port)
{
struct tb_switch *sw = port->sw;
int start, phys, ret, size;
u32 desc;
ret = read_lc_desc(sw, &desc);
if (ret)
return ret;
/* Start of port LC registers */
start = (desc & TB_LC_DESC_SIZE_MASK) >> TB_LC_DESC_SIZE_SHIFT;
size = (desc & TB_LC_DESC_PORT_SIZE_MASK) >> TB_LC_DESC_PORT_SIZE_SHIFT;
phys = tb_phy_port_from_link(port->port);
return sw->cap_lc + start + phys * size;
}
static int tb_lc_set_port_configured(struct tb_port *port, bool configured)
{
bool upstream = tb_is_upstream_port(port);
struct tb_switch *sw = port->sw;
u32 ctrl, lane;
int cap, ret;
if (sw->generation < 2)
return 0;
cap = find_port_lc_cap(port);
if (cap < 0)
return cap;
ret = tb_sw_read(sw, &ctrl, TB_CFG_SWITCH, cap + TB_LC_SX_CTRL, 1);
if (ret)
return ret;
/* Resolve correct lane */
if (port->port % 2)
lane = TB_LC_SX_CTRL_L1C;
else
lane = TB_LC_SX_CTRL_L2C;
if (configured) {
ctrl |= lane;
if (upstream)
ctrl |= TB_LC_SX_CTRL_UPSTREAM;
} else {
ctrl &= ~lane;
if (upstream)
ctrl &= ~TB_LC_SX_CTRL_UPSTREAM;
}
return tb_sw_write(sw, &ctrl, TB_CFG_SWITCH, cap + TB_LC_SX_CTRL, 1);
}
/**
* tb_lc_configure_port() - Let LC know about configured port
* @port: Port that is set as configured
*
* Sets the port configured for power management purposes.
*/
int tb_lc_configure_port(struct tb_port *port)
{
return tb_lc_set_port_configured(port, true);
}
/**
* tb_lc_unconfigure_port() - Let LC know about unconfigured port
* @port: Port that is set as configured
*
* Sets the port unconfigured for power management purposes.
*/
void tb_lc_unconfigure_port(struct tb_port *port)
{
tb_lc_set_port_configured(port, false);
}
static int tb_lc_set_xdomain_configured(struct tb_port *port, bool configure)
{
struct tb_switch *sw = port->sw;
u32 ctrl, lane;
int cap, ret;
if (sw->generation < 2)
return 0;
cap = find_port_lc_cap(port);
if (cap < 0)
return cap;
ret = tb_sw_read(sw, &ctrl, TB_CFG_SWITCH, cap + TB_LC_SX_CTRL, 1);
if (ret)
return ret;
/* Resolve correct lane */
if (port->port % 2)
lane = TB_LC_SX_CTRL_L1D;
else
lane = TB_LC_SX_CTRL_L2D;
if (configure)
ctrl |= lane;
else
ctrl &= ~lane;
return tb_sw_write(sw, &ctrl, TB_CFG_SWITCH, cap + TB_LC_SX_CTRL, 1);
}
/**
* tb_lc_configure_xdomain() - Inform LC that the link is XDomain
* @port: Switch downstream port connected to another host
*
* Sets the lane configured for XDomain accordingly so that the LC knows
* about this. Returns %0 in success and negative errno in failure.
*/
int tb_lc_configure_xdomain(struct tb_port *port)
{
return tb_lc_set_xdomain_configured(port, true);
}
/**
* tb_lc_unconfigure_xdomain() - Unconfigure XDomain from port
* @port: Switch downstream port that was connected to another host
*
* Unsets the lane XDomain configuration.
*/
void tb_lc_unconfigure_xdomain(struct tb_port *port)
{
tb_lc_set_xdomain_configured(port, false);
}
/**
* tb_lc_start_lane_initialization() - Start lane initialization
* @port: Device router lane 0 adapter
*
* Starts lane initialization for @port after the router resumed from
* sleep. Should be called for those downstream lane adapters that were
* not connected (tb_lc_configure_port() was not called) before sleep.
*
* Returns %0 in success and negative errno in case of failure.
*/
int tb_lc_start_lane_initialization(struct tb_port *port)
{
struct tb_switch *sw = port->sw;
int ret, cap;
u32 ctrl;
if (!tb_route(sw))
return 0;
if (sw->generation < 2)
return 0;
cap = find_port_lc_cap(port);
if (cap < 0)
return cap;
ret = tb_sw_read(sw, &ctrl, TB_CFG_SWITCH, cap + TB_LC_SX_CTRL, 1);
if (ret)
return ret;
ctrl |= TB_LC_SX_CTRL_SLI;
return tb_sw_write(sw, &ctrl, TB_CFG_SWITCH, cap + TB_LC_SX_CTRL, 1);
}
/**
* tb_lc_is_clx_supported() - Check whether CLx is supported by the lane adapter
* @port: Lane adapter
*
* TB_LC_LINK_ATTR_CPS bit reflects if the link supports CLx including
* active cables (if connected on the link).
*/
bool tb_lc_is_clx_supported(struct tb_port *port)
{
struct tb_switch *sw = port->sw;
int cap, ret;
u32 val;
cap = find_port_lc_cap(port);
if (cap < 0)
return false;
ret = tb_sw_read(sw, &val, TB_CFG_SWITCH, cap + TB_LC_LINK_ATTR, 1);
if (ret)
return false;
return !!(val & TB_LC_LINK_ATTR_CPS);
}
/**
* tb_lc_is_usb_plugged() - Is there USB device connected to port
* @port: Device router lane 0 adapter
*
* Returns true if the @port has USB type-C device connected.
*/
bool tb_lc_is_usb_plugged(struct tb_port *port)
{
struct tb_switch *sw = port->sw;
int cap, ret;
u32 val;
if (sw->generation != 3)
return false;
cap = find_port_lc_cap(port);
if (cap < 0)
return false;
ret = tb_sw_read(sw, &val, TB_CFG_SWITCH, cap + TB_LC_CS_42, 1);
if (ret)
return false;
return !!(val & TB_LC_CS_42_USB_PLUGGED);
}
/**
* tb_lc_is_xhci_connected() - Is the internal xHCI connected
* @port: Device router lane 0 adapter
*
* Returns true if the internal xHCI has been connected to @port.
*/
bool tb_lc_is_xhci_connected(struct tb_port *port)
{
struct tb_switch *sw = port->sw;
int cap, ret;
u32 val;
if (sw->generation != 3)
return false;
cap = find_port_lc_cap(port);
if (cap < 0)
return false;
ret = tb_sw_read(sw, &val, TB_CFG_SWITCH, cap + TB_LC_LINK_REQ, 1);
if (ret)
return false;
return !!(val & TB_LC_LINK_REQ_XHCI_CONNECT);
}
static int __tb_lc_xhci_connect(struct tb_port *port, bool connect)
{
struct tb_switch *sw = port->sw;
int cap, ret;
u32 val;
if (sw->generation != 3)
return -EINVAL;
cap = find_port_lc_cap(port);
if (cap < 0)
return cap;
ret = tb_sw_read(sw, &val, TB_CFG_SWITCH, cap + TB_LC_LINK_REQ, 1);
if (ret)
return ret;
if (connect)
val |= TB_LC_LINK_REQ_XHCI_CONNECT;
else
val &= ~TB_LC_LINK_REQ_XHCI_CONNECT;
return tb_sw_write(sw, &val, TB_CFG_SWITCH, cap + TB_LC_LINK_REQ, 1);
}
/**
* tb_lc_xhci_connect() - Connect internal xHCI
* @port: Device router lane 0 adapter
*
* Tells LC to connect the internal xHCI to @port. Returns %0 on success
* and negative errno in case of failure. Can be called for Thunderbolt 3
* routers only.
*/
int tb_lc_xhci_connect(struct tb_port *port)
{
int ret;
ret = __tb_lc_xhci_connect(port, true);
if (ret)
return ret;
tb_port_dbg(port, "xHCI connected\n");
return 0;
}
/**
* tb_lc_xhci_disconnect() - Disconnect internal xHCI
* @port: Device router lane 0 adapter
*
* Tells LC to disconnect the internal xHCI from @port. Can be called
* for Thunderbolt 3 routers only.
*/
void tb_lc_xhci_disconnect(struct tb_port *port)
{
__tb_lc_xhci_connect(port, false);
tb_port_dbg(port, "xHCI disconnected\n");
}
static int tb_lc_set_wake_one(struct tb_switch *sw, unsigned int offset,
unsigned int flags)
{
u32 ctrl;
int ret;
/*
* Enable wake on PCIe and USB4 (wake coming from another
* router).
*/
ret = tb_sw_read(sw, &ctrl, TB_CFG_SWITCH,
offset + TB_LC_SX_CTRL, 1);
if (ret)
return ret;
ctrl &= ~(TB_LC_SX_CTRL_WOC | TB_LC_SX_CTRL_WOD | TB_LC_SX_CTRL_WODPC |
TB_LC_SX_CTRL_WODPD | TB_LC_SX_CTRL_WOP | TB_LC_SX_CTRL_WOU4);
if (flags & TB_WAKE_ON_CONNECT)
ctrl |= TB_LC_SX_CTRL_WOC | TB_LC_SX_CTRL_WOD;
if (flags & TB_WAKE_ON_USB4)
ctrl |= TB_LC_SX_CTRL_WOU4;
if (flags & TB_WAKE_ON_PCIE)
ctrl |= TB_LC_SX_CTRL_WOP;
if (flags & TB_WAKE_ON_DP)
ctrl |= TB_LC_SX_CTRL_WODPC | TB_LC_SX_CTRL_WODPD;
return tb_sw_write(sw, &ctrl, TB_CFG_SWITCH, offset + TB_LC_SX_CTRL, 1);
}
/**
* tb_lc_set_wake() - Enable/disable wake
* @sw: Switch whose wakes to configure
* @flags: Wakeup flags (%0 to disable)
*
* For each LC sets wake bits accordingly.
*/
int tb_lc_set_wake(struct tb_switch *sw, unsigned int flags)
{
int start, size, nlc, ret, i;
u32 desc;
if (sw->generation < 2)
return 0;
if (!tb_route(sw))
return 0;
ret = read_lc_desc(sw, &desc);
if (ret)
return ret;
/* Figure out number of link controllers */
nlc = desc & TB_LC_DESC_NLC_MASK;
start = (desc & TB_LC_DESC_SIZE_MASK) >> TB_LC_DESC_SIZE_SHIFT;
size = (desc & TB_LC_DESC_PORT_SIZE_MASK) >> TB_LC_DESC_PORT_SIZE_SHIFT;
/* For each link controller set sleep bit */
for (i = 0; i < nlc; i++) {
unsigned int offset = sw->cap_lc + start + i * size;
ret = tb_lc_set_wake_one(sw, offset, flags);
if (ret)
return ret;
}
return 0;
}
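/*
 * Example (sketch): the wake sources are a bitmask, so enabling wake on
 * hot plug and on wakes coming from another router would be
 *
 *	tb_lc_set_wake(sw, TB_WAKE_ON_CONNECT | TB_WAKE_ON_USB4);
 *
 * and passing 0 clears all the wake bits handled here.
 */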
/**
* tb_lc_set_sleep() - Inform LC that the switch is going to sleep
* @sw: Switch to set sleep
*
* Let the switch link controllers know that the switch is going to
* sleep.
*/
int tb_lc_set_sleep(struct tb_switch *sw)
{
int start, size, nlc, ret, i;
u32 desc;
if (sw->generation < 2)
return 0;
ret = read_lc_desc(sw, &desc);
if (ret)
return ret;
/* Figure out number of link controllers */
nlc = desc & TB_LC_DESC_NLC_MASK;
start = (desc & TB_LC_DESC_SIZE_MASK) >> TB_LC_DESC_SIZE_SHIFT;
size = (desc & TB_LC_DESC_PORT_SIZE_MASK) >> TB_LC_DESC_PORT_SIZE_SHIFT;
/* For each link controller set sleep bit */
for (i = 0; i < nlc; i++) {
unsigned int offset = sw->cap_lc + start + i * size;
u32 ctrl;
ret = tb_sw_read(sw, &ctrl, TB_CFG_SWITCH,
offset + TB_LC_SX_CTRL, 1);
if (ret)
return ret;
ctrl |= TB_LC_SX_CTRL_SLP;
ret = tb_sw_write(sw, &ctrl, TB_CFG_SWITCH,
offset + TB_LC_SX_CTRL, 1);
if (ret)
return ret;
}
return 0;
}
/**
* tb_lc_lane_bonding_possible() - Is lane bonding possible towards switch
* @sw: Switch to check
*
* Checks whether conditions for lane bonding from parent to @sw are
* possible.
*/
bool tb_lc_lane_bonding_possible(struct tb_switch *sw)
{
struct tb_port *up;
int cap, ret;
u32 val;
if (sw->generation < 2)
return false;
up = tb_upstream_port(sw);
cap = find_port_lc_cap(up);
if (cap < 0)
return false;
ret = tb_sw_read(sw, &val, TB_CFG_SWITCH, cap + TB_LC_PORT_ATTR, 1);
if (ret)
return false;
return !!(val & TB_LC_PORT_ATTR_BE);
}
static int tb_lc_dp_sink_from_port(const struct tb_switch *sw,
struct tb_port *in)
{
struct tb_port *port;
/* The first DP IN port is sink 0 and second is sink 1 */
tb_switch_for_each_port(sw, port) {
if (tb_port_is_dpin(port))
return in != port;
}
return -EINVAL;
}
static int tb_lc_dp_sink_available(struct tb_switch *sw, int sink)
{
u32 val, alloc;
int ret;
ret = tb_sw_read(sw, &val, TB_CFG_SWITCH,
sw->cap_lc + TB_LC_SNK_ALLOCATION, 1);
if (ret)
return ret;
/*
* Sink is available for CM/SW to use if the allocation value is
* either 0 or 1.
*/
if (!sink) {
alloc = val & TB_LC_SNK_ALLOCATION_SNK0_MASK;
if (!alloc || alloc == TB_LC_SNK_ALLOCATION_SNK0_CM)
return 0;
} else {
alloc = (val & TB_LC_SNK_ALLOCATION_SNK1_MASK) >>
TB_LC_SNK_ALLOCATION_SNK1_SHIFT;
if (!alloc || alloc == TB_LC_SNK_ALLOCATION_SNK1_CM)
return 0;
}
return -EBUSY;
}
/**
* tb_lc_dp_sink_query() - Is DP sink available for DP IN port
* @sw: Switch whose DP sink is queried
* @in: DP IN port to check
*
* Queries through LC SNK_ALLOCATION registers whether DP sink is available
* for the given DP IN port or not.
*/
bool tb_lc_dp_sink_query(struct tb_switch *sw, struct tb_port *in)
{
int sink;
/*
* For older generations sink is always available as there is no
* allocation mechanism.
*/
if (sw->generation < 3)
return true;
sink = tb_lc_dp_sink_from_port(sw, in);
if (sink < 0)
return false;
return !tb_lc_dp_sink_available(sw, sink);
}
/**
* tb_lc_dp_sink_alloc() - Allocate DP sink
* @sw: Switch whose DP sink is allocated
* @in: DP IN port the DP sink is allocated for
*
* Allocate DP sink for @in via LC SNK_ALLOCATION registers. If the
* resource is available and allocation is successful returns %0. In all
* other cases returs negative errno. In particular %-EBUSY is returned if
* the resource was not available.
*/
int tb_lc_dp_sink_alloc(struct tb_switch *sw, struct tb_port *in)
{
int ret, sink;
u32 val;
if (sw->generation < 3)
return 0;
sink = tb_lc_dp_sink_from_port(sw, in);
if (sink < 0)
return sink;
ret = tb_lc_dp_sink_available(sw, sink);
if (ret)
return ret;
ret = tb_sw_read(sw, &val, TB_CFG_SWITCH,
sw->cap_lc + TB_LC_SNK_ALLOCATION, 1);
if (ret)
return ret;
if (!sink) {
val &= ~TB_LC_SNK_ALLOCATION_SNK0_MASK;
val |= TB_LC_SNK_ALLOCATION_SNK0_CM;
} else {
val &= ~TB_LC_SNK_ALLOCATION_SNK1_MASK;
val |= TB_LC_SNK_ALLOCATION_SNK1_CM <<
TB_LC_SNK_ALLOCATION_SNK1_SHIFT;
}
ret = tb_sw_write(sw, &val, TB_CFG_SWITCH,
sw->cap_lc + TB_LC_SNK_ALLOCATION, 1);
if (ret)
return ret;
tb_port_dbg(in, "sink %d allocated\n", sink);
return 0;
}
/**
* tb_lc_dp_sink_dealloc() - De-allocate DP sink
* @sw: Switch whose DP sink is de-allocated
* @in: DP IN port whose DP sink is de-allocated
*
* De-allocate DP sink from @in using LC SNK_ALLOCATION registers.
*/
int tb_lc_dp_sink_dealloc(struct tb_switch *sw, struct tb_port *in)
{
int ret, sink;
u32 val;
if (sw->generation < 3)
return 0;
sink = tb_lc_dp_sink_from_port(sw, in);
if (sink < 0)
return sink;
/* Needs to be owned by CM/SW */
ret = tb_lc_dp_sink_available(sw, sink);
if (ret)
return ret;
ret = tb_sw_read(sw, &val, TB_CFG_SWITCH,
sw->cap_lc + TB_LC_SNK_ALLOCATION, 1);
if (ret)
return ret;
if (!sink)
val &= ~TB_LC_SNK_ALLOCATION_SNK0_MASK;
else
val &= ~TB_LC_SNK_ALLOCATION_SNK1_MASK;
ret = tb_sw_write(sw, &val, TB_CFG_SWITCH,
sw->cap_lc + TB_LC_SNK_ALLOCATION, 1);
if (ret)
return ret;
tb_port_dbg(in, "sink %d de-allocated\n", sink);
return 0;
}
/**
* tb_lc_force_power() - Forces LC to be powered on
* @sw: Thunderbolt switch
*
* This is useful to let authentication cycle pass even without
* a Thunderbolt link present.
*/
int tb_lc_force_power(struct tb_switch *sw)
{
u32 in = 0xffff;
return tb_sw_write(sw, &in, TB_CFG_SWITCH, TB_LC_POWER, 1);
}
| linux-master | drivers/thunderbolt/lc.c |
// SPDX-License-Identifier: GPL-2.0
/*
* DMA traffic test driver
*
* Copyright (C) 2020, Intel Corporation
* Authors: Isaac Hazan <[email protected]>
* Mika Westerberg <[email protected]>
*/
#include <linux/completion.h>
#include <linux/debugfs.h>
#include <linux/module.h>
#include <linux/sizes.h>
#include <linux/thunderbolt.h>
#define DMA_TEST_TX_RING_SIZE 64
#define DMA_TEST_RX_RING_SIZE 256
#define DMA_TEST_FRAME_SIZE SZ_4K
#define DMA_TEST_DATA_PATTERN 0x0123456789abcdefLL
#define DMA_TEST_MAX_PACKETS 1000
enum dma_test_frame_pdf {
DMA_TEST_PDF_FRAME_START = 1,
DMA_TEST_PDF_FRAME_END,
};
struct dma_test_frame {
struct dma_test *dma_test;
void *data;
struct ring_frame frame;
};
enum dma_test_test_error {
DMA_TEST_NO_ERROR,
DMA_TEST_INTERRUPTED,
DMA_TEST_BUFFER_ERROR,
DMA_TEST_DMA_ERROR,
DMA_TEST_CONFIG_ERROR,
DMA_TEST_SPEED_ERROR,
DMA_TEST_WIDTH_ERROR,
DMA_TEST_BONDING_ERROR,
DMA_TEST_PACKET_ERROR,
};
static const char * const dma_test_error_names[] = {
[DMA_TEST_NO_ERROR] = "no errors",
[DMA_TEST_INTERRUPTED] = "interrupted by signal",
[DMA_TEST_BUFFER_ERROR] = "no memory for packet buffers",
[DMA_TEST_DMA_ERROR] = "DMA ring setup failed",
[DMA_TEST_CONFIG_ERROR] = "configuration is not valid",
[DMA_TEST_SPEED_ERROR] = "unexpected link speed",
[DMA_TEST_WIDTH_ERROR] = "unexpected link width",
[DMA_TEST_BONDING_ERROR] = "lane bonding configuration error",
[DMA_TEST_PACKET_ERROR] = "packet check failed",
};
enum dma_test_result {
DMA_TEST_NOT_RUN,
DMA_TEST_SUCCESS,
DMA_TEST_FAIL,
};
static const char * const dma_test_result_names[] = {
[DMA_TEST_NOT_RUN] = "not run",
[DMA_TEST_SUCCESS] = "success",
[DMA_TEST_FAIL] = "failed",
};
/**
* struct dma_test - DMA test device driver private data
* @svc: XDomain service the driver is bound to
* @xd: XDomain the service belongs to
* @rx_ring: Software ring holding RX frames
* @rx_hopid: HopID used for receiving frames
* @tx_ring: Software ring holding TX frames
* @tx_hopid: HopID used for sending frames
* @packets_to_send: Number of packets to send
* @packets_to_receive: Number of packets to receive
* @packets_sent: Actual number of packets sent
* @packets_received: Actual number of packets received
* @link_speed: Expected link speed (Gb/s), %0 to use whatever is negotiated
* @link_width: Expected link width (number of lanes), %0 to use whatever is negotiated
* @crc_errors: Number of CRC errors during the test run
* @buffer_overflow_errors: Number of buffer overflow errors during the test
* run
* @result: Result of the last run
* @error_code: Error code of the last run
* @complete: Used to wait for the Rx to complete
* @lock: Lock serializing access to this structure
* @debugfs_dir: dentry of this dma_test
*/
struct dma_test {
const struct tb_service *svc;
struct tb_xdomain *xd;
struct tb_ring *rx_ring;
int rx_hopid;
struct tb_ring *tx_ring;
int tx_hopid;
unsigned int packets_to_send;
unsigned int packets_to_receive;
unsigned int packets_sent;
unsigned int packets_received;
unsigned int link_speed;
unsigned int link_width;
unsigned int crc_errors;
unsigned int buffer_overflow_errors;
enum dma_test_result result;
enum dma_test_test_error error_code;
struct completion complete;
struct mutex lock;
struct dentry *debugfs_dir;
};
/* DMA test property directory UUID: 3188cd10-6523-4a5a-a682-fdca07a248d8 */
static const uuid_t dma_test_dir_uuid =
UUID_INIT(0x3188cd10, 0x6523, 0x4a5a,
0xa6, 0x82, 0xfd, 0xca, 0x07, 0xa2, 0x48, 0xd8);
static struct tb_property_dir *dma_test_dir;
static void *dma_test_pattern;
static void dma_test_free_rings(struct dma_test *dt)
{
if (dt->rx_ring) {
tb_xdomain_release_in_hopid(dt->xd, dt->rx_hopid);
tb_ring_free(dt->rx_ring);
dt->rx_ring = NULL;
}
if (dt->tx_ring) {
tb_xdomain_release_out_hopid(dt->xd, dt->tx_hopid);
tb_ring_free(dt->tx_ring);
dt->tx_ring = NULL;
}
}
static int dma_test_start_rings(struct dma_test *dt)
{
unsigned int flags = RING_FLAG_FRAME;
struct tb_xdomain *xd = dt->xd;
int ret, e2e_tx_hop = 0;
struct tb_ring *ring;
/*
* If we are both sender and receiver (traffic goes over a
* special loopback dongle) enable E2E flow control. This avoids
* losing packets.
*/
if (dt->packets_to_send && dt->packets_to_receive)
flags |= RING_FLAG_E2E;
if (dt->packets_to_send) {
ring = tb_ring_alloc_tx(xd->tb->nhi, -1, DMA_TEST_TX_RING_SIZE,
flags);
if (!ring)
return -ENOMEM;
dt->tx_ring = ring;
e2e_tx_hop = ring->hop;
ret = tb_xdomain_alloc_out_hopid(xd, -1);
if (ret < 0) {
dma_test_free_rings(dt);
return ret;
}
dt->tx_hopid = ret;
}
if (dt->packets_to_receive) {
u16 sof_mask, eof_mask;
sof_mask = BIT(DMA_TEST_PDF_FRAME_START);
eof_mask = BIT(DMA_TEST_PDF_FRAME_END);
ring = tb_ring_alloc_rx(xd->tb->nhi, -1, DMA_TEST_RX_RING_SIZE,
flags, e2e_tx_hop, sof_mask, eof_mask,
NULL, NULL);
if (!ring) {
dma_test_free_rings(dt);
return -ENOMEM;
}
dt->rx_ring = ring;
ret = tb_xdomain_alloc_in_hopid(xd, -1);
if (ret < 0) {
dma_test_free_rings(dt);
return ret;
}
dt->rx_hopid = ret;
}
ret = tb_xdomain_enable_paths(dt->xd, dt->tx_hopid,
dt->tx_ring ? dt->tx_ring->hop : -1,
dt->rx_hopid,
dt->rx_ring ? dt->rx_ring->hop : -1);
if (ret) {
dma_test_free_rings(dt);
return ret;
}
if (dt->tx_ring)
tb_ring_start(dt->tx_ring);
if (dt->rx_ring)
tb_ring_start(dt->rx_ring);
return 0;
}
static void dma_test_stop_rings(struct dma_test *dt)
{
int ret;
if (dt->rx_ring)
tb_ring_stop(dt->rx_ring);
if (dt->tx_ring)
tb_ring_stop(dt->tx_ring);
ret = tb_xdomain_disable_paths(dt->xd, dt->tx_hopid,
dt->tx_ring ? dt->tx_ring->hop : -1,
dt->rx_hopid,
dt->rx_ring ? dt->rx_ring->hop : -1);
if (ret)
dev_warn(&dt->svc->dev, "failed to disable DMA paths\n");
dma_test_free_rings(dt);
}
static void dma_test_rx_callback(struct tb_ring *ring, struct ring_frame *frame,
bool canceled)
{
struct dma_test_frame *tf = container_of(frame, typeof(*tf), frame);
struct dma_test *dt = tf->dma_test;
struct device *dma_dev = tb_ring_dma_device(dt->rx_ring);
dma_unmap_single(dma_dev, tf->frame.buffer_phy, DMA_TEST_FRAME_SIZE,
DMA_FROM_DEVICE);
kfree(tf->data);
if (canceled) {
kfree(tf);
return;
}
dt->packets_received++;
dev_dbg(&dt->svc->dev, "packet %u/%u received\n", dt->packets_received,
dt->packets_to_receive);
if (tf->frame.flags & RING_DESC_CRC_ERROR)
dt->crc_errors++;
if (tf->frame.flags & RING_DESC_BUFFER_OVERRUN)
dt->buffer_overflow_errors++;
kfree(tf);
if (dt->packets_received == dt->packets_to_receive)
complete(&dt->complete);
}
static int dma_test_submit_rx(struct dma_test *dt, size_t npackets)
{
struct device *dma_dev = tb_ring_dma_device(dt->rx_ring);
int i;
for (i = 0; i < npackets; i++) {
struct dma_test_frame *tf;
dma_addr_t dma_addr;
tf = kzalloc(sizeof(*tf), GFP_KERNEL);
if (!tf)
return -ENOMEM;
tf->data = kzalloc(DMA_TEST_FRAME_SIZE, GFP_KERNEL);
if (!tf->data) {
kfree(tf);
return -ENOMEM;
}
dma_addr = dma_map_single(dma_dev, tf->data, DMA_TEST_FRAME_SIZE,
DMA_FROM_DEVICE);
if (dma_mapping_error(dma_dev, dma_addr)) {
kfree(tf->data);
kfree(tf);
return -ENOMEM;
}
tf->frame.buffer_phy = dma_addr;
tf->frame.callback = dma_test_rx_callback;
tf->dma_test = dt;
INIT_LIST_HEAD(&tf->frame.list);
tb_ring_rx(dt->rx_ring, &tf->frame);
}
return 0;
}
static void dma_test_tx_callback(struct tb_ring *ring, struct ring_frame *frame,
bool canceled)
{
struct dma_test_frame *tf = container_of(frame, typeof(*tf), frame);
struct dma_test *dt = tf->dma_test;
struct device *dma_dev = tb_ring_dma_device(dt->tx_ring);
dma_unmap_single(dma_dev, tf->frame.buffer_phy, DMA_TEST_FRAME_SIZE,
DMA_TO_DEVICE);
kfree(tf->data);
kfree(tf);
}
static int dma_test_submit_tx(struct dma_test *dt, size_t npackets)
{
struct device *dma_dev = tb_ring_dma_device(dt->tx_ring);
int i;
for (i = 0; i < npackets; i++) {
struct dma_test_frame *tf;
dma_addr_t dma_addr;
tf = kzalloc(sizeof(*tf), GFP_KERNEL);
if (!tf)
return -ENOMEM;
tf->frame.size = 0; /* means 4096 */
tf->dma_test = dt;
tf->data = kmemdup(dma_test_pattern, DMA_TEST_FRAME_SIZE, GFP_KERNEL);
if (!tf->data) {
kfree(tf);
return -ENOMEM;
}
dma_addr = dma_map_single(dma_dev, tf->data, DMA_TEST_FRAME_SIZE,
DMA_TO_DEVICE);
if (dma_mapping_error(dma_dev, dma_addr)) {
kfree(tf->data);
kfree(tf);
return -ENOMEM;
}
tf->frame.buffer_phy = dma_addr;
tf->frame.callback = dma_test_tx_callback;
tf->frame.sof = DMA_TEST_PDF_FRAME_START;
tf->frame.eof = DMA_TEST_PDF_FRAME_END;
INIT_LIST_HEAD(&tf->frame.list);
dt->packets_sent++;
dev_dbg(&dt->svc->dev, "packet %u/%u sent\n", dt->packets_sent,
dt->packets_to_send);
tb_ring_tx(dt->tx_ring, &tf->frame);
}
return 0;
}
#define DMA_TEST_DEBUGFS_ATTR(__fops, __get, __validate, __set) \
static int __fops ## _show(void *data, u64 *val) \
{ \
struct tb_service *svc = data; \
struct dma_test *dt = tb_service_get_drvdata(svc); \
int ret; \
\
ret = mutex_lock_interruptible(&dt->lock); \
if (ret) \
return ret; \
__get(dt, val); \
mutex_unlock(&dt->lock); \
return 0; \
} \
static int __fops ## _store(void *data, u64 val) \
{ \
struct tb_service *svc = data; \
struct dma_test *dt = tb_service_get_drvdata(svc); \
int ret; \
\
ret = __validate(val); \
if (ret) \
return ret; \
ret = mutex_lock_interruptible(&dt->lock); \
if (ret) \
return ret; \
__set(dt, val); \
mutex_unlock(&dt->lock); \
return 0; \
} \
DEFINE_DEBUGFS_ATTRIBUTE(__fops ## _fops, __fops ## _show, \
__fops ## _store, "%llu\n")
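/*
 * Example: DMA_TEST_DEBUGFS_ATTR(lanes, ...) below expands to
 * lanes_show()/lanes_store() and a "lanes_fops" attribute which
 * dma_test_debugfs_init() later exposes with
 *
 *	debugfs_create_file("lanes", 0600, dt->debugfs_dir, svc, &lanes_fops);
 */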
static void lanes_get(const struct dma_test *dt, u64 *val)
{
*val = dt->link_width;
}
static int lanes_validate(u64 val)
{
return val > 2 ? -EINVAL : 0;
}
static void lanes_set(struct dma_test *dt, u64 val)
{
dt->link_width = val;
}
DMA_TEST_DEBUGFS_ATTR(lanes, lanes_get, lanes_validate, lanes_set);
static void speed_get(const struct dma_test *dt, u64 *val)
{
*val = dt->link_speed;
}
static int speed_validate(u64 val)
{
switch (val) {
case 40:
case 20:
case 10:
case 0:
return 0;
default:
return -EINVAL;
}
}
static void speed_set(struct dma_test *dt, u64 val)
{
dt->link_speed = val;
}
DMA_TEST_DEBUGFS_ATTR(speed, speed_get, speed_validate, speed_set);
static void packets_to_receive_get(const struct dma_test *dt, u64 *val)
{
*val = dt->packets_to_receive;
}
static int packets_to_receive_validate(u64 val)
{
return val > DMA_TEST_MAX_PACKETS ? -EINVAL : 0;
}
static void packets_to_receive_set(struct dma_test *dt, u64 val)
{
dt->packets_to_receive = val;
}
DMA_TEST_DEBUGFS_ATTR(packets_to_receive, packets_to_receive_get,
packets_to_receive_validate, packets_to_receive_set);
static void packets_to_send_get(const struct dma_test *dt, u64 *val)
{
*val = dt->packets_to_send;
}
static int packets_to_send_validate(u64 val)
{
return val > DMA_TEST_MAX_PACKETS ? -EINVAL : 0;
}
static void packets_to_send_set(struct dma_test *dt, u64 val)
{
dt->packets_to_send = val;
}
DMA_TEST_DEBUGFS_ATTR(packets_to_send, packets_to_send_get,
packets_to_send_validate, packets_to_send_set);
static int dma_test_set_bonding(struct dma_test *dt)
{
switch (dt->link_width) {
case 2:
return tb_xdomain_lane_bonding_enable(dt->xd);
case 1:
tb_xdomain_lane_bonding_disable(dt->xd);
fallthrough;
default:
return 0;
}
}
static bool dma_test_validate_config(struct dma_test *dt)
{
if (!dt->packets_to_send && !dt->packets_to_receive)
return false;
if (dt->packets_to_send && dt->packets_to_receive &&
dt->packets_to_send != dt->packets_to_receive)
return false;
return true;
}
static void dma_test_check_errors(struct dma_test *dt, int ret)
{
if (!dt->error_code) {
if (dt->link_speed && dt->xd->link_speed != dt->link_speed) {
dt->error_code = DMA_TEST_SPEED_ERROR;
} else if (dt->link_width) {
const struct tb_xdomain *xd = dt->xd;
if ((dt->link_width == 1 && xd->link_width != TB_LINK_WIDTH_SINGLE) ||
(dt->link_width == 2 && xd->link_width < TB_LINK_WIDTH_DUAL))
dt->error_code = DMA_TEST_WIDTH_ERROR;
} else if (dt->packets_to_send != dt->packets_sent ||
dt->packets_to_receive != dt->packets_received ||
dt->crc_errors || dt->buffer_overflow_errors) {
dt->error_code = DMA_TEST_PACKET_ERROR;
} else {
return;
}
}
dt->result = DMA_TEST_FAIL;
}
static int test_store(void *data, u64 val)
{
struct tb_service *svc = data;
struct dma_test *dt = tb_service_get_drvdata(svc);
int ret;
if (val != 1)
return -EINVAL;
ret = mutex_lock_interruptible(&dt->lock);
if (ret)
return ret;
dt->packets_sent = 0;
dt->packets_received = 0;
dt->crc_errors = 0;
dt->buffer_overflow_errors = 0;
dt->result = DMA_TEST_SUCCESS;
dt->error_code = DMA_TEST_NO_ERROR;
dev_dbg(&svc->dev, "DMA test starting\n");
if (dt->link_speed)
dev_dbg(&svc->dev, "link_speed: %u Gb/s\n", dt->link_speed);
if (dt->link_width)
dev_dbg(&svc->dev, "link_width: %u\n", dt->link_width);
dev_dbg(&svc->dev, "packets_to_send: %u\n", dt->packets_to_send);
dev_dbg(&svc->dev, "packets_to_receive: %u\n", dt->packets_to_receive);
if (!dma_test_validate_config(dt)) {
dev_err(&svc->dev, "invalid test configuration\n");
dt->error_code = DMA_TEST_CONFIG_ERROR;
goto out_unlock;
}
ret = dma_test_set_bonding(dt);
if (ret) {
dev_err(&svc->dev, "failed to set lanes\n");
dt->error_code = DMA_TEST_BONDING_ERROR;
goto out_unlock;
}
ret = dma_test_start_rings(dt);
if (ret) {
dev_err(&svc->dev, "failed to enable DMA rings\n");
dt->error_code = DMA_TEST_DMA_ERROR;
goto out_unlock;
}
if (dt->packets_to_receive) {
reinit_completion(&dt->complete);
ret = dma_test_submit_rx(dt, dt->packets_to_receive);
if (ret) {
dev_err(&svc->dev, "failed to submit receive buffers\n");
dt->error_code = DMA_TEST_BUFFER_ERROR;
goto out_stop;
}
}
if (dt->packets_to_send) {
ret = dma_test_submit_tx(dt, dt->packets_to_send);
if (ret) {
dev_err(&svc->dev, "failed to submit transmit buffers\n");
dt->error_code = DMA_TEST_BUFFER_ERROR;
goto out_stop;
}
}
if (dt->packets_to_receive) {
ret = wait_for_completion_interruptible(&dt->complete);
if (ret) {
dt->error_code = DMA_TEST_INTERRUPTED;
goto out_stop;
}
}
out_stop:
dma_test_stop_rings(dt);
out_unlock:
dma_test_check_errors(dt, ret);
mutex_unlock(&dt->lock);
dev_dbg(&svc->dev, "DMA test %s\n", dma_test_result_names[dt->result]);
return ret;
}
DEFINE_DEBUGFS_ATTRIBUTE(test_fops, NULL, test_store, "%llu\n");
static int status_show(struct seq_file *s, void *not_used)
{
struct tb_service *svc = s->private;
struct dma_test *dt = tb_service_get_drvdata(svc);
int ret;
ret = mutex_lock_interruptible(&dt->lock);
if (ret)
return ret;
seq_printf(s, "result: %s\n", dma_test_result_names[dt->result]);
if (dt->result == DMA_TEST_NOT_RUN)
goto out_unlock;
seq_printf(s, "packets received: %u\n", dt->packets_received);
seq_printf(s, "packets sent: %u\n", dt->packets_sent);
seq_printf(s, "CRC errors: %u\n", dt->crc_errors);
seq_printf(s, "buffer overflow errors: %u\n",
dt->buffer_overflow_errors);
seq_printf(s, "error: %s\n", dma_test_error_names[dt->error_code]);
out_unlock:
mutex_unlock(&dt->lock);
return 0;
}
DEFINE_SHOW_ATTRIBUTE(status);
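/*
 * Create the per-service debugfs directory and the attribute files:
 * "lanes", "speed", "packets_to_receive" and "packets_to_send" are
 * read/write configuration knobs, "status" is read-only and "test" is
 * a write-only trigger for running the test.
 */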
static void dma_test_debugfs_init(struct tb_service *svc)
{
struct dma_test *dt = tb_service_get_drvdata(svc);
dt->debugfs_dir = debugfs_create_dir("dma_test", svc->debugfs_dir);
debugfs_create_file("lanes", 0600, dt->debugfs_dir, svc, &lanes_fops);
debugfs_create_file("speed", 0600, dt->debugfs_dir, svc, &speed_fops);
debugfs_create_file("packets_to_receive", 0600, dt->debugfs_dir, svc,
&packets_to_receive_fops);
debugfs_create_file("packets_to_send", 0600, dt->debugfs_dir, svc,
&packets_to_send_fops);
debugfs_create_file("status", 0400, dt->debugfs_dir, svc, &status_fops);
debugfs_create_file("test", 0200, dt->debugfs_dir, svc, &test_fops);
}
static int dma_test_probe(struct tb_service *svc, const struct tb_service_id *id)
{
struct tb_xdomain *xd = tb_service_parent(svc);
struct dma_test *dt;
dt = devm_kzalloc(&svc->dev, sizeof(*dt), GFP_KERNEL);
if (!dt)
return -ENOMEM;
dt->svc = svc;
dt->xd = xd;
mutex_init(&dt->lock);
init_completion(&dt->complete);
tb_service_set_drvdata(svc, dt);
dma_test_debugfs_init(svc);
return 0;
}
static void dma_test_remove(struct tb_service *svc)
{
struct dma_test *dt = tb_service_get_drvdata(svc);
mutex_lock(&dt->lock);
debugfs_remove_recursive(dt->debugfs_dir);
mutex_unlock(&dt->lock);
}
static int __maybe_unused dma_test_suspend(struct device *dev)
{
	/*
	 * No need to do anything special here. If userspace is writing
	 * to the test attribute when suspend starts, it comes out of
	 * wait_for_completion_interruptible() with -ERESTARTSYS and the
	 * DMA test fails, tearing down the rings. Once userspace is
	 * thawed the kernel restarts the write syscall, effectively
	 * re-running the test.
	 */
return 0;
}
static int __maybe_unused dma_test_resume(struct device *dev)
{
return 0;
}
static const struct dev_pm_ops dma_test_pm_ops = {
SET_SYSTEM_SLEEP_PM_OPS(dma_test_suspend, dma_test_resume)
};
static const struct tb_service_id dma_test_ids[] = {
{ TB_SERVICE("dma_test", 1) },
{ },
};
MODULE_DEVICE_TABLE(tbsvc, dma_test_ids);
static struct tb_service_driver dma_test_driver = {
.driver = {
.owner = THIS_MODULE,
.name = "thunderbolt_dma_test",
.pm = &dma_test_pm_ops,
},
.probe = dma_test_probe,
.remove = dma_test_remove,
.id_table = dma_test_ids,
};
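/*
 * Module initialization: allocate the frame payload pattern, publish a
 * "dma_test" XDomain property directory so the service can be
 * discovered by the remote host, and finally register the service
 * driver itself. The error path unwinds these steps in reverse order.
 */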
static int __init dma_test_init(void)
{
u64 data_value = DMA_TEST_DATA_PATTERN;
int i, ret;
dma_test_pattern = kmalloc(DMA_TEST_FRAME_SIZE, GFP_KERNEL);
if (!dma_test_pattern)
return -ENOMEM;
for (i = 0; i < DMA_TEST_FRAME_SIZE / sizeof(data_value); i++)
((u32 *)dma_test_pattern)[i] = data_value++;
dma_test_dir = tb_property_create_dir(&dma_test_dir_uuid);
if (!dma_test_dir) {
ret = -ENOMEM;
goto err_free_pattern;
}
tb_property_add_immediate(dma_test_dir, "prtcid", 1);
tb_property_add_immediate(dma_test_dir, "prtcvers", 1);
tb_property_add_immediate(dma_test_dir, "prtcrevs", 0);
tb_property_add_immediate(dma_test_dir, "prtcstns", 0);
ret = tb_register_property_dir("dma_test", dma_test_dir);
if (ret)
goto err_free_dir;
ret = tb_register_service_driver(&dma_test_driver);
if (ret)
goto err_unregister_dir;
return 0;
err_unregister_dir:
tb_unregister_property_dir("dma_test", dma_test_dir);
err_free_dir:
tb_property_free_dir(dma_test_dir);
err_free_pattern:
kfree(dma_test_pattern);
return ret;
}
module_init(dma_test_init);
static void __exit dma_test_exit(void)
{
tb_unregister_service_driver(&dma_test_driver);
tb_unregister_property_dir("dma_test", dma_test_dir);
tb_property_free_dir(dma_test_dir);
kfree(dma_test_pattern);
}
module_exit(dma_test_exit);
MODULE_AUTHOR("Isaac Hazan <[email protected]>");
MODULE_AUTHOR("Mika Westerberg <[email protected]>");
MODULE_DESCRIPTION("Thunderbolt/USB4 DMA traffic test driver");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/thunderbolt/dma_test.c |
// SPDX-License-Identifier: GPL-2.0
/*
* USB4 port device
*
* Copyright (C) 2021, Intel Corporation
* Author: Mika Westerberg <[email protected]>
*/
#include <linux/pm_runtime.h>
#include <linux/component.h>
#include <linux/property.h>
#include "tb.h"
static int connector_bind(struct device *dev, struct device *connector, void *data)
{
int ret;
ret = sysfs_create_link(&dev->kobj, &connector->kobj, "connector");
if (ret)
return ret;
ret = sysfs_create_link(&connector->kobj, &dev->kobj, dev_name(dev));
if (ret)
sysfs_remove_link(&dev->kobj, "connector");
return ret;
}
static void connector_unbind(struct device *dev, struct device *connector, void *data)
{
sysfs_remove_link(&connector->kobj, dev_name(dev));
sysfs_remove_link(&dev->kobj, "connector");
}
static const struct component_ops connector_ops = {
.bind = connector_bind,
.unbind = connector_unbind,
};
static ssize_t link_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct usb4_port *usb4 = tb_to_usb4_port_device(dev);
struct tb_port *port = usb4->port;
struct tb *tb = port->sw->tb;
const char *link;
if (mutex_lock_interruptible(&tb->lock))
return -ERESTARTSYS;
if (tb_is_upstream_port(port))
link = port->sw->link_usb4 ? "usb4" : "tbt";
else if (tb_port_has_remote(port))
link = port->remote->sw->link_usb4 ? "usb4" : "tbt";
else if (port->xdomain)
link = port->xdomain->link_usb4 ? "usb4" : "tbt";
else
link = "none";
mutex_unlock(&tb->lock);
return sysfs_emit(buf, "%s\n", link);
}
static DEVICE_ATTR_RO(link);
static struct attribute *common_attrs[] = {
&dev_attr_link.attr,
NULL
};
static const struct attribute_group common_group = {
.attrs = common_attrs,
};
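/*
 * Puts the USB4 port into offline mode so that its retimers can be
 * accessed over the sideband: power on the retimers through ACPI, take
 * the router offline for this port and then scan for retimers. On
 * failure the steps already taken are rolled back.
 */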
static int usb4_port_offline(struct usb4_port *usb4)
{
struct tb_port *port = usb4->port;
int ret;
ret = tb_acpi_power_on_retimers(port);
if (ret)
return ret;
ret = usb4_port_router_offline(port);
if (ret) {
tb_acpi_power_off_retimers(port);
return ret;
}
ret = tb_retimer_scan(port, false);
if (ret) {
usb4_port_router_online(port);
tb_acpi_power_off_retimers(port);
}
return ret;
}
static void usb4_port_online(struct usb4_port *usb4)
{
struct tb_port *port = usb4->port;
usb4_port_router_online(port);
tb_acpi_power_off_retimers(port);
}
static ssize_t offline_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct usb4_port *usb4 = tb_to_usb4_port_device(dev);
return sysfs_emit(buf, "%d\n", usb4->offline);
}
static ssize_t offline_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t count)
{
struct usb4_port *usb4 = tb_to_usb4_port_device(dev);
struct tb_port *port = usb4->port;
struct tb *tb = port->sw->tb;
bool val;
int ret;
ret = kstrtobool(buf, &val);
if (ret)
return ret;
pm_runtime_get_sync(&usb4->dev);
if (mutex_lock_interruptible(&tb->lock)) {
ret = -ERESTARTSYS;
goto out_rpm;
}
if (val == usb4->offline)
goto out_unlock;
/* Offline mode works only for ports that are not connected */
if (tb_port_has_remote(port)) {
ret = -EBUSY;
goto out_unlock;
}
if (val) {
ret = usb4_port_offline(usb4);
if (ret)
goto out_unlock;
} else {
usb4_port_online(usb4);
tb_retimer_remove_all(port);
}
usb4->offline = val;
tb_port_dbg(port, "%s offline mode\n", val ? "enter" : "exit");
out_unlock:
mutex_unlock(&tb->lock);
out_rpm:
pm_runtime_mark_last_busy(&usb4->dev);
pm_runtime_put_autosuspend(&usb4->dev);
return ret ? ret : count;
}
static DEVICE_ATTR_RW(offline);
static ssize_t rescan_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t count)
{
struct usb4_port *usb4 = tb_to_usb4_port_device(dev);
struct tb_port *port = usb4->port;
struct tb *tb = port->sw->tb;
bool val;
int ret;
ret = kstrtobool(buf, &val);
if (ret)
return ret;
if (!val)
return count;
pm_runtime_get_sync(&usb4->dev);
if (mutex_lock_interruptible(&tb->lock)) {
ret = -ERESTARTSYS;
goto out_rpm;
}
/* Must be in offline mode already */
if (!usb4->offline) {
ret = -EINVAL;
goto out_unlock;
}
tb_retimer_remove_all(port);
ret = tb_retimer_scan(port, true);
out_unlock:
mutex_unlock(&tb->lock);
out_rpm:
pm_runtime_mark_last_busy(&usb4->dev);
pm_runtime_put_autosuspend(&usb4->dev);
return ret ? ret : count;
}
static DEVICE_ATTR_WO(rescan);
static struct attribute *service_attrs[] = {
&dev_attr_offline.attr,
&dev_attr_rescan.attr,
NULL
};
static umode_t service_attr_is_visible(struct kobject *kobj,
struct attribute *attr, int n)
{
struct device *dev = kobj_to_dev(kobj);
struct usb4_port *usb4 = tb_to_usb4_port_device(dev);
/*
* Always need some platform help to cycle the modes so that
* retimers can be accessed through the sideband.
*/
return usb4->can_offline ? attr->mode : 0;
}
static const struct attribute_group service_group = {
.attrs = service_attrs,
.is_visible = service_attr_is_visible,
};
static const struct attribute_group *usb4_port_device_groups[] = {
&common_group,
&service_group,
NULL
};
static void usb4_port_device_release(struct device *dev)
{
struct usb4_port *usb4 = container_of(dev, struct usb4_port, dev);
kfree(usb4);
}
struct device_type usb4_port_device_type = {
.name = "usb4_port",
.groups = usb4_port_device_groups,
.release = usb4_port_device_release,
};
/**
* usb4_port_device_add() - Add USB4 port device
 * @port: Lane 0 adapter port to add the USB4 port for
*
* Creates and registers a USB4 port device for @port. Returns the new
* USB4 port device pointer or ERR_PTR() in case of error.
*/
struct usb4_port *usb4_port_device_add(struct tb_port *port)
{
struct usb4_port *usb4;
int ret;
usb4 = kzalloc(sizeof(*usb4), GFP_KERNEL);
if (!usb4)
return ERR_PTR(-ENOMEM);
usb4->port = port;
usb4->dev.type = &usb4_port_device_type;
usb4->dev.parent = &port->sw->dev;
dev_set_name(&usb4->dev, "usb4_port%d", port->port);
ret = device_register(&usb4->dev);
if (ret) {
put_device(&usb4->dev);
return ERR_PTR(ret);
}
if (dev_fwnode(&usb4->dev)) {
ret = component_add(&usb4->dev, &connector_ops);
if (ret) {
dev_err(&usb4->dev, "failed to add component\n");
device_unregister(&usb4->dev);
}
}
if (!tb_is_upstream_port(port))
device_set_wakeup_capable(&usb4->dev, true);
pm_runtime_no_callbacks(&usb4->dev);
pm_runtime_set_active(&usb4->dev);
pm_runtime_enable(&usb4->dev);
pm_runtime_set_autosuspend_delay(&usb4->dev, TB_AUTOSUSPEND_DELAY);
pm_runtime_mark_last_busy(&usb4->dev);
pm_runtime_use_autosuspend(&usb4->dev);
return usb4;
}
/**
* usb4_port_device_remove() - Removes USB4 port device
* @usb4: USB4 port device
*
* Unregisters the USB4 port device from the system. The device will be
* released when the last reference is dropped.
*/
void usb4_port_device_remove(struct usb4_port *usb4)
{
if (dev_fwnode(&usb4->dev))
component_del(&usb4->dev, &connector_ops);
device_unregister(&usb4->dev);
}
/**
* usb4_port_device_resume() - Resumes USB4 port device
* @usb4: USB4 port device
*
 * Used to resume the USB4 port device after system sleep.
*/
int usb4_port_device_resume(struct usb4_port *usb4)
{
return usb4->offline ? usb4_port_offline(usb4) : 0;
}
| linux-master | drivers/thunderbolt/usb4_port.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Thunderbolt driver - path/tunnel functionality
*
* Copyright (c) 2014 Andreas Noever <[email protected]>
* Copyright (C) 2019, Intel Corporation
*/
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/delay.h>
#include <linux/ktime.h>
#include "tb.h"
static void tb_dump_hop(const struct tb_path_hop *hop, const struct tb_regs_hop *regs)
{
const struct tb_port *port = hop->in_port;
tb_port_dbg(port, " In HopID: %d => Out port: %d Out HopID: %d\n",
hop->in_hop_index, regs->out_port, regs->next_hop);
tb_port_dbg(port, " Weight: %d Priority: %d Credits: %d Drop: %d\n",
regs->weight, regs->priority,
regs->initial_credits, regs->drop_packages);
tb_port_dbg(port, " Counter enabled: %d Counter index: %d\n",
regs->counter_enable, regs->counter);
tb_port_dbg(port, " Flow Control (In/Eg): %d/%d Shared Buffer (In/Eg): %d/%d\n",
regs->ingress_fc, regs->egress_fc,
regs->ingress_shared_buffer, regs->egress_shared_buffer);
tb_port_dbg(port, " Unknown1: %#x Unknown2: %#x Unknown3: %#x\n",
regs->unknown1, regs->unknown2, regs->unknown3);
}
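/*
 * Walks the hop table starting from @src and @src_hopid, following the
 * output port and next HopID of each enabled entry, and returns the
 * final output port of the path (or %NULL if the walk does not end at
 * @dst_hopid within TB_PATH_MAX_HOPS hops).
 */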
static struct tb_port *tb_path_find_dst_port(struct tb_port *src, int src_hopid,
int dst_hopid)
{
struct tb_port *port, *out_port = NULL;
struct tb_regs_hop hop;
struct tb_switch *sw;
int i, ret, hopid;
hopid = src_hopid;
port = src;
for (i = 0; port && i < TB_PATH_MAX_HOPS; i++) {
sw = port->sw;
ret = tb_port_read(port, &hop, TB_CFG_HOPS, 2 * hopid, 2);
if (ret) {
tb_port_warn(port, "failed to read path at %d\n", hopid);
return NULL;
}
if (!hop.enable)
return NULL;
out_port = &sw->ports[hop.out_port];
hopid = hop.next_hop;
port = out_port->remote;
}
return out_port && hopid == dst_hopid ? out_port : NULL;
}
static int tb_path_find_src_hopid(struct tb_port *src,
const struct tb_port *dst, int dst_hopid)
{
struct tb_port *out;
int i;
for (i = TB_PATH_MIN_HOPID; i <= src->config.max_in_hop_id; i++) {
out = tb_path_find_dst_port(src, i, dst_hopid);
if (out == dst)
return i;
}
return 0;
}
/**
* tb_path_discover() - Discover a path
* @src: First input port of a path
* @src_hopid: Starting HopID of a path (%-1 if don't care)
* @dst: Expected destination port of the path (%NULL if don't care)
* @dst_hopid: HopID to the @dst (%-1 if don't care)
* @last: Last port is filled here if not %NULL
* @name: Name of the path
* @alloc_hopid: Allocate HopIDs for the ports
*
* Follows a path starting from @src and @src_hopid to the last output
* port of the path. Allocates HopIDs for the visited ports (if
* @alloc_hopid is true). Call tb_path_free() to release the path and
* allocated HopIDs when the path is not needed anymore.
*
 * Note that the function also discovers incomplete paths, so the caller
 * should check that the @dst port is the expected one. If it is not, the
 * path can be cleaned up by calling tb_path_deactivate() before
 * tb_path_free().
*
* Return: Discovered path on success, %NULL in case of failure
*/
struct tb_path *tb_path_discover(struct tb_port *src, int src_hopid,
struct tb_port *dst, int dst_hopid,
struct tb_port **last, const char *name,
bool alloc_hopid)
{
struct tb_port *out_port;
struct tb_regs_hop hop;
struct tb_path *path;
struct tb_switch *sw;
struct tb_port *p;
size_t num_hops;
int ret, i, h;
if (src_hopid < 0 && dst) {
/*
* For incomplete paths the intermediate HopID can be
* different from the one used by the protocol adapter
* so in that case find a path that ends on @dst with
* matching @dst_hopid. That should give us the correct
* HopID for the @src.
*/
src_hopid = tb_path_find_src_hopid(src, dst, dst_hopid);
if (!src_hopid)
return NULL;
}
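	/* First pass: follow the path to find its length and last output port */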
p = src;
h = src_hopid;
num_hops = 0;
for (i = 0; p && i < TB_PATH_MAX_HOPS; i++) {
sw = p->sw;
ret = tb_port_read(p, &hop, TB_CFG_HOPS, 2 * h, 2);
if (ret) {
tb_port_warn(p, "failed to read path at %d\n", h);
return NULL;
}
/* If the hop is not enabled we got an incomplete path */
if (!hop.enable)
break;
out_port = &sw->ports[hop.out_port];
if (last)
*last = out_port;
h = hop.next_hop;
p = out_port->remote;
num_hops++;
}
path = kzalloc(sizeof(*path), GFP_KERNEL);
if (!path)
return NULL;
path->name = name;
path->tb = src->sw->tb;
path->path_length = num_hops;
path->activated = true;
path->alloc_hopid = alloc_hopid;
path->hops = kcalloc(num_hops, sizeof(*path->hops), GFP_KERNEL);
if (!path->hops) {
kfree(path);
return NULL;
}
tb_dbg(path->tb, "discovering %s path starting from %llx:%u\n",
path->name, tb_route(src->sw), src->port);
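	/* Second pass: record each hop and reserve its HopIDs if requested */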
p = src;
h = src_hopid;
for (i = 0; i < num_hops; i++) {
int next_hop;
sw = p->sw;
ret = tb_port_read(p, &hop, TB_CFG_HOPS, 2 * h, 2);
if (ret) {
tb_port_warn(p, "failed to read path at %d\n", h);
goto err;
}
if (alloc_hopid && tb_port_alloc_in_hopid(p, h, h) < 0)
goto err;
out_port = &sw->ports[hop.out_port];
next_hop = hop.next_hop;
if (alloc_hopid &&
tb_port_alloc_out_hopid(out_port, next_hop, next_hop) < 0) {
tb_port_release_in_hopid(p, h);
goto err;
}
path->hops[i].in_port = p;
path->hops[i].in_hop_index = h;
path->hops[i].in_counter_index = -1;
path->hops[i].out_port = out_port;
path->hops[i].next_hop_index = next_hop;
tb_dump_hop(&path->hops[i], &hop);
h = next_hop;
p = out_port->remote;
}
tb_dbg(path->tb, "path discovery complete\n");
return path;
err:
tb_port_warn(src, "failed to discover path starting at HopID %d\n",
src_hopid);
tb_path_free(path);
return NULL;
}
/**
* tb_path_alloc() - allocate a thunderbolt path between two ports
* @tb: Domain pointer
* @src: Source port of the path
* @src_hopid: HopID used for the first ingress port in the path
* @dst: Destination port of the path
* @dst_hopid: HopID used for the last egress port in the path
* @link_nr: Preferred link if there are dual links on the path
* @name: Name of the path
*
* Creates path between two ports starting with given @src_hopid. Reserves
* HopIDs for each port (they can be different from @src_hopid depending on
 * how many HopIDs each port already has reserved). If there are dual
* links on the path, prioritizes using @link_nr but takes into account
* that the lanes may be bonded.
*
* Return: Returns a tb_path on success or NULL on failure.
*/
struct tb_path *tb_path_alloc(struct tb *tb, struct tb_port *src, int src_hopid,
struct tb_port *dst, int dst_hopid, int link_nr,
const char *name)
{
struct tb_port *in_port, *out_port, *first_port, *last_port;
int in_hopid, out_hopid;
struct tb_path *path;
size_t num_hops;
int i, ret;
path = kzalloc(sizeof(*path), GFP_KERNEL);
if (!path)
return NULL;
first_port = last_port = NULL;
i = 0;
tb_for_each_port_on_path(src, dst, in_port) {
if (!first_port)
first_port = in_port;
last_port = in_port;
i++;
}
/* Check that src and dst are reachable */
if (first_port != src || last_port != dst) {
kfree(path);
return NULL;
}
/* Each hop takes two ports */
num_hops = i / 2;
path->hops = kcalloc(num_hops, sizeof(*path->hops), GFP_KERNEL);
if (!path->hops) {
kfree(path);
return NULL;
}
path->alloc_hopid = true;
in_hopid = src_hopid;
out_port = NULL;
for (i = 0; i < num_hops; i++) {
in_port = tb_next_port_on_path(src, dst, out_port);
if (!in_port)
goto err;
/* When lanes are bonded primary link must be used */
if (!in_port->bonded && in_port->dual_link_port &&
in_port->link_nr != link_nr)
in_port = in_port->dual_link_port;
ret = tb_port_alloc_in_hopid(in_port, in_hopid, in_hopid);
if (ret < 0)
goto err;
in_hopid = ret;
out_port = tb_next_port_on_path(src, dst, in_port);
if (!out_port)
goto err;
		/*
		 * Pick up the right port when going from non-bonded to
		 * bonded or from bonded to non-bonded.
		 */
if (out_port->dual_link_port) {
if (!in_port->bonded && out_port->bonded &&
out_port->link_nr) {
/*
* Use primary link when going from
* non-bonded to bonded.
*/
out_port = out_port->dual_link_port;
} else if (!out_port->bonded &&
out_port->link_nr != link_nr) {
/*
* If out port is not bonded follow
* link_nr.
*/
out_port = out_port->dual_link_port;
}
}
if (i == num_hops - 1)
ret = tb_port_alloc_out_hopid(out_port, dst_hopid,
dst_hopid);
else
ret = tb_port_alloc_out_hopid(out_port, -1, -1);
if (ret < 0)
goto err;
out_hopid = ret;
path->hops[i].in_hop_index = in_hopid;
path->hops[i].in_port = in_port;
path->hops[i].in_counter_index = -1;
path->hops[i].out_port = out_port;
path->hops[i].next_hop_index = out_hopid;
in_hopid = out_hopid;
}
path->tb = tb;
path->path_length = num_hops;
path->name = name;
return path;
err:
tb_path_free(path);
return NULL;
}
/**
* tb_path_free() - free a path
* @path: Path to free
*
* Frees a path. The path does not need to be deactivated.
*/
void tb_path_free(struct tb_path *path)
{
if (path->alloc_hopid) {
int i;
for (i = 0; i < path->path_length; i++) {
const struct tb_path_hop *hop = &path->hops[i];
if (hop->in_port)
tb_port_release_in_hopid(hop->in_port,
hop->in_hop_index);
if (hop->out_port)
tb_port_release_out_hopid(hop->out_port,
hop->next_hop_index);
}
}
kfree(path->hops);
kfree(path);
}
static void __tb_path_deallocate_nfc(struct tb_path *path, int first_hop)
{
int i, res;
for (i = first_hop; i < path->path_length; i++) {
res = tb_port_add_nfc_credits(path->hops[i].in_port,
-path->hops[i].nfc_credits);
if (res)
tb_port_warn(path->hops[i].in_port,
"nfc credits deallocation failed for hop %d\n",
i);
}
}
static int __tb_path_deactivate_hop(struct tb_port *port, int hop_index,
bool clear_fc)
{
struct tb_regs_hop hop;
ktime_t timeout;
int ret;
/* Disable the path */
ret = tb_port_read(port, &hop, TB_CFG_HOPS, 2 * hop_index, 2);
if (ret)
return ret;
/* Already disabled */
if (!hop.enable)
return 0;
hop.enable = 0;
ret = tb_port_write(port, &hop, TB_CFG_HOPS, 2 * hop_index, 2);
if (ret)
return ret;
/* Wait until it is drained */
timeout = ktime_add_ms(ktime_get(), 500);
do {
ret = tb_port_read(port, &hop, TB_CFG_HOPS, 2 * hop_index, 2);
if (ret)
return ret;
if (!hop.pending) {
if (clear_fc) {
				/*
				 * Clear flow control. The protocol
				 * adapters' IFC and ISE bits are vendor
				 * defined in the USB4 spec, so we clear
				 * them only for pre-USB4 adapters.
				 */
if (!tb_switch_is_usb4(port->sw)) {
hop.ingress_fc = 0;
hop.ingress_shared_buffer = 0;
}
hop.egress_fc = 0;
hop.egress_shared_buffer = 0;
return tb_port_write(port, &hop, TB_CFG_HOPS,
2 * hop_index, 2);
}
return 0;
}
usleep_range(10, 20);
} while (ktime_before(ktime_get(), timeout));
return -ETIMEDOUT;
}
static void __tb_path_deactivate_hops(struct tb_path *path, int first_hop)
{
int i, res;
for (i = first_hop; i < path->path_length; i++) {
res = __tb_path_deactivate_hop(path->hops[i].in_port,
path->hops[i].in_hop_index,
path->clear_fc);
if (res && res != -ENODEV)
tb_port_warn(path->hops[i].in_port,
"hop deactivation failed for hop %d, index %d\n",
i, path->hops[i].in_hop_index);
}
}
void tb_path_deactivate(struct tb_path *path)
{
if (!path->activated) {
tb_WARN(path->tb, "trying to deactivate an inactive path\n");
return;
}
tb_dbg(path->tb,
"deactivating %s path from %llx:%u to %llx:%u\n",
path->name, tb_route(path->hops[0].in_port->sw),
path->hops[0].in_port->port,
tb_route(path->hops[path->path_length - 1].out_port->sw),
path->hops[path->path_length - 1].out_port->port);
__tb_path_deactivate_hops(path, 0);
__tb_path_deallocate_nfc(path, 0);
path->activated = false;
}
/**
* tb_path_activate() - activate a path
* @path: Path to activate
*
* Activate a path starting with the last hop and iterating backwards. The
* caller must fill path->hops before calling tb_path_activate().
*
* Return: Returns 0 on success or an error code on failure.
*/
int tb_path_activate(struct tb_path *path)
{
int i, res;
enum tb_path_port out_mask, in_mask;
if (path->activated) {
tb_WARN(path->tb, "trying to activate already activated path\n");
return -EINVAL;
}
tb_dbg(path->tb,
"activating %s path from %llx:%u to %llx:%u\n",
path->name, tb_route(path->hops[0].in_port->sw),
path->hops[0].in_port->port,
tb_route(path->hops[path->path_length - 1].out_port->sw),
path->hops[path->path_length - 1].out_port->port);
/* Clear counters. */
for (i = path->path_length - 1; i >= 0; i--) {
if (path->hops[i].in_counter_index == -1)
continue;
res = tb_port_clear_counter(path->hops[i].in_port,
path->hops[i].in_counter_index);
if (res)
goto err;
}
/* Add non flow controlled credits. */
for (i = path->path_length - 1; i >= 0; i--) {
res = tb_port_add_nfc_credits(path->hops[i].in_port,
path->hops[i].nfc_credits);
if (res) {
__tb_path_deallocate_nfc(path, i);
goto err;
}
}
/* Activate hops. */
for (i = path->path_length - 1; i >= 0; i--) {
struct tb_regs_hop hop = { 0 };
/* If it is left active deactivate it first */
__tb_path_deactivate_hop(path->hops[i].in_port,
path->hops[i].in_hop_index, path->clear_fc);
/* dword 0 */
hop.next_hop = path->hops[i].next_hop_index;
hop.out_port = path->hops[i].out_port->port;
hop.initial_credits = path->hops[i].initial_credits;
hop.unknown1 = 0;
hop.enable = 1;
/* dword 1 */
out_mask = (i == path->path_length - 1) ?
TB_PATH_DESTINATION : TB_PATH_INTERNAL;
in_mask = (i == 0) ? TB_PATH_SOURCE : TB_PATH_INTERNAL;
hop.weight = path->weight;
hop.unknown2 = 0;
hop.priority = path->priority;
hop.drop_packages = path->drop_packages;
hop.counter = path->hops[i].in_counter_index;
hop.counter_enable = path->hops[i].in_counter_index != -1;
hop.ingress_fc = path->ingress_fc_enable & in_mask;
hop.egress_fc = path->egress_fc_enable & out_mask;
hop.ingress_shared_buffer = path->ingress_shared_buffer
& in_mask;
hop.egress_shared_buffer = path->egress_shared_buffer
& out_mask;
hop.unknown3 = 0;
tb_port_dbg(path->hops[i].in_port, "Writing hop %d\n", i);
tb_dump_hop(&path->hops[i], &hop);
res = tb_port_write(path->hops[i].in_port, &hop, TB_CFG_HOPS,
2 * path->hops[i].in_hop_index, 2);
if (res) {
__tb_path_deactivate_hops(path, i);
__tb_path_deallocate_nfc(path, 0);
goto err;
}
}
path->activated = true;
tb_dbg(path->tb, "path activation complete\n");
return 0;
err:
tb_WARN(path->tb, "path activation failed\n");
return res;
}
/**
* tb_path_is_invalid() - check whether any ports on the path are invalid
* @path: Path to check
*
* Return: Returns true if the path is invalid, false otherwise.
*/
bool tb_path_is_invalid(struct tb_path *path)
{
int i = 0;
for (i = 0; i < path->path_length; i++) {
if (path->hops[i].in_port->sw->is_unplugged)
return true;
if (path->hops[i].out_port->sw->is_unplugged)
return true;
}
return false;
}
/**
 * tb_path_port_on_path() - Does the path go through a certain port
 * @path: Path to check
 * @port: Port to check
 *
 * Goes over all hops on the path and checks if @port is any of them.
* Direction does not matter.
*/
bool tb_path_port_on_path(const struct tb_path *path, const struct tb_port *port)
{
int i;
for (i = 0; i < path->path_length; i++) {
if (path->hops[i].in_port == port ||
path->hops[i].out_port == port)
return true;
}
return false;
}
| linux-master | drivers/thunderbolt/path.c |
// SPDX-License-Identifier: GPL-2.0
/*
 * Internal Thunderbolt Connection Manager. This is firmware running on
 * the Thunderbolt host controller that performs most of the low-level
 * handling.
*
* Copyright (C) 2017, Intel Corporation
* Authors: Michael Jamet <[email protected]>
* Mika Westerberg <[email protected]>
*/
#include <linux/delay.h>
#include <linux/mutex.h>
#include <linux/moduleparam.h>
#include <linux/pci.h>
#include <linux/pm_runtime.h>
#include <linux/platform_data/x86/apple.h>
#include <linux/sizes.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
#include "ctl.h"
#include "nhi_regs.h"
#include "tb.h"
#define PCIE2CIO_CMD 0x30
#define PCIE2CIO_CMD_TIMEOUT BIT(31)
#define PCIE2CIO_CMD_START BIT(30)
#define PCIE2CIO_CMD_WRITE BIT(21)
#define PCIE2CIO_CMD_CS_MASK GENMASK(20, 19)
#define PCIE2CIO_CMD_CS_SHIFT 19
#define PCIE2CIO_CMD_PORT_MASK GENMASK(18, 13)
#define PCIE2CIO_CMD_PORT_SHIFT 13
#define PCIE2CIO_WRDATA 0x34
#define PCIE2CIO_RDDATA 0x38
#define PHY_PORT_CS1 0x37
#define PHY_PORT_CS1_LINK_DISABLE BIT(14)
#define PHY_PORT_CS1_LINK_STATE_MASK GENMASK(29, 26)
#define PHY_PORT_CS1_LINK_STATE_SHIFT 26
#define ICM_TIMEOUT 5000 /* ms */
#define ICM_APPROVE_TIMEOUT 10000 /* ms */
#define ICM_MAX_LINK 4
static bool start_icm;
module_param(start_icm, bool, 0444);
MODULE_PARM_DESC(start_icm, "start ICM firmware if it is not running (default: false)");
/**
* struct usb4_switch_nvm_auth - Holds USB4 NVM_AUTH status
* @reply: Reply from ICM firmware is placed here
* @request: Request that is sent to ICM firmware
* @icm: Pointer to ICM private data
*/
struct usb4_switch_nvm_auth {
struct icm_usb4_switch_op_response reply;
struct icm_usb4_switch_op request;
struct icm *icm;
};
/**
* struct icm - Internal connection manager private data
 * @request_lock: Makes sure only one message is sent to ICM at a time
* @rescan_work: Work used to rescan the surviving switches after resume
* @upstream_port: Pointer to the PCIe upstream port this host
 *		   controller is connected to. This is only set for systems
* where ICM needs to be started manually
* @vnd_cap: Vendor defined capability where PCIe2CIO mailbox resides
* (only set when @upstream_port is not %NULL)
* @safe_mode: ICM is in safe mode
* @max_boot_acl: Maximum number of preboot ACL entries (%0 if not supported)
* @rpm: Does the controller support runtime PM (RTD3)
 * @can_upgrade_nvm: Can the NVM firmware be upgraded on this controller
* @proto_version: Firmware protocol version
* @last_nvm_auth: Last USB4 router NVM_AUTH result (or %NULL if not set)
* @veto: Is RTD3 veto in effect
* @is_supported: Checks if we can support ICM on this controller
* @cio_reset: Trigger CIO reset
* @get_mode: Read and return the ICM firmware mode (optional)
* @get_route: Find a route string for given switch
* @save_devices: Ask ICM to save devices to ACL when suspending (optional)
* @driver_ready: Send driver ready message to ICM
* @set_uuid: Set UUID for the root switch (optional)
* @device_connected: Handle device connected ICM message
* @device_disconnected: Handle device disconnected ICM message
* @xdomain_connected: Handle XDomain connected ICM message
* @xdomain_disconnected: Handle XDomain disconnected ICM message
* @rtd3_veto: Handle RTD3 veto notification ICM message
*/
struct icm {
struct mutex request_lock;
struct delayed_work rescan_work;
struct pci_dev *upstream_port;
int vnd_cap;
bool safe_mode;
size_t max_boot_acl;
bool rpm;
bool can_upgrade_nvm;
u8 proto_version;
struct usb4_switch_nvm_auth *last_nvm_auth;
bool veto;
bool (*is_supported)(struct tb *tb);
int (*cio_reset)(struct tb *tb);
int (*get_mode)(struct tb *tb);
int (*get_route)(struct tb *tb, u8 link, u8 depth, u64 *route);
void (*save_devices)(struct tb *tb);
int (*driver_ready)(struct tb *tb,
enum tb_security_level *security_level,
u8 *proto_version, size_t *nboot_acl, bool *rpm);
void (*set_uuid)(struct tb *tb);
void (*device_connected)(struct tb *tb,
const struct icm_pkg_header *hdr);
void (*device_disconnected)(struct tb *tb,
const struct icm_pkg_header *hdr);
void (*xdomain_connected)(struct tb *tb,
const struct icm_pkg_header *hdr);
void (*xdomain_disconnected)(struct tb *tb,
const struct icm_pkg_header *hdr);
void (*rtd3_veto)(struct tb *tb, const struct icm_pkg_header *hdr);
};
struct icm_notification {
struct work_struct work;
struct icm_pkg_header *pkg;
struct tb *tb;
};
struct ep_name_entry {
u8 len;
u8 type;
u8 data[];
};
#define EP_NAME_INTEL_VSS 0x10
/* Intel Vendor specific structure */
struct intel_vss {
u16 vendor;
u16 model;
u8 mc;
u8 flags;
u16 pci_devid;
u32 nvm_version;
};
#define INTEL_VSS_FLAGS_RTD3 BIT(0)
static const struct intel_vss *parse_intel_vss(const void *ep_name, size_t size)
{
const void *end = ep_name + size;
while (ep_name < end) {
const struct ep_name_entry *ep = ep_name;
if (!ep->len)
break;
if (ep_name + ep->len > end)
break;
if (ep->type == EP_NAME_INTEL_VSS)
return (const struct intel_vss *)ep->data;
ep_name += ep->len;
}
return NULL;
}
static bool intel_vss_is_rtd3(const void *ep_name, size_t size)
{
const struct intel_vss *vss;
vss = parse_intel_vss(ep_name, size);
if (vss)
return !!(vss->flags & INTEL_VSS_FLAGS_RTD3);
return false;
}
static inline struct tb *icm_to_tb(struct icm *icm)
{
return ((void *)icm - sizeof(struct tb));
}
static inline u8 phy_port_from_route(u64 route, u8 depth)
{
u8 link;
link = depth ? route >> ((depth - 1) * 8) : route;
return tb_phy_port_from_link(link);
}
static inline u8 dual_link_from_link(u8 link)
{
return link ? ((link - 1) ^ 0x01) + 1 : 0;
}
static inline u64 get_route(u32 route_hi, u32 route_lo)
{
return (u64)route_hi << 32 | route_lo;
}
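/*
 * A route string encodes one downstream port number per byte, with the
 * deepest hop occupying the most significant used byte. Clearing that
 * byte therefore yields the route of the parent router.
 */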
static inline u64 get_parent_route(u64 route)
{
int depth = tb_route_length(route);
return depth ? route & ~(0xffULL << (depth - 1) * TB_ROUTE_SHIFT) : 0;
}
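/*
 * PCIe2CIO mailbox: on systems where the ICM firmware has to be started
 * by the driver, config space of the connected router can be accessed
 * through a vendor specific capability of the PCIe upstream port. The
 * helpers below issue a single read or write and poll for completion.
 */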
static int pci2cio_wait_completion(struct icm *icm, unsigned long timeout_msec)
{
unsigned long end = jiffies + msecs_to_jiffies(timeout_msec);
u32 cmd;
do {
pci_read_config_dword(icm->upstream_port,
icm->vnd_cap + PCIE2CIO_CMD, &cmd);
if (!(cmd & PCIE2CIO_CMD_START)) {
if (cmd & PCIE2CIO_CMD_TIMEOUT)
break;
return 0;
}
msleep(50);
} while (time_before(jiffies, end));
return -ETIMEDOUT;
}
static int pcie2cio_read(struct icm *icm, enum tb_cfg_space cs,
unsigned int port, unsigned int index, u32 *data)
{
struct pci_dev *pdev = icm->upstream_port;
int ret, vnd_cap = icm->vnd_cap;
u32 cmd;
cmd = index;
cmd |= (port << PCIE2CIO_CMD_PORT_SHIFT) & PCIE2CIO_CMD_PORT_MASK;
cmd |= (cs << PCIE2CIO_CMD_CS_SHIFT) & PCIE2CIO_CMD_CS_MASK;
cmd |= PCIE2CIO_CMD_START;
pci_write_config_dword(pdev, vnd_cap + PCIE2CIO_CMD, cmd);
ret = pci2cio_wait_completion(icm, 5000);
if (ret)
return ret;
pci_read_config_dword(pdev, vnd_cap + PCIE2CIO_RDDATA, data);
return 0;
}
static int pcie2cio_write(struct icm *icm, enum tb_cfg_space cs,
unsigned int port, unsigned int index, u32 data)
{
struct pci_dev *pdev = icm->upstream_port;
int vnd_cap = icm->vnd_cap;
u32 cmd;
pci_write_config_dword(pdev, vnd_cap + PCIE2CIO_WRDATA, data);
cmd = index;
cmd |= (port << PCIE2CIO_CMD_PORT_SHIFT) & PCIE2CIO_CMD_PORT_MASK;
cmd |= (cs << PCIE2CIO_CMD_CS_SHIFT) & PCIE2CIO_CMD_CS_MASK;
cmd |= PCIE2CIO_CMD_WRITE | PCIE2CIO_CMD_START;
pci_write_config_dword(pdev, vnd_cap + PCIE2CIO_CMD, cmd);
return pci2cio_wait_completion(icm, 5000);
}
static bool icm_match(const struct tb_cfg_request *req,
const struct ctl_pkg *pkg)
{
const struct icm_pkg_header *res_hdr = pkg->buffer;
const struct icm_pkg_header *req_hdr = req->request;
if (pkg->frame.eof != req->response_type)
return false;
if (res_hdr->code != req_hdr->code)
return false;
return true;
}
static bool icm_copy(struct tb_cfg_request *req, const struct ctl_pkg *pkg)
{
const struct icm_pkg_header *hdr = pkg->buffer;
if (hdr->packet_id < req->npackets) {
size_t offset = hdr->packet_id * req->response_size;
memcpy(req->response + offset, pkg->buffer, req->response_size);
}
return hdr->packet_id == hdr->total_packets - 1;
}
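/*
 * Sends a request to the ICM firmware and waits for the response. Only
 * one request is in flight at a time (serialized by icm->request_lock)
 * and a request that times out is retried, up to four attempts in
 * total.
 */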
static int icm_request(struct tb *tb, const void *request, size_t request_size,
void *response, size_t response_size, size_t npackets,
unsigned int timeout_msec)
{
struct icm *icm = tb_priv(tb);
int retries = 3;
do {
struct tb_cfg_request *req;
struct tb_cfg_result res;
req = tb_cfg_request_alloc();
if (!req)
return -ENOMEM;
req->match = icm_match;
req->copy = icm_copy;
req->request = request;
req->request_size = request_size;
req->request_type = TB_CFG_PKG_ICM_CMD;
req->response = response;
req->npackets = npackets;
req->response_size = response_size;
req->response_type = TB_CFG_PKG_ICM_RESP;
mutex_lock(&icm->request_lock);
res = tb_cfg_request_sync(tb->ctl, req, timeout_msec);
mutex_unlock(&icm->request_lock);
tb_cfg_request_put(req);
if (res.err != -ETIMEDOUT)
return res.err == 1 ? -EIO : res.err;
usleep_range(20, 50);
} while (retries--);
return -ETIMEDOUT;
}
/*
 * If rescan is queued to run (we are resuming), postpone it to give the
 * firmware some more time to send device connected notifications for the
 * next devices in the chain.
 */
static void icm_postpone_rescan(struct tb *tb)
{
struct icm *icm = tb_priv(tb);
if (delayed_work_pending(&icm->rescan_work))
mod_delayed_work(tb->wq, &icm->rescan_work,
msecs_to_jiffies(500));
}
static void icm_veto_begin(struct tb *tb)
{
struct icm *icm = tb_priv(tb);
if (!icm->veto) {
icm->veto = true;
/* Keep the domain powered while veto is in effect */
pm_runtime_get(&tb->dev);
}
}
static void icm_veto_end(struct tb *tb)
{
struct icm *icm = tb_priv(tb);
if (icm->veto) {
icm->veto = false;
/* Allow the domain suspend now */
pm_runtime_mark_last_busy(&tb->dev);
pm_runtime_put_autosuspend(&tb->dev);
}
}
static bool icm_firmware_running(const struct tb_nhi *nhi)
{
u32 val;
val = ioread32(nhi->iobase + REG_FW_STS);
return !!(val & REG_FW_STS_ICM_EN);
}
static bool icm_fr_is_supported(struct tb *tb)
{
return !x86_apple_machine;
}
static inline int icm_fr_get_switch_index(u32 port)
{
int index;
if ((port & ICM_PORT_TYPE_MASK) != TB_TYPE_PORT)
return 0;
index = port >> ICM_PORT_INDEX_SHIFT;
return index != 0xff ? index : 0;
}
static int icm_fr_get_route(struct tb *tb, u8 link, u8 depth, u64 *route)
{
struct icm_fr_pkg_get_topology_response *switches, *sw;
struct icm_fr_pkg_get_topology request = {
.hdr = { .code = ICM_GET_TOPOLOGY },
};
size_t npackets = ICM_GET_TOPOLOGY_PACKETS;
int ret, index;
u8 i;
switches = kcalloc(npackets, sizeof(*switches), GFP_KERNEL);
if (!switches)
return -ENOMEM;
ret = icm_request(tb, &request, sizeof(request), switches,
sizeof(*switches), npackets, ICM_TIMEOUT);
if (ret)
goto err_free;
sw = &switches[0];
index = icm_fr_get_switch_index(sw->ports[link]);
if (!index) {
ret = -ENODEV;
goto err_free;
}
sw = &switches[index];
for (i = 1; i < depth; i++) {
unsigned int j;
if (!(sw->first_data & ICM_SWITCH_USED)) {
ret = -ENODEV;
goto err_free;
}
for (j = 0; j < ARRAY_SIZE(sw->ports); j++) {
index = icm_fr_get_switch_index(sw->ports[j]);
if (index > sw->switch_index) {
sw = &switches[index];
break;
}
}
}
*route = get_route(sw->route_hi, sw->route_lo);
err_free:
kfree(switches);
return ret;
}
static void icm_fr_save_devices(struct tb *tb)
{
nhi_mailbox_cmd(tb->nhi, NHI_MAILBOX_SAVE_DEVS, 0);
}
static int
icm_fr_driver_ready(struct tb *tb, enum tb_security_level *security_level,
u8 *proto_version, size_t *nboot_acl, bool *rpm)
{
struct icm_fr_pkg_driver_ready_response reply;
struct icm_pkg_driver_ready request = {
.hdr.code = ICM_DRIVER_READY,
};
int ret;
memset(&reply, 0, sizeof(reply));
ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),
1, ICM_TIMEOUT);
if (ret)
return ret;
if (security_level)
*security_level = reply.security_level & ICM_FR_SLEVEL_MASK;
return 0;
}
static int icm_fr_approve_switch(struct tb *tb, struct tb_switch *sw)
{
struct icm_fr_pkg_approve_device request;
struct icm_fr_pkg_approve_device reply;
int ret;
memset(&request, 0, sizeof(request));
memcpy(&request.ep_uuid, sw->uuid, sizeof(request.ep_uuid));
request.hdr.code = ICM_APPROVE_DEVICE;
request.connection_id = sw->connection_id;
request.connection_key = sw->connection_key;
memset(&reply, 0, sizeof(reply));
/* Use larger timeout as establishing tunnels can take some time */
ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),
1, ICM_APPROVE_TIMEOUT);
if (ret)
return ret;
if (reply.hdr.flags & ICM_FLAGS_ERROR) {
tb_warn(tb, "PCIe tunnel creation failed\n");
return -EIO;
}
return 0;
}
static int icm_fr_add_switch_key(struct tb *tb, struct tb_switch *sw)
{
struct icm_fr_pkg_add_device_key request;
struct icm_fr_pkg_add_device_key_response reply;
int ret;
memset(&request, 0, sizeof(request));
memcpy(&request.ep_uuid, sw->uuid, sizeof(request.ep_uuid));
request.hdr.code = ICM_ADD_DEVICE_KEY;
request.connection_id = sw->connection_id;
request.connection_key = sw->connection_key;
memcpy(request.key, sw->key, TB_SWITCH_KEY_SIZE);
memset(&reply, 0, sizeof(reply));
ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),
1, ICM_TIMEOUT);
if (ret)
return ret;
if (reply.hdr.flags & ICM_FLAGS_ERROR) {
tb_warn(tb, "Adding key to switch failed\n");
return -EIO;
}
return 0;
}
static int icm_fr_challenge_switch_key(struct tb *tb, struct tb_switch *sw,
const u8 *challenge, u8 *response)
{
struct icm_fr_pkg_challenge_device request;
struct icm_fr_pkg_challenge_device_response reply;
int ret;
memset(&request, 0, sizeof(request));
memcpy(&request.ep_uuid, sw->uuid, sizeof(request.ep_uuid));
request.hdr.code = ICM_CHALLENGE_DEVICE;
request.connection_id = sw->connection_id;
request.connection_key = sw->connection_key;
memcpy(request.challenge, challenge, TB_SWITCH_KEY_SIZE);
memset(&reply, 0, sizeof(reply));
ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),
1, ICM_TIMEOUT);
if (ret)
return ret;
if (reply.hdr.flags & ICM_FLAGS_ERROR)
return -EKEYREJECTED;
if (reply.hdr.flags & ICM_FLAGS_NO_KEY)
return -ENOKEY;
memcpy(response, reply.response, TB_SWITCH_KEY_SIZE);
return 0;
}
static int icm_fr_approve_xdomain_paths(struct tb *tb, struct tb_xdomain *xd,
int transmit_path, int transmit_ring,
int receive_path, int receive_ring)
{
struct icm_fr_pkg_approve_xdomain_response reply;
struct icm_fr_pkg_approve_xdomain request;
int ret;
memset(&request, 0, sizeof(request));
request.hdr.code = ICM_APPROVE_XDOMAIN;
request.link_info = xd->depth << ICM_LINK_INFO_DEPTH_SHIFT | xd->link;
memcpy(&request.remote_uuid, xd->remote_uuid, sizeof(*xd->remote_uuid));
request.transmit_path = transmit_path;
request.transmit_ring = transmit_ring;
request.receive_path = receive_path;
request.receive_ring = receive_ring;
memset(&reply, 0, sizeof(reply));
ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),
1, ICM_TIMEOUT);
if (ret)
return ret;
if (reply.hdr.flags & ICM_FLAGS_ERROR)
return -EIO;
return 0;
}
static int icm_fr_disconnect_xdomain_paths(struct tb *tb, struct tb_xdomain *xd,
int transmit_path, int transmit_ring,
int receive_path, int receive_ring)
{
u8 phy_port;
u8 cmd;
phy_port = tb_phy_port_from_link(xd->link);
if (phy_port == 0)
cmd = NHI_MAILBOX_DISCONNECT_PA;
else
cmd = NHI_MAILBOX_DISCONNECT_PB;
nhi_mailbox_cmd(tb->nhi, cmd, 1);
usleep_range(10, 50);
nhi_mailbox_cmd(tb->nhi, cmd, 2);
return 0;
}
static struct tb_switch *alloc_switch(struct tb_switch *parent_sw, u64 route,
const uuid_t *uuid)
{
struct tb *tb = parent_sw->tb;
struct tb_switch *sw;
sw = tb_switch_alloc(tb, &parent_sw->dev, route);
if (IS_ERR(sw)) {
tb_warn(tb, "failed to allocate switch at %llx\n", route);
return sw;
}
sw->uuid = kmemdup(uuid, sizeof(*uuid), GFP_KERNEL);
if (!sw->uuid) {
tb_switch_put(sw);
return ERR_PTR(-ENOMEM);
}
init_completion(&sw->rpm_complete);
return sw;
}
static int add_switch(struct tb_switch *parent_sw, struct tb_switch *sw)
{
u64 route = tb_route(sw);
int ret;
/* Link the two switches now */
tb_port_at(route, parent_sw)->remote = tb_upstream_port(sw);
tb_upstream_port(sw)->remote = tb_port_at(route, parent_sw);
ret = tb_switch_add(sw);
if (ret)
tb_port_at(tb_route(sw), parent_sw)->remote = NULL;
return ret;
}
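/*
 * Called when the firmware reports a device we already know about (for
 * example after resume). Re-attaches the switch to the parent port it
 * is now behind and refreshes the addressing and connection
 * information without re-enumerating the device.
 */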
static void update_switch(struct tb_switch *sw, u64 route, u8 connection_id,
u8 connection_key, u8 link, u8 depth, bool boot)
{
struct tb_switch *parent_sw = tb_switch_parent(sw);
/* Disconnect from parent */
tb_switch_downstream_port(sw)->remote = NULL;
/* Re-connect via updated port */
tb_port_at(route, parent_sw)->remote = tb_upstream_port(sw);
/* Update with the new addressing information */
sw->config.route_hi = upper_32_bits(route);
sw->config.route_lo = lower_32_bits(route);
sw->connection_id = connection_id;
sw->connection_key = connection_key;
sw->link = link;
sw->depth = depth;
sw->boot = boot;
/* This switch still exists */
sw->is_unplugged = false;
/* Runtime resume is now complete */
complete(&sw->rpm_complete);
}
static void remove_switch(struct tb_switch *sw)
{
tb_switch_downstream_port(sw)->remote = NULL;
tb_switch_remove(sw);
}
static void add_xdomain(struct tb_switch *sw, u64 route,
const uuid_t *local_uuid, const uuid_t *remote_uuid,
u8 link, u8 depth)
{
struct tb_xdomain *xd;
pm_runtime_get_sync(&sw->dev);
xd = tb_xdomain_alloc(sw->tb, &sw->dev, route, local_uuid, remote_uuid);
if (!xd)
goto out;
xd->link = link;
xd->depth = depth;
tb_port_at(route, sw)->xdomain = xd;
tb_xdomain_add(xd);
out:
pm_runtime_mark_last_busy(&sw->dev);
pm_runtime_put_autosuspend(&sw->dev);
}
static void update_xdomain(struct tb_xdomain *xd, u64 route, u8 link)
{
xd->link = link;
xd->route = route;
xd->is_unplugged = false;
}
static void remove_xdomain(struct tb_xdomain *xd)
{
struct tb_switch *sw;
sw = tb_to_switch(xd->dev.parent);
tb_port_at(xd->route, sw)->xdomain = NULL;
tb_xdomain_remove(xd);
}
static void
icm_fr_device_connected(struct tb *tb, const struct icm_pkg_header *hdr)
{
const struct icm_fr_event_device_connected *pkg =
(const struct icm_fr_event_device_connected *)hdr;
enum tb_security_level security_level;
struct tb_switch *sw, *parent_sw;
bool boot, dual_lane, speed_gen3;
struct icm *icm = tb_priv(tb);
bool authorized = false;
struct tb_xdomain *xd;
u8 link, depth;
u64 route;
int ret;
icm_postpone_rescan(tb);
link = pkg->link_info & ICM_LINK_INFO_LINK_MASK;
depth = (pkg->link_info & ICM_LINK_INFO_DEPTH_MASK) >>
ICM_LINK_INFO_DEPTH_SHIFT;
authorized = pkg->link_info & ICM_LINK_INFO_APPROVED;
security_level = (pkg->hdr.flags & ICM_FLAGS_SLEVEL_MASK) >>
ICM_FLAGS_SLEVEL_SHIFT;
boot = pkg->link_info & ICM_LINK_INFO_BOOT;
dual_lane = pkg->hdr.flags & ICM_FLAGS_DUAL_LANE;
speed_gen3 = pkg->hdr.flags & ICM_FLAGS_SPEED_GEN3;
if (pkg->link_info & ICM_LINK_INFO_REJECTED) {
tb_info(tb, "switch at %u.%u was rejected by ICM firmware because topology limit exceeded\n",
link, depth);
return;
}
sw = tb_switch_find_by_uuid(tb, &pkg->ep_uuid);
if (sw) {
u8 phy_port, sw_phy_port;
sw_phy_port = tb_phy_port_from_link(sw->link);
phy_port = tb_phy_port_from_link(link);
		/*
		 * On resume ICM will send us connected events for the
		 * devices that are still present. However, that
		 * information might have changed, for example because a
		 * switch on a dual-link connection might now have been
		 * enumerated using the other link. Make sure our
		 * bookkeeping matches that.
		 */
if (sw->depth == depth && sw_phy_port == phy_port &&
!!sw->authorized == authorized) {
			/*
			 * It was enumerated through another link, so
			 * update the route string accordingly.
			 */
if (sw->link != link) {
ret = icm->get_route(tb, link, depth, &route);
if (ret) {
tb_err(tb, "failed to update route string for switch at %u.%u\n",
link, depth);
tb_switch_put(sw);
return;
}
} else {
route = tb_route(sw);
}
update_switch(sw, route, pkg->connection_id,
pkg->connection_key, link, depth, boot);
tb_switch_put(sw);
return;
}
/*
* User connected the same switch to another physical
* port or to another part of the topology. Remove the
* existing switch now before adding the new one.
*/
remove_switch(sw);
tb_switch_put(sw);
}
	/*
	 * If the switch was not found by UUID, look for a switch on the
	 * same physical port (taking possible link aggregation into
	 * account) and depth. If we find one it is definitely a stale
	 * one, so remove it first.
	 */
sw = tb_switch_find_by_link_depth(tb, link, depth);
if (!sw) {
u8 dual_link;
dual_link = dual_link_from_link(link);
if (dual_link)
sw = tb_switch_find_by_link_depth(tb, dual_link, depth);
}
if (sw) {
remove_switch(sw);
tb_switch_put(sw);
}
/* Remove existing XDomain connection if found */
xd = tb_xdomain_find_by_link_depth(tb, link, depth);
if (xd) {
remove_xdomain(xd);
tb_xdomain_put(xd);
}
parent_sw = tb_switch_find_by_link_depth(tb, link, depth - 1);
if (!parent_sw) {
tb_err(tb, "failed to find parent switch for %u.%u\n",
link, depth);
return;
}
ret = icm->get_route(tb, link, depth, &route);
if (ret) {
tb_err(tb, "failed to find route string for switch at %u.%u\n",
link, depth);
tb_switch_put(parent_sw);
return;
}
pm_runtime_get_sync(&parent_sw->dev);
sw = alloc_switch(parent_sw, route, &pkg->ep_uuid);
if (!IS_ERR(sw)) {
sw->connection_id = pkg->connection_id;
sw->connection_key = pkg->connection_key;
sw->link = link;
sw->depth = depth;
sw->authorized = authorized;
sw->security_level = security_level;
sw->boot = boot;
sw->link_speed = speed_gen3 ? 20 : 10;
sw->link_width = dual_lane ? TB_LINK_WIDTH_DUAL :
TB_LINK_WIDTH_SINGLE;
sw->rpm = intel_vss_is_rtd3(pkg->ep_name, sizeof(pkg->ep_name));
if (add_switch(parent_sw, sw))
tb_switch_put(sw);
}
pm_runtime_mark_last_busy(&parent_sw->dev);
pm_runtime_put_autosuspend(&parent_sw->dev);
tb_switch_put(parent_sw);
}
static void
icm_fr_device_disconnected(struct tb *tb, const struct icm_pkg_header *hdr)
{
const struct icm_fr_event_device_disconnected *pkg =
(const struct icm_fr_event_device_disconnected *)hdr;
struct tb_switch *sw;
u8 link, depth;
link = pkg->link_info & ICM_LINK_INFO_LINK_MASK;
depth = (pkg->link_info & ICM_LINK_INFO_DEPTH_MASK) >>
ICM_LINK_INFO_DEPTH_SHIFT;
if (link > ICM_MAX_LINK || depth > TB_SWITCH_MAX_DEPTH) {
tb_warn(tb, "invalid topology %u.%u, ignoring\n", link, depth);
return;
}
sw = tb_switch_find_by_link_depth(tb, link, depth);
if (!sw) {
tb_warn(tb, "no switch exists at %u.%u, ignoring\n", link,
depth);
return;
}
pm_runtime_get_sync(sw->dev.parent);
remove_switch(sw);
pm_runtime_mark_last_busy(sw->dev.parent);
pm_runtime_put_autosuspend(sw->dev.parent);
tb_switch_put(sw);
}
static void
icm_fr_xdomain_connected(struct tb *tb, const struct icm_pkg_header *hdr)
{
const struct icm_fr_event_xdomain_connected *pkg =
(const struct icm_fr_event_xdomain_connected *)hdr;
struct tb_xdomain *xd;
struct tb_switch *sw;
u8 link, depth;
u64 route;
link = pkg->link_info & ICM_LINK_INFO_LINK_MASK;
depth = (pkg->link_info & ICM_LINK_INFO_DEPTH_MASK) >>
ICM_LINK_INFO_DEPTH_SHIFT;
if (link > ICM_MAX_LINK || depth > TB_SWITCH_MAX_DEPTH) {
tb_warn(tb, "invalid topology %u.%u, ignoring\n", link, depth);
return;
}
route = get_route(pkg->local_route_hi, pkg->local_route_lo);
xd = tb_xdomain_find_by_uuid(tb, &pkg->remote_uuid);
if (xd) {
u8 xd_phy_port, phy_port;
xd_phy_port = phy_port_from_route(xd->route, xd->depth);
phy_port = phy_port_from_route(route, depth);
if (xd->depth == depth && xd_phy_port == phy_port) {
update_xdomain(xd, route, link);
tb_xdomain_put(xd);
return;
}
		/*
		 * If we find an existing XDomain connection, remove it
		 * now. We need to go through the login handshake and
		 * everything anyway to be able to re-establish the
		 * connection.
		 */
remove_xdomain(xd);
tb_xdomain_put(xd);
}
	/*
	 * Check if an XDomain already exists in the same place as the
	 * new one and in that case remove it, because it is most likely
	 * another host that got disconnected.
	 */
xd = tb_xdomain_find_by_link_depth(tb, link, depth);
if (!xd) {
u8 dual_link;
dual_link = dual_link_from_link(link);
if (dual_link)
xd = tb_xdomain_find_by_link_depth(tb, dual_link,
depth);
}
if (xd) {
remove_xdomain(xd);
tb_xdomain_put(xd);
}
/*
* If the user disconnected a switch during suspend and
* connected another host to the same port, remove the switch
* first.
*/
sw = tb_switch_find_by_route(tb, route);
if (sw) {
remove_switch(sw);
tb_switch_put(sw);
}
sw = tb_switch_find_by_link_depth(tb, link, depth);
if (!sw) {
tb_warn(tb, "no switch exists at %u.%u, ignoring\n", link,
depth);
return;
}
add_xdomain(sw, route, &pkg->local_uuid, &pkg->remote_uuid, link,
depth);
tb_switch_put(sw);
}
static void
icm_fr_xdomain_disconnected(struct tb *tb, const struct icm_pkg_header *hdr)
{
const struct icm_fr_event_xdomain_disconnected *pkg =
(const struct icm_fr_event_xdomain_disconnected *)hdr;
struct tb_xdomain *xd;
/*
* If the connection is through one or multiple devices, the
* XDomain device is removed along with them so it is fine if we
* cannot find it here.
*/
xd = tb_xdomain_find_by_uuid(tb, &pkg->remote_uuid);
if (xd) {
remove_xdomain(xd);
tb_xdomain_put(xd);
}
}
static int icm_tr_cio_reset(struct tb *tb)
{
return pcie2cio_write(tb_priv(tb), TB_CFG_SWITCH, 0, 0x777, BIT(1));
}
static int
icm_tr_driver_ready(struct tb *tb, enum tb_security_level *security_level,
u8 *proto_version, size_t *nboot_acl, bool *rpm)
{
struct icm_tr_pkg_driver_ready_response reply;
struct icm_pkg_driver_ready request = {
.hdr.code = ICM_DRIVER_READY,
};
int ret;
memset(&reply, 0, sizeof(reply));
ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),
1, 20000);
if (ret)
return ret;
if (security_level)
*security_level = reply.info & ICM_TR_INFO_SLEVEL_MASK;
if (proto_version)
*proto_version = (reply.info & ICM_TR_INFO_PROTO_VERSION_MASK) >>
ICM_TR_INFO_PROTO_VERSION_SHIFT;
if (nboot_acl)
*nboot_acl = (reply.info & ICM_TR_INFO_BOOT_ACL_MASK) >>
ICM_TR_INFO_BOOT_ACL_SHIFT;
if (rpm)
*rpm = !!(reply.hdr.flags & ICM_TR_FLAGS_RTD3);
return 0;
}
static int icm_tr_approve_switch(struct tb *tb, struct tb_switch *sw)
{
struct icm_tr_pkg_approve_device request;
struct icm_tr_pkg_approve_device reply;
int ret;
memset(&request, 0, sizeof(request));
memcpy(&request.ep_uuid, sw->uuid, sizeof(request.ep_uuid));
request.hdr.code = ICM_APPROVE_DEVICE;
request.route_lo = sw->config.route_lo;
request.route_hi = sw->config.route_hi;
request.connection_id = sw->connection_id;
memset(&reply, 0, sizeof(reply));
ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),
1, ICM_APPROVE_TIMEOUT);
if (ret)
return ret;
if (reply.hdr.flags & ICM_FLAGS_ERROR) {
tb_warn(tb, "PCIe tunnel creation failed\n");
return -EIO;
}
return 0;
}
static int icm_tr_add_switch_key(struct tb *tb, struct tb_switch *sw)
{
struct icm_tr_pkg_add_device_key_response reply;
struct icm_tr_pkg_add_device_key request;
int ret;
memset(&request, 0, sizeof(request));
memcpy(&request.ep_uuid, sw->uuid, sizeof(request.ep_uuid));
request.hdr.code = ICM_ADD_DEVICE_KEY;
request.route_lo = sw->config.route_lo;
request.route_hi = sw->config.route_hi;
request.connection_id = sw->connection_id;
memcpy(request.key, sw->key, TB_SWITCH_KEY_SIZE);
memset(&reply, 0, sizeof(reply));
ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),
1, ICM_TIMEOUT);
if (ret)
return ret;
if (reply.hdr.flags & ICM_FLAGS_ERROR) {
tb_warn(tb, "Adding key to switch failed\n");
return -EIO;
}
return 0;
}
static int icm_tr_challenge_switch_key(struct tb *tb, struct tb_switch *sw,
const u8 *challenge, u8 *response)
{
struct icm_tr_pkg_challenge_device_response reply;
struct icm_tr_pkg_challenge_device request;
int ret;
memset(&request, 0, sizeof(request));
memcpy(&request.ep_uuid, sw->uuid, sizeof(request.ep_uuid));
request.hdr.code = ICM_CHALLENGE_DEVICE;
request.route_lo = sw->config.route_lo;
request.route_hi = sw->config.route_hi;
request.connection_id = sw->connection_id;
memcpy(request.challenge, challenge, TB_SWITCH_KEY_SIZE);
memset(&reply, 0, sizeof(reply));
ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),
1, ICM_TIMEOUT);
if (ret)
return ret;
if (reply.hdr.flags & ICM_FLAGS_ERROR)
return -EKEYREJECTED;
if (reply.hdr.flags & ICM_FLAGS_NO_KEY)
return -ENOKEY;
memcpy(response, reply.response, TB_SWITCH_KEY_SIZE);
return 0;
}
static int icm_tr_approve_xdomain_paths(struct tb *tb, struct tb_xdomain *xd,
int transmit_path, int transmit_ring,
int receive_path, int receive_ring)
{
struct icm_tr_pkg_approve_xdomain_response reply;
struct icm_tr_pkg_approve_xdomain request;
int ret;
memset(&request, 0, sizeof(request));
request.hdr.code = ICM_APPROVE_XDOMAIN;
request.route_hi = upper_32_bits(xd->route);
request.route_lo = lower_32_bits(xd->route);
request.transmit_path = transmit_path;
request.transmit_ring = transmit_ring;
request.receive_path = receive_path;
request.receive_ring = receive_ring;
memcpy(&request.remote_uuid, xd->remote_uuid, sizeof(*xd->remote_uuid));
memset(&reply, 0, sizeof(reply));
ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),
1, ICM_TIMEOUT);
if (ret)
return ret;
if (reply.hdr.flags & ICM_FLAGS_ERROR)
return -EIO;
return 0;
}
static int icm_tr_xdomain_tear_down(struct tb *tb, struct tb_xdomain *xd,
int stage)
{
struct icm_tr_pkg_disconnect_xdomain_response reply;
struct icm_tr_pkg_disconnect_xdomain request;
int ret;
memset(&request, 0, sizeof(request));
request.hdr.code = ICM_DISCONNECT_XDOMAIN;
request.stage = stage;
request.route_hi = upper_32_bits(xd->route);
request.route_lo = lower_32_bits(xd->route);
memcpy(&request.remote_uuid, xd->remote_uuid, sizeof(*xd->remote_uuid));
memset(&reply, 0, sizeof(reply));
ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),
1, ICM_TIMEOUT);
if (ret)
return ret;
if (reply.hdr.flags & ICM_FLAGS_ERROR)
return -EIO;
return 0;
}
static int icm_tr_disconnect_xdomain_paths(struct tb *tb, struct tb_xdomain *xd,
int transmit_path, int transmit_ring,
int receive_path, int receive_ring)
{
int ret;
ret = icm_tr_xdomain_tear_down(tb, xd, 1);
if (ret)
return ret;
usleep_range(10, 50);
return icm_tr_xdomain_tear_down(tb, xd, 2);
}
static void
__icm_tr_device_connected(struct tb *tb, const struct icm_pkg_header *hdr,
bool force_rtd3)
{
const struct icm_tr_event_device_connected *pkg =
(const struct icm_tr_event_device_connected *)hdr;
bool authorized, boot, dual_lane, speed_gen3;
enum tb_security_level security_level;
struct tb_switch *sw, *parent_sw;
struct tb_xdomain *xd;
u64 route;
icm_postpone_rescan(tb);
/*
* Currently we don't use the QoS information coming with the
* device connected message so simply ignore that extra
* packet for now.
*/
if (pkg->hdr.packet_id)
return;
route = get_route(pkg->route_hi, pkg->route_lo);
authorized = pkg->link_info & ICM_LINK_INFO_APPROVED;
security_level = (pkg->hdr.flags & ICM_FLAGS_SLEVEL_MASK) >>
ICM_FLAGS_SLEVEL_SHIFT;
boot = pkg->link_info & ICM_LINK_INFO_BOOT;
dual_lane = pkg->hdr.flags & ICM_FLAGS_DUAL_LANE;
speed_gen3 = pkg->hdr.flags & ICM_FLAGS_SPEED_GEN3;
if (pkg->link_info & ICM_LINK_INFO_REJECTED) {
tb_info(tb, "switch at %llx was rejected by ICM firmware because topology limit exceeded\n",
route);
return;
}
sw = tb_switch_find_by_uuid(tb, &pkg->ep_uuid);
if (sw) {
/* Update the switch if it is still in the same place */
if (tb_route(sw) == route && !!sw->authorized == authorized) {
update_switch(sw, route, pkg->connection_id, 0, 0, 0,
boot);
tb_switch_put(sw);
return;
}
remove_switch(sw);
tb_switch_put(sw);
}
/* Another switch with the same address */
sw = tb_switch_find_by_route(tb, route);
if (sw) {
remove_switch(sw);
tb_switch_put(sw);
}
/* XDomain connection with the same address */
xd = tb_xdomain_find_by_route(tb, route);
if (xd) {
remove_xdomain(xd);
tb_xdomain_put(xd);
}
parent_sw = tb_switch_find_by_route(tb, get_parent_route(route));
if (!parent_sw) {
tb_err(tb, "failed to find parent switch for %llx\n", route);
return;
}
pm_runtime_get_sync(&parent_sw->dev);
sw = alloc_switch(parent_sw, route, &pkg->ep_uuid);
if (!IS_ERR(sw)) {
sw->connection_id = pkg->connection_id;
sw->authorized = authorized;
sw->security_level = security_level;
sw->boot = boot;
sw->link_speed = speed_gen3 ? 20 : 10;
sw->link_width = dual_lane ? TB_LINK_WIDTH_DUAL :
TB_LINK_WIDTH_SINGLE;
sw->rpm = force_rtd3;
if (!sw->rpm)
sw->rpm = intel_vss_is_rtd3(pkg->ep_name,
sizeof(pkg->ep_name));
if (add_switch(parent_sw, sw))
tb_switch_put(sw);
}
pm_runtime_mark_last_busy(&parent_sw->dev);
pm_runtime_put_autosuspend(&parent_sw->dev);
tb_switch_put(parent_sw);
}
static void
icm_tr_device_connected(struct tb *tb, const struct icm_pkg_header *hdr)
{
__icm_tr_device_connected(tb, hdr, false);
}
static void
icm_tr_device_disconnected(struct tb *tb, const struct icm_pkg_header *hdr)
{
const struct icm_tr_event_device_disconnected *pkg =
(const struct icm_tr_event_device_disconnected *)hdr;
struct tb_switch *sw;
u64 route;
route = get_route(pkg->route_hi, pkg->route_lo);
sw = tb_switch_find_by_route(tb, route);
if (!sw) {
tb_warn(tb, "no switch exists at %llx, ignoring\n", route);
return;
}
pm_runtime_get_sync(sw->dev.parent);
remove_switch(sw);
pm_runtime_mark_last_busy(sw->dev.parent);
pm_runtime_put_autosuspend(sw->dev.parent);
tb_switch_put(sw);
}
static void
icm_tr_xdomain_connected(struct tb *tb, const struct icm_pkg_header *hdr)
{
const struct icm_tr_event_xdomain_connected *pkg =
(const struct icm_tr_event_xdomain_connected *)hdr;
struct tb_xdomain *xd;
struct tb_switch *sw;
u64 route;
if (!tb->root_switch)
return;
route = get_route(pkg->local_route_hi, pkg->local_route_lo);
xd = tb_xdomain_find_by_uuid(tb, &pkg->remote_uuid);
if (xd) {
if (xd->route == route) {
update_xdomain(xd, route, 0);
tb_xdomain_put(xd);
return;
}
remove_xdomain(xd);
tb_xdomain_put(xd);
}
/* An existing xdomain with the same address */
xd = tb_xdomain_find_by_route(tb, route);
if (xd) {
remove_xdomain(xd);
tb_xdomain_put(xd);
}
/*
* If the user disconnected a switch during suspend and
* connected another host to the same port, remove the switch
* first.
*/
sw = tb_switch_find_by_route(tb, route);
if (sw) {
remove_switch(sw);
tb_switch_put(sw);
}
sw = tb_switch_find_by_route(tb, get_parent_route(route));
if (!sw) {
tb_warn(tb, "no switch exists at %llx, ignoring\n", route);
return;
}
add_xdomain(sw, route, &pkg->local_uuid, &pkg->remote_uuid, 0, 0);
tb_switch_put(sw);
}
static void
icm_tr_xdomain_disconnected(struct tb *tb, const struct icm_pkg_header *hdr)
{
const struct icm_tr_event_xdomain_disconnected *pkg =
(const struct icm_tr_event_xdomain_disconnected *)hdr;
struct tb_xdomain *xd;
u64 route;
route = get_route(pkg->route_hi, pkg->route_lo);
xd = tb_xdomain_find_by_route(tb, route);
if (xd) {
remove_xdomain(xd);
tb_xdomain_put(xd);
}
}
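/*
 * Walk up the PCIe hierarchy from @pdev and return the upstream port of
 * a supported Alpine Ridge or Titan Ridge controller, or NULL if there
 * is none. Used when the ICM needs to be reset through the vendor
 * specific registers of that port.
 */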
static struct pci_dev *get_upstream_port(struct pci_dev *pdev)
{
struct pci_dev *parent;
parent = pci_upstream_bridge(pdev);
while (parent) {
if (!pci_is_pcie(parent))
return NULL;
if (pci_pcie_type(parent) == PCI_EXP_TYPE_UPSTREAM)
break;
parent = pci_upstream_bridge(parent);
}
if (!parent)
return NULL;
switch (parent->device) {
case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_2C_BRIDGE:
case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_4C_BRIDGE:
case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_LP_BRIDGE:
case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_4C_BRIDGE:
case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_2C_BRIDGE:
case PCI_DEVICE_ID_INTEL_TITAN_RIDGE_2C_BRIDGE:
case PCI_DEVICE_ID_INTEL_TITAN_RIDGE_4C_BRIDGE:
return parent;
}
return NULL;
}
static bool icm_ar_is_supported(struct tb *tb)
{
struct pci_dev *upstream_port;
struct icm *icm = tb_priv(tb);
/*
* Starting from Alpine Ridge we can use ICM on Apple machines
* as well. We just need to reset and re-enable it first.
* However, only start it if explicitly asked by the user.
*/
if (icm_firmware_running(tb->nhi))
return true;
if (!start_icm)
return false;
/*
* Find the upstream PCIe port in case we need to do reset
* through its vendor specific registers.
*/
upstream_port = get_upstream_port(tb->nhi->pdev);
if (upstream_port) {
int cap;
cap = pci_find_ext_capability(upstream_port,
PCI_EXT_CAP_ID_VNDR);
if (cap > 0) {
icm->upstream_port = upstream_port;
icm->vnd_cap = cap;
return true;
}
}
return false;
}
static int icm_ar_cio_reset(struct tb *tb)
{
return pcie2cio_write(tb_priv(tb), TB_CFG_SWITCH, 0, 0x50, BIT(9));
}
static int icm_ar_get_mode(struct tb *tb)
{
struct tb_nhi *nhi = tb->nhi;
int retries = 60;
u32 val;
do {
val = ioread32(nhi->iobase + REG_FW_STS);
if (val & REG_FW_STS_NVM_AUTH_DONE)
break;
msleep(50);
} while (--retries);
if (!retries) {
dev_err(&nhi->pdev->dev, "ICM firmware not authenticated\n");
return -ENODEV;
}
return nhi_mailbox_mode(nhi);
}
static int
icm_ar_driver_ready(struct tb *tb, enum tb_security_level *security_level,
u8 *proto_version, size_t *nboot_acl, bool *rpm)
{
struct icm_ar_pkg_driver_ready_response reply;
struct icm_pkg_driver_ready request = {
.hdr.code = ICM_DRIVER_READY,
};
int ret;
memset(&reply, 0, sizeof(reply));
ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),
1, ICM_TIMEOUT);
if (ret)
return ret;
if (security_level)
*security_level = reply.info & ICM_AR_INFO_SLEVEL_MASK;
if (nboot_acl && (reply.info & ICM_AR_INFO_BOOT_ACL_SUPPORTED))
*nboot_acl = (reply.info & ICM_AR_INFO_BOOT_ACL_MASK) >>
ICM_AR_INFO_BOOT_ACL_SHIFT;
if (rpm)
*rpm = !!(reply.hdr.flags & ICM_AR_FLAGS_RTD3);
return 0;
}
static int icm_ar_get_route(struct tb *tb, u8 link, u8 depth, u64 *route)
{
struct icm_ar_pkg_get_route_response reply;
struct icm_ar_pkg_get_route request = {
.hdr = { .code = ICM_GET_ROUTE },
.link_info = depth << ICM_LINK_INFO_DEPTH_SHIFT | link,
};
int ret;
memset(&reply, 0, sizeof(reply));
ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),
1, ICM_TIMEOUT);
if (ret)
return ret;
if (reply.hdr.flags & ICM_FLAGS_ERROR)
return -EIO;
*route = get_route(reply.route_hi, reply.route_lo);
return 0;
}
static int icm_ar_get_boot_acl(struct tb *tb, uuid_t *uuids, size_t nuuids)
{
struct icm_ar_pkg_preboot_acl_response reply;
struct icm_ar_pkg_preboot_acl request = {
.hdr = { .code = ICM_PREBOOT_ACL },
};
int ret, i;
memset(&reply, 0, sizeof(reply));
ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),
1, ICM_TIMEOUT);
if (ret)
return ret;
if (reply.hdr.flags & ICM_FLAGS_ERROR)
return -EIO;
for (i = 0; i < nuuids; i++) {
u32 *uuid = (u32 *)&uuids[i];
uuid[0] = reply.acl[i].uuid_lo;
uuid[1] = reply.acl[i].uuid_hi;
if (uuid[0] == 0xffffffff && uuid[1] == 0xffffffff) {
/* Map empty entries to null UUID */
uuid[0] = 0;
uuid[1] = 0;
} else if (uuid[0] != 0 || uuid[1] != 0) {
/* Upper two DWs are always all ones */
uuid[2] = 0xffffffff;
uuid[3] = 0xffffffff;
}
}
return ret;
}
static int icm_ar_set_boot_acl(struct tb *tb, const uuid_t *uuids,
size_t nuuids)
{
struct icm_ar_pkg_preboot_acl_response reply;
struct icm_ar_pkg_preboot_acl request = {
.hdr = {
.code = ICM_PREBOOT_ACL,
.flags = ICM_FLAGS_WRITE,
},
};
int ret, i;
for (i = 0; i < nuuids; i++) {
const u32 *uuid = (const u32 *)&uuids[i];
if (uuid_is_null(&uuids[i])) {
/*
* Map null UUID to the empty (all ones) entries
* for ICM.
*/
request.acl[i].uuid_lo = 0xffffffff;
request.acl[i].uuid_hi = 0xffffffff;
} else {
/* Two high DWs need to be set to all ones */
if (uuid[2] != 0xffffffff || uuid[3] != 0xffffffff)
return -EINVAL;
request.acl[i].uuid_lo = uuid[0];
request.acl[i].uuid_hi = uuid[1];
}
}
memset(&reply, 0, sizeof(reply));
ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),
1, ICM_TIMEOUT);
if (ret)
return ret;
if (reply.hdr.flags & ICM_FLAGS_ERROR)
return -EIO;
return 0;
}
static int
icm_icl_driver_ready(struct tb *tb, enum tb_security_level *security_level,
u8 *proto_version, size_t *nboot_acl, bool *rpm)
{
struct icm_tr_pkg_driver_ready_response reply;
struct icm_pkg_driver_ready request = {
.hdr.code = ICM_DRIVER_READY,
};
int ret;
memset(&reply, 0, sizeof(reply));
ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),
1, 20000);
if (ret)
return ret;
if (proto_version)
*proto_version = (reply.info & ICM_TR_INFO_PROTO_VERSION_MASK) >>
ICM_TR_INFO_PROTO_VERSION_SHIFT;
/* Ice Lake always supports RTD3 */
if (rpm)
*rpm = true;
return 0;
}
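/*
 * Build the root switch UUID from the vendor specific NHI config
 * registers (VS_CAP_10/11). The upper two DWs are set to all ones.
 */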
static void icm_icl_set_uuid(struct tb *tb)
{
struct tb_nhi *nhi = tb->nhi;
u32 uuid[4];
pci_read_config_dword(nhi->pdev, VS_CAP_10, &uuid[0]);
pci_read_config_dword(nhi->pdev, VS_CAP_11, &uuid[1]);
uuid[2] = 0xffffffff;
uuid[3] = 0xffffffff;
tb->root_switch->uuid = kmemdup(uuid, sizeof(uuid), GFP_KERNEL);
}
static void
icm_icl_device_connected(struct tb *tb, const struct icm_pkg_header *hdr)
{
__icm_tr_device_connected(tb, hdr, true);
}
static void icm_icl_rtd3_veto(struct tb *tb, const struct icm_pkg_header *hdr)
{
const struct icm_icl_event_rtd3_veto *pkg =
(const struct icm_icl_event_rtd3_veto *)hdr;
tb_dbg(tb, "ICM rtd3 veto=0x%08x\n", pkg->veto_reason);
if (pkg->veto_reason)
icm_veto_begin(tb);
else
icm_veto_end(tb);
}
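/*
 * Poll the firmware status register for up to 10ms to see whether the
 * ICM firmware has completed NVM authentication.
 */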
static bool icm_tgl_is_supported(struct tb *tb)
{
unsigned long end = jiffies + msecs_to_jiffies(10);
do {
u32 val;
val = ioread32(tb->nhi->iobase + REG_FW_STS);
if (val & REG_FW_STS_NVM_AUTH_DONE)
return true;
usleep_range(100, 500);
} while (time_before(jiffies, end));
return false;
}
static void icm_handle_notification(struct work_struct *work)
{
struct icm_notification *n = container_of(work, typeof(*n), work);
struct tb *tb = n->tb;
struct icm *icm = tb_priv(tb);
mutex_lock(&tb->lock);
/*
* When the domain is stopped we flush its workqueue but before
* that the root switch is removed. In that case we should treat
* the queued events as being canceled.
*/
if (tb->root_switch) {
switch (n->pkg->code) {
case ICM_EVENT_DEVICE_CONNECTED:
icm->device_connected(tb, n->pkg);
break;
case ICM_EVENT_DEVICE_DISCONNECTED:
icm->device_disconnected(tb, n->pkg);
break;
case ICM_EVENT_XDOMAIN_CONNECTED:
if (tb_is_xdomain_enabled())
icm->xdomain_connected(tb, n->pkg);
break;
case ICM_EVENT_XDOMAIN_DISCONNECTED:
if (tb_is_xdomain_enabled())
icm->xdomain_disconnected(tb, n->pkg);
break;
case ICM_EVENT_RTD3_VETO:
icm->rtd3_veto(tb, n->pkg);
break;
}
}
mutex_unlock(&tb->lock);
kfree(n->pkg);
kfree(n);
}
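/*
 * Callback for incoming ICM events: copy the event packet and defer
 * handling to the domain workqueue.
 */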
static void icm_handle_event(struct tb *tb, enum tb_cfg_pkg_type type,
const void *buf, size_t size)
{
struct icm_notification *n;
n = kmalloc(sizeof(*n), GFP_KERNEL);
if (!n)
return;
n->pkg = kmemdup(buf, size, GFP_KERNEL);
if (!n->pkg) {
kfree(n);
return;
}
INIT_WORK(&n->work, icm_handle_notification);
n->tb = tb;
queue_work(tb->wq, &n->work);
}
static int
__icm_driver_ready(struct tb *tb, enum tb_security_level *security_level,
u8 *proto_version, size_t *nboot_acl, bool *rpm)
{
struct icm *icm = tb_priv(tb);
unsigned int retries = 50;
int ret;
ret = icm->driver_ready(tb, security_level, proto_version, nboot_acl,
rpm);
if (ret) {
tb_err(tb, "failed to send driver ready to ICM\n");
return ret;
}
/*
* Hold on here until the switch config space is accessible so
* that we can read root switch config successfully.
*/
do {
struct tb_cfg_result res;
u32 tmp;
res = tb_cfg_read_raw(tb->ctl, &tmp, 0, 0, TB_CFG_SWITCH,
0, 1, 100);
if (!res.err)
return 0;
msleep(50);
} while (--retries);
tb_err(tb, "failed to read root switch config space, giving up\n");
return -ETIMEDOUT;
}
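/*
 * Put the ARC to wait for a CIO reset event, re-start it and then
 * trigger the CIO reset through the upstream PCIe port. Fails with
 * -ENODEV if no upstream port was found earlier.
 */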
static int icm_firmware_reset(struct tb *tb, struct tb_nhi *nhi)
{
struct icm *icm = tb_priv(tb);
u32 val;
if (!icm->upstream_port)
return -ENODEV;
/* Put ARC to wait for CIO reset event to happen */
val = ioread32(nhi->iobase + REG_FW_STS);
val |= REG_FW_STS_CIO_RESET_REQ;
iowrite32(val, nhi->iobase + REG_FW_STS);
/* Re-start ARC */
val = ioread32(nhi->iobase + REG_FW_STS);
val |= REG_FW_STS_ICM_EN_INVERT;
val |= REG_FW_STS_ICM_EN_CPU;
iowrite32(val, nhi->iobase + REG_FW_STS);
/* Trigger CIO reset now */
return icm->cio_reset(tb);
}
static int icm_firmware_start(struct tb *tb, struct tb_nhi *nhi)
{
unsigned int retries = 10;
int ret;
u32 val;
/* Check if the ICM firmware is already running */
if (icm_firmware_running(nhi))
return 0;
dev_dbg(&nhi->pdev->dev, "starting ICM firmware\n");
ret = icm_firmware_reset(tb, nhi);
if (ret)
return ret;
/* Wait until the ICM firmware tells us it is up and running */
do {
/* Check that the ICM firmware is running */
val = ioread32(nhi->iobase + REG_FW_STS);
if (val & REG_FW_STS_NVM_AUTH_DONE)
return 0;
msleep(300);
} while (--retries);
return -ETIMEDOUT;
}
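/*
 * If both lanes (null ports) of the given physical port are already up,
 * disable them briefly and then re-enable them. Used right after
 * starting the ICM firmware when something was already connected.
 */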
static int icm_reset_phy_port(struct tb *tb, int phy_port)
{
struct icm *icm = tb_priv(tb);
u32 state0, state1;
int port0, port1;
u32 val0, val1;
int ret;
if (!icm->upstream_port)
return 0;
if (phy_port) {
port0 = 3;
port1 = 4;
} else {
port0 = 1;
port1 = 2;
}
/*
* Read link status of both null ports belonging to a single
* physical port.
*/
ret = pcie2cio_read(icm, TB_CFG_PORT, port0, PHY_PORT_CS1, &val0);
if (ret)
return ret;
ret = pcie2cio_read(icm, TB_CFG_PORT, port1, PHY_PORT_CS1, &val1);
if (ret)
return ret;
state0 = val0 & PHY_PORT_CS1_LINK_STATE_MASK;
state0 >>= PHY_PORT_CS1_LINK_STATE_SHIFT;
state1 = val1 & PHY_PORT_CS1_LINK_STATE_MASK;
state1 >>= PHY_PORT_CS1_LINK_STATE_SHIFT;
/* If they are both up we need to reset them now */
if (state0 != TB_PORT_UP || state1 != TB_PORT_UP)
return 0;
val0 |= PHY_PORT_CS1_LINK_DISABLE;
ret = pcie2cio_write(icm, TB_CFG_PORT, port0, PHY_PORT_CS1, val0);
if (ret)
return ret;
val1 |= PHY_PORT_CS1_LINK_DISABLE;
ret = pcie2cio_write(icm, TB_CFG_PORT, port1, PHY_PORT_CS1, val1);
if (ret)
return ret;
/* Wait a bit and then re-enable both ports */
usleep_range(10, 100);
ret = pcie2cio_read(icm, TB_CFG_PORT, port0, PHY_PORT_CS1, &val0);
if (ret)
return ret;
ret = pcie2cio_read(icm, TB_CFG_PORT, port1, PHY_PORT_CS1, &val1);
if (ret)
return ret;
val0 &= ~PHY_PORT_CS1_LINK_DISABLE;
ret = pcie2cio_write(icm, TB_CFG_PORT, port0, PHY_PORT_CS1, val0);
if (ret)
return ret;
val1 &= ~PHY_PORT_CS1_LINK_DISABLE;
return pcie2cio_write(icm, TB_CFG_PORT, port1, PHY_PORT_CS1, val1);
}
static int icm_firmware_init(struct tb *tb)
{
struct icm *icm = tb_priv(tb);
struct tb_nhi *nhi = tb->nhi;
int ret;
ret = icm_firmware_start(tb, nhi);
if (ret) {
dev_err(&nhi->pdev->dev, "could not start ICM firmware\n");
return ret;
}
if (icm->get_mode) {
ret = icm->get_mode(tb);
switch (ret) {
case NHI_FW_SAFE_MODE:
icm->safe_mode = true;
break;
case NHI_FW_CM_MODE:
/* Ask ICM to accept all Thunderbolt devices */
nhi_mailbox_cmd(nhi, NHI_MAILBOX_ALLOW_ALL_DEVS, 0);
break;
default:
if (ret < 0)
return ret;
tb_err(tb, "ICM firmware is in wrong mode: %u\n", ret);
return -ENODEV;
}
}
/*
* Reset both physical ports if there is anything connected to
* them already.
*/
ret = icm_reset_phy_port(tb, 0);
if (ret)
dev_warn(&nhi->pdev->dev, "failed to reset links on port0\n");
ret = icm_reset_phy_port(tb, 1);
if (ret)
dev_warn(&nhi->pdev->dev, "failed to reset links on port1\n");
return 0;
}
static int icm_driver_ready(struct tb *tb)
{
struct icm *icm = tb_priv(tb);
int ret;
ret = icm_firmware_init(tb);
if (ret)
return ret;
if (icm->safe_mode) {
tb_info(tb, "Thunderbolt host controller is in safe mode.\n");
tb_info(tb, "You need to update NVM firmware of the controller before it can be used.\n");
tb_info(tb, "For latest updates check https://thunderbolttechnology.net/updates.\n");
return 0;
}
ret = __icm_driver_ready(tb, &tb->security_level, &icm->proto_version,
&tb->nboot_acl, &icm->rpm);
if (ret)
return ret;
/*
* Make sure the number of supported preboot ACL matches what we
* expect or disable the whole feature.
*/
if (tb->nboot_acl > icm->max_boot_acl)
tb->nboot_acl = 0;
if (icm->proto_version >= 3)
tb_dbg(tb, "USB4 proxy operations supported\n");
return 0;
}
static int icm_suspend(struct tb *tb)
{
struct icm *icm = tb_priv(tb);
if (icm->save_devices)
icm->save_devices(tb);
nhi_mailbox_cmd(tb->nhi, NHI_MAILBOX_DRV_UNLOADS, 0);
return 0;
}
/*
* Mark all switches (except root switch) below this one unplugged. ICM
* firmware will send us an updated list of switches after we have sent
* it the driver ready command. If a switch is not in that list it will be
* removed when we perform rescan.
*/
static void icm_unplug_children(struct tb_switch *sw)
{
struct tb_port *port;
if (tb_route(sw))
sw->is_unplugged = true;
tb_switch_for_each_port(sw, port) {
if (port->xdomain)
port->xdomain->is_unplugged = true;
else if (tb_port_has_remote(port))
icm_unplug_children(port->remote->sw);
}
}
static int complete_rpm(struct device *dev, void *data)
{
struct tb_switch *sw = tb_to_switch(dev);
if (sw)
complete(&sw->rpm_complete);
return 0;
}
static void remove_unplugged_switch(struct tb_switch *sw)
{
struct device *parent = get_device(sw->dev.parent);
pm_runtime_get_sync(parent);
/*
* Complete rpm_complete for this switch and the switches below it,
* because tb_switch_remove() calls pm_runtime_get_sync() which then
* waits for it.
*/
complete_rpm(&sw->dev, NULL);
bus_for_each_dev(&tb_bus_type, &sw->dev, NULL, complete_rpm);
tb_switch_remove(sw);
pm_runtime_mark_last_busy(parent);
pm_runtime_put_autosuspend(parent);
put_device(parent);
}
static void icm_free_unplugged_children(struct tb_switch *sw)
{
struct tb_port *port;
tb_switch_for_each_port(sw, port) {
if (port->xdomain && port->xdomain->is_unplugged) {
tb_xdomain_remove(port->xdomain);
port->xdomain = NULL;
} else if (tb_port_has_remote(port)) {
if (port->remote->sw->is_unplugged) {
remove_unplugged_switch(port->remote->sw);
port->remote = NULL;
} else {
icm_free_unplugged_children(port->remote->sw);
}
}
}
}
static void icm_rescan_work(struct work_struct *work)
{
struct icm *icm = container_of(work, struct icm, rescan_work.work);
struct tb *tb = icm_to_tb(icm);
mutex_lock(&tb->lock);
if (tb->root_switch)
icm_free_unplugged_children(tb->root_switch);
mutex_unlock(&tb->lock);
}
static void icm_complete(struct tb *tb)
{
struct icm *icm = tb_priv(tb);
if (tb->nhi->going_away)
return;
/*
* If RTD3 was vetoed before we entered system suspend allow it
* again now before driver ready is sent. Firmware sends a new RTD3
* veto if it is still the case after we have sent it the driver ready
* command.
*/
icm_veto_end(tb);
icm_unplug_children(tb->root_switch);
/*
* Now all existing children should be resumed, start events
* from ICM to get updated status.
*/
__icm_driver_ready(tb, NULL, NULL, NULL, NULL);
/*
* We do not get notifications of devices that have been
* unplugged during suspend so schedule rescan to clean them up
* if any.
*/
queue_delayed_work(tb->wq, &icm->rescan_work, msecs_to_jiffies(500));
}
static int icm_runtime_suspend(struct tb *tb)
{
nhi_mailbox_cmd(tb->nhi, NHI_MAILBOX_DRV_UNLOADS, 0);
return 0;
}
static int icm_runtime_suspend_switch(struct tb_switch *sw)
{
if (tb_route(sw))
reinit_completion(&sw->rpm_complete);
return 0;
}
static int icm_runtime_resume_switch(struct tb_switch *sw)
{
if (tb_route(sw)) {
if (!wait_for_completion_timeout(&sw->rpm_complete,
msecs_to_jiffies(500))) {
dev_dbg(&sw->dev, "runtime resuming timed out\n");
}
}
return 0;
}
static int icm_runtime_resume(struct tb *tb)
{
/*
* We can reuse the same resume functionality as with system
* suspend.
*/
icm_complete(tb);
return 0;
}
static int icm_start(struct tb *tb)
{
struct icm *icm = tb_priv(tb);
int ret;
if (icm->safe_mode)
tb->root_switch = tb_switch_alloc_safe_mode(tb, &tb->dev, 0);
else
tb->root_switch = tb_switch_alloc(tb, &tb->dev, 0);
if (IS_ERR(tb->root_switch))
return PTR_ERR(tb->root_switch);
tb->root_switch->no_nvm_upgrade = !icm->can_upgrade_nvm;
tb->root_switch->rpm = icm->rpm;
if (icm->set_uuid)
icm->set_uuid(tb);
ret = tb_switch_add(tb->root_switch);
if (ret) {
tb_switch_put(tb->root_switch);
tb->root_switch = NULL;
}
return ret;
}
static void icm_stop(struct tb *tb)
{
struct icm *icm = tb_priv(tb);
cancel_delayed_work(&icm->rescan_work);
tb_switch_remove(tb->root_switch);
tb->root_switch = NULL;
nhi_mailbox_cmd(tb->nhi, NHI_MAILBOX_DRV_UNLOADS, 0);
kfree(icm->last_nvm_auth);
icm->last_nvm_auth = NULL;
}
static int icm_disconnect_pcie_paths(struct tb *tb)
{
return nhi_mailbox_cmd(tb->nhi, NHI_MAILBOX_DISCONNECT_PCIE_PATHS, 0);
}
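/*
 * Completion callback for the deferred NVM_AUTH router operation: log
 * the reply and stash it so that a later authenticate status query can
 * consume it.
 */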
static void icm_usb4_switch_nvm_auth_complete(void *data)
{
struct usb4_switch_nvm_auth *auth = data;
struct icm *icm = auth->icm;
struct tb *tb = icm_to_tb(icm);
tb_dbg(tb, "NVM_AUTH response for %llx flags %#x status %#x\n",
get_route(auth->reply.route_hi, auth->reply.route_lo),
auth->reply.hdr.flags, auth->reply.status);
mutex_lock(&tb->lock);
if (WARN_ON(icm->last_nvm_auth))
kfree(icm->last_nvm_auth);
icm->last_nvm_auth = auth;
mutex_unlock(&tb->lock);
}
static int icm_usb4_switch_nvm_authenticate(struct tb *tb, u64 route)
{
struct usb4_switch_nvm_auth *auth;
struct icm *icm = tb_priv(tb);
struct tb_cfg_request *req;
int ret;
auth = kzalloc(sizeof(*auth), GFP_KERNEL);
if (!auth)
return -ENOMEM;
auth->icm = icm;
auth->request.hdr.code = ICM_USB4_SWITCH_OP;
auth->request.route_hi = upper_32_bits(route);
auth->request.route_lo = lower_32_bits(route);
auth->request.opcode = USB4_SWITCH_OP_NVM_AUTH;
req = tb_cfg_request_alloc();
if (!req) {
ret = -ENOMEM;
goto err_free_auth;
}
req->match = icm_match;
req->copy = icm_copy;
req->request = &auth->request;
req->request_size = sizeof(auth->request);
req->request_type = TB_CFG_PKG_ICM_CMD;
req->response = &auth->reply;
req->npackets = 1;
req->response_size = sizeof(auth->reply);
req->response_type = TB_CFG_PKG_ICM_RESP;
tb_dbg(tb, "NVM_AUTH request for %llx\n", route);
mutex_lock(&icm->request_lock);
ret = tb_cfg_request(tb->ctl, req, icm_usb4_switch_nvm_auth_complete,
auth);
mutex_unlock(&icm->request_lock);
tb_cfg_request_put(req);
if (ret)
goto err_free_auth;
return 0;
err_free_auth:
kfree(auth);
return ret;
}
static int icm_usb4_switch_op(struct tb_switch *sw, u16 opcode, u32 *metadata,
u8 *status, const void *tx_data, size_t tx_data_len,
void *rx_data, size_t rx_data_len)
{
struct icm_usb4_switch_op_response reply;
struct icm_usb4_switch_op request;
struct tb *tb = sw->tb;
struct icm *icm = tb_priv(tb);
u64 route = tb_route(sw);
int ret;
/*
* USB4 router operation proxy is supported in firmware if the
* protocol version is 3 or higher.
*/
if (icm->proto_version < 3)
return -EOPNOTSUPP;
/*
* NVM_AUTH is a special USB4 proxy operation that does not
* return immediately so handle it separately.
*/
if (opcode == USB4_SWITCH_OP_NVM_AUTH)
return icm_usb4_switch_nvm_authenticate(tb, route);
memset(&request, 0, sizeof(request));
request.hdr.code = ICM_USB4_SWITCH_OP;
request.route_hi = upper_32_bits(route);
request.route_lo = lower_32_bits(route);
request.opcode = opcode;
if (metadata)
request.metadata = *metadata;
if (tx_data_len) {
request.data_len_valid |= ICM_USB4_SWITCH_DATA_VALID;
if (tx_data_len < ARRAY_SIZE(request.data))
request.data_len_valid |=
tx_data_len & ICM_USB4_SWITCH_DATA_LEN_MASK;
memcpy(request.data, tx_data, tx_data_len * sizeof(u32));
}
memset(&reply, 0, sizeof(reply));
ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply),
1, ICM_TIMEOUT);
if (ret)
return ret;
if (reply.hdr.flags & ICM_FLAGS_ERROR)
return -EIO;
if (status)
*status = reply.status;
if (metadata)
*metadata = reply.metadata;
if (rx_data_len)
memcpy(rx_data, reply.data, rx_data_len * sizeof(u32));
return 0;
}
static int icm_usb4_switch_nvm_authenticate_status(struct tb_switch *sw,
u32 *status)
{
struct usb4_switch_nvm_auth *auth;
struct tb *tb = sw->tb;
struct icm *icm = tb_priv(tb);
int ret = 0;
if (icm->proto_version < 3)
return -EOPNOTSUPP;
auth = icm->last_nvm_auth;
icm->last_nvm_auth = NULL;
if (auth && auth->reply.route_hi == sw->config.route_hi &&
auth->reply.route_lo == sw->config.route_lo) {
tb_dbg(tb, "NVM_AUTH found for %llx flags %#x status %#x\n",
tb_route(sw), auth->reply.hdr.flags, auth->reply.status);
if (auth->reply.hdr.flags & ICM_FLAGS_ERROR)
ret = -EIO;
else
*status = auth->reply.status;
} else {
*status = 0;
}
kfree(auth);
return ret;
}
/* Falcon Ridge */
static const struct tb_cm_ops icm_fr_ops = {
.driver_ready = icm_driver_ready,
.start = icm_start,
.stop = icm_stop,
.suspend = icm_suspend,
.complete = icm_complete,
.handle_event = icm_handle_event,
.approve_switch = icm_fr_approve_switch,
.add_switch_key = icm_fr_add_switch_key,
.challenge_switch_key = icm_fr_challenge_switch_key,
.disconnect_pcie_paths = icm_disconnect_pcie_paths,
.approve_xdomain_paths = icm_fr_approve_xdomain_paths,
.disconnect_xdomain_paths = icm_fr_disconnect_xdomain_paths,
};
/* Alpine Ridge */
static const struct tb_cm_ops icm_ar_ops = {
.driver_ready = icm_driver_ready,
.start = icm_start,
.stop = icm_stop,
.suspend = icm_suspend,
.complete = icm_complete,
.runtime_suspend = icm_runtime_suspend,
.runtime_resume = icm_runtime_resume,
.runtime_suspend_switch = icm_runtime_suspend_switch,
.runtime_resume_switch = icm_runtime_resume_switch,
.handle_event = icm_handle_event,
.get_boot_acl = icm_ar_get_boot_acl,
.set_boot_acl = icm_ar_set_boot_acl,
.approve_switch = icm_fr_approve_switch,
.add_switch_key = icm_fr_add_switch_key,
.challenge_switch_key = icm_fr_challenge_switch_key,
.disconnect_pcie_paths = icm_disconnect_pcie_paths,
.approve_xdomain_paths = icm_fr_approve_xdomain_paths,
.disconnect_xdomain_paths = icm_fr_disconnect_xdomain_paths,
};
/* Titan Ridge */
static const struct tb_cm_ops icm_tr_ops = {
.driver_ready = icm_driver_ready,
.start = icm_start,
.stop = icm_stop,
.suspend = icm_suspend,
.complete = icm_complete,
.runtime_suspend = icm_runtime_suspend,
.runtime_resume = icm_runtime_resume,
.runtime_suspend_switch = icm_runtime_suspend_switch,
.runtime_resume_switch = icm_runtime_resume_switch,
.handle_event = icm_handle_event,
.get_boot_acl = icm_ar_get_boot_acl,
.set_boot_acl = icm_ar_set_boot_acl,
.approve_switch = icm_tr_approve_switch,
.add_switch_key = icm_tr_add_switch_key,
.challenge_switch_key = icm_tr_challenge_switch_key,
.disconnect_pcie_paths = icm_disconnect_pcie_paths,
.approve_xdomain_paths = icm_tr_approve_xdomain_paths,
.disconnect_xdomain_paths = icm_tr_disconnect_xdomain_paths,
.usb4_switch_op = icm_usb4_switch_op,
.usb4_switch_nvm_authenticate_status =
icm_usb4_switch_nvm_authenticate_status,
};
/* Ice Lake */
static const struct tb_cm_ops icm_icl_ops = {
.driver_ready = icm_driver_ready,
.start = icm_start,
.stop = icm_stop,
.complete = icm_complete,
.runtime_suspend = icm_runtime_suspend,
.runtime_resume = icm_runtime_resume,
.handle_event = icm_handle_event,
.approve_xdomain_paths = icm_tr_approve_xdomain_paths,
.disconnect_xdomain_paths = icm_tr_disconnect_xdomain_paths,
.usb4_switch_op = icm_usb4_switch_op,
.usb4_switch_nvm_authenticate_status =
icm_usb4_switch_nvm_authenticate_status,
};
struct tb *icm_probe(struct tb_nhi *nhi)
{
struct icm *icm;
struct tb *tb;
tb = tb_domain_alloc(nhi, ICM_TIMEOUT, sizeof(struct icm));
if (!tb)
return NULL;
icm = tb_priv(tb);
INIT_DELAYED_WORK(&icm->rescan_work, icm_rescan_work);
mutex_init(&icm->request_lock);
switch (nhi->pdev->device) {
case PCI_DEVICE_ID_INTEL_FALCON_RIDGE_2C_NHI:
case PCI_DEVICE_ID_INTEL_FALCON_RIDGE_4C_NHI:
icm->can_upgrade_nvm = true;
icm->is_supported = icm_fr_is_supported;
icm->get_route = icm_fr_get_route;
icm->save_devices = icm_fr_save_devices;
icm->driver_ready = icm_fr_driver_ready;
icm->device_connected = icm_fr_device_connected;
icm->device_disconnected = icm_fr_device_disconnected;
icm->xdomain_connected = icm_fr_xdomain_connected;
icm->xdomain_disconnected = icm_fr_xdomain_disconnected;
tb->cm_ops = &icm_fr_ops;
break;
case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_2C_NHI:
case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_4C_NHI:
case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_LP_NHI:
case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_4C_NHI:
case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_2C_NHI:
icm->max_boot_acl = ICM_AR_PREBOOT_ACL_ENTRIES;
/*
* NVM upgrade has not been tested on Apple systems and
* they don't provide images publicly either. To be on
* the safe side prevent root switch NVM upgrade on Macs
* for now.
*/
icm->can_upgrade_nvm = !x86_apple_machine;
icm->is_supported = icm_ar_is_supported;
icm->cio_reset = icm_ar_cio_reset;
icm->get_mode = icm_ar_get_mode;
icm->get_route = icm_ar_get_route;
icm->save_devices = icm_fr_save_devices;
icm->driver_ready = icm_ar_driver_ready;
icm->device_connected = icm_fr_device_connected;
icm->device_disconnected = icm_fr_device_disconnected;
icm->xdomain_connected = icm_fr_xdomain_connected;
icm->xdomain_disconnected = icm_fr_xdomain_disconnected;
tb->cm_ops = &icm_ar_ops;
break;
case PCI_DEVICE_ID_INTEL_TITAN_RIDGE_2C_NHI:
case PCI_DEVICE_ID_INTEL_TITAN_RIDGE_4C_NHI:
icm->max_boot_acl = ICM_AR_PREBOOT_ACL_ENTRIES;
icm->can_upgrade_nvm = !x86_apple_machine;
icm->is_supported = icm_ar_is_supported;
icm->cio_reset = icm_tr_cio_reset;
icm->get_mode = icm_ar_get_mode;
icm->driver_ready = icm_tr_driver_ready;
icm->device_connected = icm_tr_device_connected;
icm->device_disconnected = icm_tr_device_disconnected;
icm->xdomain_connected = icm_tr_xdomain_connected;
icm->xdomain_disconnected = icm_tr_xdomain_disconnected;
tb->cm_ops = &icm_tr_ops;
break;
case PCI_DEVICE_ID_INTEL_ICL_NHI0:
case PCI_DEVICE_ID_INTEL_ICL_NHI1:
icm->is_supported = icm_fr_is_supported;
icm->driver_ready = icm_icl_driver_ready;
icm->set_uuid = icm_icl_set_uuid;
icm->device_connected = icm_icl_device_connected;
icm->device_disconnected = icm_tr_device_disconnected;
icm->xdomain_connected = icm_tr_xdomain_connected;
icm->xdomain_disconnected = icm_tr_xdomain_disconnected;
icm->rtd3_veto = icm_icl_rtd3_veto;
tb->cm_ops = &icm_icl_ops;
break;
case PCI_DEVICE_ID_INTEL_TGL_NHI0:
case PCI_DEVICE_ID_INTEL_TGL_NHI1:
case PCI_DEVICE_ID_INTEL_TGL_H_NHI0:
case PCI_DEVICE_ID_INTEL_TGL_H_NHI1:
case PCI_DEVICE_ID_INTEL_ADL_NHI0:
case PCI_DEVICE_ID_INTEL_ADL_NHI1:
case PCI_DEVICE_ID_INTEL_RPL_NHI0:
case PCI_DEVICE_ID_INTEL_RPL_NHI1:
case PCI_DEVICE_ID_INTEL_MTL_M_NHI0:
case PCI_DEVICE_ID_INTEL_MTL_P_NHI0:
case PCI_DEVICE_ID_INTEL_MTL_P_NHI1:
icm->is_supported = icm_tgl_is_supported;
icm->driver_ready = icm_icl_driver_ready;
icm->set_uuid = icm_icl_set_uuid;
icm->device_connected = icm_icl_device_connected;
icm->device_disconnected = icm_tr_device_disconnected;
icm->xdomain_connected = icm_tr_xdomain_connected;
icm->xdomain_disconnected = icm_tr_xdomain_disconnected;
icm->rtd3_veto = icm_icl_rtd3_veto;
tb->cm_ops = &icm_icl_ops;
break;
case PCI_DEVICE_ID_INTEL_MAPLE_RIDGE_2C_NHI:
case PCI_DEVICE_ID_INTEL_MAPLE_RIDGE_4C_NHI:
icm->is_supported = icm_tgl_is_supported;
icm->get_mode = icm_ar_get_mode;
icm->driver_ready = icm_tr_driver_ready;
icm->device_connected = icm_tr_device_connected;
icm->device_disconnected = icm_tr_device_disconnected;
icm->xdomain_connected = icm_tr_xdomain_connected;
icm->xdomain_disconnected = icm_tr_xdomain_disconnected;
tb->cm_ops = &icm_tr_ops;
break;
}
if (!icm->is_supported || !icm->is_supported(tb)) {
dev_dbg(&nhi->pdev->dev, "ICM not supported on this controller\n");
tb_domain_put(tb);
return NULL;
}
tb_dbg(tb, "using firmware connection manager\n");
return tb;
}
| linux-master | drivers/thunderbolt/icm.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Thunderbolt driver - quirks
*
* Copyright (c) 2020 Mario Limonciello <[email protected]>
*/
#include "tb.h"
static void quirk_force_power_link(struct tb_switch *sw)
{
sw->quirks |= QUIRK_FORCE_POWER_LINK_CONTROLLER;
tb_sw_dbg(sw, "forcing power to link controller\n");
}
static void quirk_dp_credit_allocation(struct tb_switch *sw)
{
if (sw->credit_allocation && sw->min_dp_main_credits == 56) {
sw->min_dp_main_credits = 18;
tb_sw_dbg(sw, "quirked DP main: %u\n", sw->min_dp_main_credits);
}
}
static void quirk_clx_disable(struct tb_switch *sw)
{
sw->quirks |= QUIRK_NO_CLX;
tb_sw_dbg(sw, "disabling CL states\n");
}
static void quirk_usb3_maximum_bandwidth(struct tb_switch *sw)
{
struct tb_port *port;
tb_switch_for_each_port(sw, port) {
if (!tb_port_is_usb3_down(port))
continue;
port->max_bw = 16376;
tb_port_dbg(port, "USB3 maximum bandwidth limited to %u Mb/s\n",
port->max_bw);
}
}
struct tb_quirk {
u16 hw_vendor_id;
u16 hw_device_id;
u16 vendor;
u16 device;
void (*hook)(struct tb_switch *sw);
};
static const struct tb_quirk tb_quirks[] = {
/* Dell WD19TB supports self-authentication on unplug */
{ 0x0000, 0x0000, 0x00d4, 0xb070, quirk_force_power_link },
{ 0x0000, 0x0000, 0x00d4, 0xb071, quirk_force_power_link },
/*
* Intel Goshen Ridge NVM 27 and before report wrong number of
* DP buffers.
*/
{ 0x8087, 0x0b26, 0x0000, 0x0000, quirk_dp_credit_allocation },
/*
* Limit the maximum USB3 bandwidth for the following Intel USB4
* host routers due to a hardware issue.
*/
{ 0x8087, PCI_DEVICE_ID_INTEL_ADL_NHI0, 0x0000, 0x0000,
quirk_usb3_maximum_bandwidth },
{ 0x8087, PCI_DEVICE_ID_INTEL_ADL_NHI1, 0x0000, 0x0000,
quirk_usb3_maximum_bandwidth },
{ 0x8087, PCI_DEVICE_ID_INTEL_RPL_NHI0, 0x0000, 0x0000,
quirk_usb3_maximum_bandwidth },
{ 0x8087, PCI_DEVICE_ID_INTEL_RPL_NHI1, 0x0000, 0x0000,
quirk_usb3_maximum_bandwidth },
{ 0x8087, PCI_DEVICE_ID_INTEL_MTL_M_NHI0, 0x0000, 0x0000,
quirk_usb3_maximum_bandwidth },
{ 0x8087, PCI_DEVICE_ID_INTEL_MTL_P_NHI0, 0x0000, 0x0000,
quirk_usb3_maximum_bandwidth },
{ 0x8087, PCI_DEVICE_ID_INTEL_MTL_P_NHI1, 0x0000, 0x0000,
quirk_usb3_maximum_bandwidth },
{ 0x8087, PCI_DEVICE_ID_INTEL_BARLOW_RIDGE_HOST_80G_NHI, 0x0000, 0x0000,
quirk_usb3_maximum_bandwidth },
{ 0x8087, PCI_DEVICE_ID_INTEL_BARLOW_RIDGE_HOST_40G_NHI, 0x0000, 0x0000,
quirk_usb3_maximum_bandwidth },
{ 0x8087, PCI_DEVICE_ID_INTEL_BARLOW_RIDGE_HUB_80G_BRIDGE, 0x0000, 0x0000,
quirk_usb3_maximum_bandwidth },
{ 0x8087, PCI_DEVICE_ID_INTEL_BARLOW_RIDGE_HUB_40G_BRIDGE, 0x0000, 0x0000,
quirk_usb3_maximum_bandwidth },
/*
* CLx is not supported on AMD USB4 Yellow Carp and Pink Sardine platforms.
*/
{ 0x0438, 0x0208, 0x0000, 0x0000, quirk_clx_disable },
{ 0x0438, 0x0209, 0x0000, 0x0000, quirk_clx_disable },
{ 0x0438, 0x020a, 0x0000, 0x0000, quirk_clx_disable },
{ 0x0438, 0x020b, 0x0000, 0x0000, quirk_clx_disable },
};
/**
* tb_check_quirks() - Check for quirks to apply
* @sw: Thunderbolt switch
*
* Apply any quirks for the Thunderbolt controller.
*/
void tb_check_quirks(struct tb_switch *sw)
{
int i;
for (i = 0; i < ARRAY_SIZE(tb_quirks); i++) {
const struct tb_quirk *q = &tb_quirks[i];
if (q->hw_vendor_id && q->hw_vendor_id != sw->config.vendor_id)
continue;
if (q->hw_device_id && q->hw_device_id != sw->config.device_id)
continue;
if (q->vendor && q->vendor != sw->vendor)
continue;
if (q->device && q->device != sw->device)
continue;
tb_sw_dbg(sw, "running %ps\n", q->hook);
q->hook(sw);
}
}
| linux-master | drivers/thunderbolt/quirks.c |
// SPDX-License-Identifier: GPL-2.0
/*
* USB4 specific functionality
*
* Copyright (C) 2019, Intel Corporation
* Authors: Mika Westerberg <[email protected]>
* Rajmohan Mani <[email protected]>
*/
#include <linux/delay.h>
#include <linux/ktime.h>
#include <linux/units.h>
#include "sb_regs.h"
#include "tb.h"
#define USB4_DATA_RETRIES 3
#define USB4_DATA_DWORDS 16
enum usb4_sb_target {
USB4_SB_TARGET_ROUTER,
USB4_SB_TARGET_PARTNER,
USB4_SB_TARGET_RETIMER,
};
#define USB4_NVM_READ_OFFSET_MASK GENMASK(23, 2)
#define USB4_NVM_READ_OFFSET_SHIFT 2
#define USB4_NVM_READ_LENGTH_MASK GENMASK(27, 24)
#define USB4_NVM_READ_LENGTH_SHIFT 24
#define USB4_NVM_SET_OFFSET_MASK USB4_NVM_READ_OFFSET_MASK
#define USB4_NVM_SET_OFFSET_SHIFT USB4_NVM_READ_OFFSET_SHIFT
#define USB4_DROM_ADDRESS_MASK GENMASK(14, 2)
#define USB4_DROM_ADDRESS_SHIFT 2
#define USB4_DROM_SIZE_MASK GENMASK(19, 15)
#define USB4_DROM_SIZE_SHIFT 15
#define USB4_NVM_SECTOR_SIZE_MASK GENMASK(23, 0)
#define USB4_BA_LENGTH_MASK GENMASK(7, 0)
#define USB4_BA_INDEX_MASK GENMASK(15, 0)
enum usb4_ba_index {
USB4_BA_MAX_USB3 = 0x1,
USB4_BA_MIN_DP_AUX = 0x2,
USB4_BA_MIN_DP_MAIN = 0x3,
USB4_BA_MAX_PCIE = 0x4,
USB4_BA_MAX_HI = 0x5,
};
#define USB4_BA_VALUE_MASK GENMASK(31, 16)
#define USB4_BA_VALUE_SHIFT 16
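/*
 * Run a USB4 router operation natively through the router config space:
 * write the metadata and data dwords, set the opcode with the OV bit,
 * wait for the operation to complete and then read back the status,
 * metadata and data.
 */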
static int usb4_native_switch_op(struct tb_switch *sw, u16 opcode,
u32 *metadata, u8 *status,
const void *tx_data, size_t tx_dwords,
void *rx_data, size_t rx_dwords)
{
u32 val;
int ret;
if (metadata) {
ret = tb_sw_write(sw, metadata, TB_CFG_SWITCH, ROUTER_CS_25, 1);
if (ret)
return ret;
}
if (tx_dwords) {
ret = tb_sw_write(sw, tx_data, TB_CFG_SWITCH, ROUTER_CS_9,
tx_dwords);
if (ret)
return ret;
}
val = opcode | ROUTER_CS_26_OV;
ret = tb_sw_write(sw, &val, TB_CFG_SWITCH, ROUTER_CS_26, 1);
if (ret)
return ret;
ret = tb_switch_wait_for_bit(sw, ROUTER_CS_26, ROUTER_CS_26_OV, 0, 500);
if (ret)
return ret;
ret = tb_sw_read(sw, &val, TB_CFG_SWITCH, ROUTER_CS_26, 1);
if (ret)
return ret;
if (val & ROUTER_CS_26_ONS)
return -EOPNOTSUPP;
if (status)
*status = (val & ROUTER_CS_26_STATUS_MASK) >>
ROUTER_CS_26_STATUS_SHIFT;
if (metadata) {
ret = tb_sw_read(sw, metadata, TB_CFG_SWITCH, ROUTER_CS_25, 1);
if (ret)
return ret;
}
if (rx_dwords) {
ret = tb_sw_read(sw, rx_data, TB_CFG_SWITCH, ROUTER_CS_9,
rx_dwords);
if (ret)
return ret;
}
return 0;
}
static int __usb4_switch_op(struct tb_switch *sw, u16 opcode, u32 *metadata,
u8 *status, const void *tx_data, size_t tx_dwords,
void *rx_data, size_t rx_dwords)
{
const struct tb_cm_ops *cm_ops = sw->tb->cm_ops;
if (tx_dwords > USB4_DATA_DWORDS || rx_dwords > USB4_DATA_DWORDS)
return -EINVAL;
/*
* If the connection manager implementation provides USB4 router
* operation proxy callback, call it here instead of running the
* operation natively.
*/
if (cm_ops->usb4_switch_op) {
int ret;
ret = cm_ops->usb4_switch_op(sw, opcode, metadata, status,
tx_data, tx_dwords, rx_data,
rx_dwords);
if (ret != -EOPNOTSUPP)
return ret;
/*
* If the proxy was not supported then run the native
* router operation instead.
*/
}
return usb4_native_switch_op(sw, opcode, metadata, status, tx_data,
tx_dwords, rx_data, rx_dwords);
}
static inline int usb4_switch_op(struct tb_switch *sw, u16 opcode,
u32 *metadata, u8 *status)
{
return __usb4_switch_op(sw, opcode, metadata, status, NULL, 0, NULL, 0);
}
static inline int usb4_switch_op_data(struct tb_switch *sw, u16 opcode,
u32 *metadata, u8 *status,
const void *tx_data, size_t tx_dwords,
void *rx_data, size_t rx_dwords)
{
return __usb4_switch_op(sw, opcode, metadata, status, tx_data,
tx_dwords, rx_data, rx_dwords);
}
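/*
 * Check the wake status bits of the router and its USB4 ports and
 * report wakeup events to the PM core for the devices that are allowed
 * to wake the system.
 */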
static void usb4_switch_check_wakes(struct tb_switch *sw)
{
bool wakeup_usb4 = false;
struct usb4_port *usb4;
struct tb_port *port;
bool wakeup = false;
u32 val;
if (!device_may_wakeup(&sw->dev))
return;
if (tb_route(sw)) {
if (tb_sw_read(sw, &val, TB_CFG_SWITCH, ROUTER_CS_6, 1))
return;
tb_sw_dbg(sw, "PCIe wake: %s, USB3 wake: %s\n",
(val & ROUTER_CS_6_WOPS) ? "yes" : "no",
(val & ROUTER_CS_6_WOUS) ? "yes" : "no");
wakeup = val & (ROUTER_CS_6_WOPS | ROUTER_CS_6_WOUS);
}
/*
* Check all downstream ports for USB4 wake,
* connection wake and disconnection wake.
*/
tb_switch_for_each_port(sw, port) {
if (!port->cap_usb4)
continue;
if (tb_port_read(port, &val, TB_CFG_PORT,
port->cap_usb4 + PORT_CS_18, 1))
break;
tb_port_dbg(port, "USB4 wake: %s, connection wake: %s, disconnection wake: %s\n",
(val & PORT_CS_18_WOU4S) ? "yes" : "no",
(val & PORT_CS_18_WOCS) ? "yes" : "no",
(val & PORT_CS_18_WODS) ? "yes" : "no");
wakeup_usb4 = val & (PORT_CS_18_WOU4S | PORT_CS_18_WOCS |
PORT_CS_18_WODS);
usb4 = port->usb4;
if (device_may_wakeup(&usb4->dev) && wakeup_usb4)
pm_wakeup_event(&usb4->dev, 0);
wakeup |= wakeup_usb4;
}
if (wakeup)
pm_wakeup_event(&sw->dev, 0);
}
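/* Returns true if the port reports a USB4 link (PORT_CS_18_TCM not set) */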
static bool link_is_usb4(struct tb_port *port)
{
u32 val;
if (!port->cap_usb4)
return false;
if (tb_port_read(port, &val, TB_CFG_PORT,
port->cap_usb4 + PORT_CS_18, 1))
return false;
return !(val & PORT_CS_18_TCM);
}
/**
* usb4_switch_setup() - Additional setup for USB4 device
* @sw: USB4 router to setup
*
* USB4 routers need additional settings in order to enable all the
* tunneling. This function enables USB and PCIe tunneling if it can be
* enabled (e.g the parent switch also supports them). If USB tunneling
* is not available for some reason (like that there is Thunderbolt 3
* switch upstream) then the internal xHCI controller is enabled
* instead.
*
* This does not set the configuration valid bit of the router. To do
* that call usb4_switch_configuration_valid().
*/
int usb4_switch_setup(struct tb_switch *sw)
{
struct tb_switch *parent = tb_switch_parent(sw);
struct tb_port *down;
bool tbt3, xhci;
u32 val = 0;
int ret;
usb4_switch_check_wakes(sw);
if (!tb_route(sw))
return 0;
ret = tb_sw_read(sw, &val, TB_CFG_SWITCH, ROUTER_CS_6, 1);
if (ret)
return ret;
down = tb_switch_downstream_port(sw);
sw->link_usb4 = link_is_usb4(down);
tb_sw_dbg(sw, "link: %s\n", sw->link_usb4 ? "USB4" : "TBT");
xhci = val & ROUTER_CS_6_HCI;
tbt3 = !(val & ROUTER_CS_6_TNS);
tb_sw_dbg(sw, "TBT3 support: %s, xHCI: %s\n",
tbt3 ? "yes" : "no", xhci ? "yes" : "no");
ret = tb_sw_read(sw, &val, TB_CFG_SWITCH, ROUTER_CS_5, 1);
if (ret)
return ret;
if (tb_acpi_may_tunnel_usb3() && sw->link_usb4 &&
tb_switch_find_port(parent, TB_TYPE_USB3_DOWN)) {
val |= ROUTER_CS_5_UTO;
xhci = false;
}
/*
* Only enable PCIe tunneling if the parent router supports it
* and it is not disabled.
*/
if (tb_acpi_may_tunnel_pcie() &&
tb_switch_find_port(parent, TB_TYPE_PCIE_DOWN)) {
val |= ROUTER_CS_5_PTO;
/*
* xHCI can be enabled if PCIe tunneling is supported
* and the parent does not have any USB3 downstream
* adapters (so we cannot do USB 3.x tunneling).
*/
if (xhci)
val |= ROUTER_CS_5_HCO;
}
/* TBT3 supported by the CM */
val |= ROUTER_CS_5_C3S;
return tb_sw_write(sw, &val, TB_CFG_SWITCH, ROUTER_CS_5, 1);
}
/**
* usb4_switch_configuration_valid() - Set tunneling configuration to be valid
* @sw: USB4 router
*
* Sets configuration valid bit for the router. Must be called before
* any tunnels can be set through the router and after
* usb4_switch_setup() has been called. Can be called for host and device
* routers (does nothing for the host router).
*
* Returns %0 on success and negative errno otherwise.
*/
int usb4_switch_configuration_valid(struct tb_switch *sw)
{
u32 val;
int ret;
if (!tb_route(sw))
return 0;
ret = tb_sw_read(sw, &val, TB_CFG_SWITCH, ROUTER_CS_5, 1);
if (ret)
return ret;
val |= ROUTER_CS_5_CV;
ret = tb_sw_write(sw, &val, TB_CFG_SWITCH, ROUTER_CS_5, 1);
if (ret)
return ret;
return tb_switch_wait_for_bit(sw, ROUTER_CS_6, ROUTER_CS_6_CR,
ROUTER_CS_6_CR, 50);
}
/**
* usb4_switch_read_uid() - Read UID from USB4 router
* @sw: USB4 router
* @uid: UID is stored here
*
* Reads 64-bit UID from USB4 router config space.
*/
int usb4_switch_read_uid(struct tb_switch *sw, u64 *uid)
{
return tb_sw_read(sw, uid, TB_CFG_SWITCH, ROUTER_CS_7, 2);
}
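/*
 * Read one block of the router DROM using the DROM_READ router
 * operation. Both @dwaddress and @dwords are in dwords.
 */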
static int usb4_switch_drom_read_block(void *data,
unsigned int dwaddress, void *buf,
size_t dwords)
{
struct tb_switch *sw = data;
u8 status = 0;
u32 metadata;
int ret;
metadata = (dwords << USB4_DROM_SIZE_SHIFT) & USB4_DROM_SIZE_MASK;
metadata |= (dwaddress << USB4_DROM_ADDRESS_SHIFT) &
USB4_DROM_ADDRESS_MASK;
ret = usb4_switch_op_data(sw, USB4_SWITCH_OP_DROM_READ, &metadata,
&status, NULL, 0, buf, dwords);
if (ret)
return ret;
return status ? -EIO : 0;
}
/**
* usb4_switch_drom_read() - Read arbitrary bytes from USB4 router DROM
* @sw: USB4 router
* @address: Byte address inside DROM to start reading
* @buf: Buffer where the DROM content is stored
* @size: Number of bytes to read from DROM
*
* Uses USB4 router operations to read router DROM. For devices this
* should always work but for hosts it may return %-EOPNOTSUPP in which
* case the host router does not have DROM.
*/
int usb4_switch_drom_read(struct tb_switch *sw, unsigned int address, void *buf,
size_t size)
{
return tb_nvm_read_data(address, buf, size, USB4_DATA_RETRIES,
usb4_switch_drom_read_block, sw);
}
/**
* usb4_switch_lane_bonding_possible() - Are conditions met for lane bonding
* @sw: USB4 router
*
* Checks whether conditions are met so that lane bonding can be
* established with the upstream router. Call only for device routers.
*/
bool usb4_switch_lane_bonding_possible(struct tb_switch *sw)
{
struct tb_port *up;
int ret;
u32 val;
up = tb_upstream_port(sw);
ret = tb_port_read(up, &val, TB_CFG_PORT, up->cap_usb4 + PORT_CS_18, 1);
if (ret)
return false;
return !!(val & PORT_CS_18_BE);
}
/**
* usb4_switch_set_wake() - Enable/disable wake
* @sw: USB4 router
* @flags: Wakeup flags (%0 to disable)
*
* Enables/disables router to wake up from sleep.
*/
int usb4_switch_set_wake(struct tb_switch *sw, unsigned int flags)
{
struct usb4_port *usb4;
struct tb_port *port;
u64 route = tb_route(sw);
u32 val;
int ret;
/*
* Enable wakes coming from all USB4 downstream ports (from
* child routers). For device routers do this also for the
* upstream USB4 port.
*/
tb_switch_for_each_port(sw, port) {
if (!tb_port_is_null(port))
continue;
if (!route && tb_is_upstream_port(port))
continue;
if (!port->cap_usb4)
continue;
ret = tb_port_read(port, &val, TB_CFG_PORT,
port->cap_usb4 + PORT_CS_19, 1);
if (ret)
return ret;
val &= ~(PORT_CS_19_WOC | PORT_CS_19_WOD | PORT_CS_19_WOU4);
if (tb_is_upstream_port(port)) {
val |= PORT_CS_19_WOU4;
} else {
bool configured = val & PORT_CS_19_PC;
usb4 = port->usb4;
if (((flags & TB_WAKE_ON_CONNECT) |
device_may_wakeup(&usb4->dev)) && !configured)
val |= PORT_CS_19_WOC;
if (((flags & TB_WAKE_ON_DISCONNECT) |
device_may_wakeup(&usb4->dev)) && configured)
val |= PORT_CS_19_WOD;
if ((flags & TB_WAKE_ON_USB4) && configured)
val |= PORT_CS_19_WOU4;
}
ret = tb_port_write(port, &val, TB_CFG_PORT,
port->cap_usb4 + PORT_CS_19, 1);
if (ret)
return ret;
}
/*
* Enable wakes from PCIe, USB 3.x and DP on this router. Only
* needed for device routers.
*/
if (route) {
ret = tb_sw_read(sw, &val, TB_CFG_SWITCH, ROUTER_CS_5, 1);
if (ret)
return ret;
val &= ~(ROUTER_CS_5_WOP | ROUTER_CS_5_WOU | ROUTER_CS_5_WOD);
if (flags & TB_WAKE_ON_USB3)
val |= ROUTER_CS_5_WOU;
if (flags & TB_WAKE_ON_PCIE)
val |= ROUTER_CS_5_WOP;
if (flags & TB_WAKE_ON_DP)
val |= ROUTER_CS_5_WOD;
ret = tb_sw_write(sw, &val, TB_CFG_SWITCH, ROUTER_CS_5, 1);
if (ret)
return ret;
}
return 0;
}
/**
* usb4_switch_set_sleep() - Prepare the router to enter sleep
* @sw: USB4 router
*
* Sets sleep bit for the router. Returns when the router sleep ready
* bit has been asserted.
*/
int usb4_switch_set_sleep(struct tb_switch *sw)
{
int ret;
u32 val;
/* Set sleep bit and wait for sleep ready to be asserted */
ret = tb_sw_read(sw, &val, TB_CFG_SWITCH, ROUTER_CS_5, 1);
if (ret)
return ret;
val |= ROUTER_CS_5_SLP;
ret = tb_sw_write(sw, &val, TB_CFG_SWITCH, ROUTER_CS_5, 1);
if (ret)
return ret;
return tb_switch_wait_for_bit(sw, ROUTER_CS_6, ROUTER_CS_6_SLPR,
ROUTER_CS_6_SLPR, 500);
}
/**
* usb4_switch_nvm_sector_size() - Return router NVM sector size
* @sw: USB4 router
*
* If the router supports NVM operations this function returns the NVM
* sector size in bytes. If NVM operations are not supported returns
* %-EOPNOTSUPP.
*/
int usb4_switch_nvm_sector_size(struct tb_switch *sw)
{
u32 metadata;
u8 status;
int ret;
ret = usb4_switch_op(sw, USB4_SWITCH_OP_NVM_SECTOR_SIZE, &metadata,
&status);
if (ret)
return ret;
if (status)
return status == 0x2 ? -EOPNOTSUPP : -EIO;
return metadata & USB4_NVM_SECTOR_SIZE_MASK;
}
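/*
 * Read one block of the router NVM using the NVM_READ router operation.
 * Both @dwaddress and @dwords are in dwords.
 */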
static int usb4_switch_nvm_read_block(void *data,
unsigned int dwaddress, void *buf, size_t dwords)
{
struct tb_switch *sw = data;
u8 status = 0;
u32 metadata;
int ret;
metadata = (dwords << USB4_NVM_READ_LENGTH_SHIFT) &
USB4_NVM_READ_LENGTH_MASK;
metadata |= (dwaddress << USB4_NVM_READ_OFFSET_SHIFT) &
USB4_NVM_READ_OFFSET_MASK;
ret = usb4_switch_op_data(sw, USB4_SWITCH_OP_NVM_READ, &metadata,
&status, NULL, 0, buf, dwords);
if (ret)
return ret;
return status ? -EIO : 0;
}
/**
* usb4_switch_nvm_read() - Read arbitrary bytes from router NVM
* @sw: USB4 router
* @address: Starting address in bytes
* @buf: Read data is placed here
* @size: How many bytes to read
*
* Reads NVM contents of the router. If NVM is not supported returns
* %-EOPNOTSUPP.
*/
int usb4_switch_nvm_read(struct tb_switch *sw, unsigned int address, void *buf,
size_t size)
{
return tb_nvm_read_data(address, buf, size, USB4_DATA_RETRIES,
usb4_switch_nvm_read_block, sw);
}
/**
* usb4_switch_nvm_set_offset() - Set NVM write offset
* @sw: USB4 router
* @address: Start offset
*
* Explicitly sets NVM write offset. Normally when writing to NVM this
* is done automatically by usb4_switch_nvm_write().
*
* Returns %0 on success and negative errno if there was a failure.
*/
int usb4_switch_nvm_set_offset(struct tb_switch *sw, unsigned int address)
{
u32 metadata, dwaddress;
u8 status = 0;
int ret;
dwaddress = address / 4;
metadata = (dwaddress << USB4_NVM_SET_OFFSET_SHIFT) &
USB4_NVM_SET_OFFSET_MASK;
ret = usb4_switch_op(sw, USB4_SWITCH_OP_NVM_SET_OFFSET, &metadata,
&status);
if (ret)
return ret;
return status ? -EIO : 0;
}
static int usb4_switch_nvm_write_next_block(void *data, unsigned int dwaddress,
const void *buf, size_t dwords)
{
struct tb_switch *sw = data;
u8 status;
int ret;
ret = usb4_switch_op_data(sw, USB4_SWITCH_OP_NVM_WRITE, NULL, &status,
buf, dwords, NULL, 0);
if (ret)
return ret;
return status ? -EIO : 0;
}
/**
* usb4_switch_nvm_write() - Write to the router NVM
* @sw: USB4 router
* @address: Start address where to write in bytes
* @buf: Pointer to the data to write
* @size: Size of @buf in bytes
*
* Writes @buf to the router NVM using USB4 router operations. If NVM
* write is not supported returns %-EOPNOTSUPP.
*/
int usb4_switch_nvm_write(struct tb_switch *sw, unsigned int address,
const void *buf, size_t size)
{
int ret;
ret = usb4_switch_nvm_set_offset(sw, address);
if (ret)
return ret;
return tb_nvm_write_data(address, buf, size, USB4_DATA_RETRIES,
usb4_switch_nvm_write_next_block, sw);
}
/**
* usb4_switch_nvm_authenticate() - Authenticate new NVM
* @sw: USB4 router
*
* After the new NVM has been written via usb4_switch_nvm_write(), this
* function triggers NVM authentication process. The router gets power
* cycled and if the authentication is successful the new NVM starts
* running. In case of failure returns negative errno.
*
* The caller should call usb4_switch_nvm_authenticate_status() to read
* the status of the authentication after power cycle. It should be the
* first router operation to avoid the status being lost.
*/
int usb4_switch_nvm_authenticate(struct tb_switch *sw)
{
int ret;
ret = usb4_switch_op(sw, USB4_SWITCH_OP_NVM_AUTH, NULL, NULL);
switch (ret) {
/*
* The router is power cycled once NVM_AUTH is started so it is
* expected to get any of the following errors back.
*/
case -EACCES:
case -ENOTCONN:
case -ETIMEDOUT:
return 0;
default:
return ret;
}
}
/**
* usb4_switch_nvm_authenticate_status() - Read status of last NVM authenticate
* @sw: USB4 router
* @status: Status code of the operation
*
* The function checks if there is status available from the last NVM
* authenticate router operation. If there is status then %0 is returned
* and the status code is placed in @status. Returns negative errno in case
* of failure.
*
* Must be called before any other router operation.
*/
int usb4_switch_nvm_authenticate_status(struct tb_switch *sw, u32 *status)
{
const struct tb_cm_ops *cm_ops = sw->tb->cm_ops;
u16 opcode;
u32 val;
int ret;
if (cm_ops->usb4_switch_nvm_authenticate_status) {
ret = cm_ops->usb4_switch_nvm_authenticate_status(sw, status);
if (ret != -EOPNOTSUPP)
return ret;
}
ret = tb_sw_read(sw, &val, TB_CFG_SWITCH, ROUTER_CS_26, 1);
if (ret)
return ret;
/* Check that the opcode is correct */
opcode = val & ROUTER_CS_26_OPCODE_MASK;
if (opcode == USB4_SWITCH_OP_NVM_AUTH) {
if (val & ROUTER_CS_26_OV)
return -EBUSY;
if (val & ROUTER_CS_26_ONS)
return -EOPNOTSUPP;
*status = (val & ROUTER_CS_26_STATUS_MASK) >>
ROUTER_CS_26_STATUS_SHIFT;
} else {
*status = 0;
}
return 0;
}
/**
* usb4_switch_credits_init() - Read buffer allocation parameters
* @sw: USB4 router
*
* Reads @sw buffer allocation parameters and initializes @sw buffer
* allocation fields accordingly. Specifically @sw->credits_allocation
* is set to %true if these parameters can be used in tunneling.
*
* Returns %0 on success and negative errno otherwise.
*/
int usb4_switch_credits_init(struct tb_switch *sw)
{
int max_usb3, min_dp_aux, min_dp_main, max_pcie, max_dma;
int ret, length, i, nports;
const struct tb_port *port;
u32 data[USB4_DATA_DWORDS];
u32 metadata = 0;
u8 status = 0;
memset(data, 0, sizeof(data));
ret = usb4_switch_op_data(sw, USB4_SWITCH_OP_BUFFER_ALLOC, &metadata,
&status, NULL, 0, data, ARRAY_SIZE(data));
if (ret)
return ret;
if (status)
return -EIO;
length = metadata & USB4_BA_LENGTH_MASK;
if (WARN_ON(length > ARRAY_SIZE(data)))
return -EMSGSIZE;
max_usb3 = -1;
min_dp_aux = -1;
min_dp_main = -1;
max_pcie = -1;
max_dma = -1;
tb_sw_dbg(sw, "credit allocation parameters:\n");
for (i = 0; i < length; i++) {
u16 index, value;
index = data[i] & USB4_BA_INDEX_MASK;
value = (data[i] & USB4_BA_VALUE_MASK) >> USB4_BA_VALUE_SHIFT;
switch (index) {
case USB4_BA_MAX_USB3:
tb_sw_dbg(sw, " USB3: %u\n", value);
max_usb3 = value;
break;
case USB4_BA_MIN_DP_AUX:
tb_sw_dbg(sw, " DP AUX: %u\n", value);
min_dp_aux = value;
break;
case USB4_BA_MIN_DP_MAIN:
tb_sw_dbg(sw, " DP main: %u\n", value);
min_dp_main = value;
break;
case USB4_BA_MAX_PCIE:
tb_sw_dbg(sw, " PCIe: %u\n", value);
max_pcie = value;
break;
case USB4_BA_MAX_HI:
tb_sw_dbg(sw, " DMA: %u\n", value);
max_dma = value;
break;
default:
tb_sw_dbg(sw, " unknown credit allocation index %#x, skipping\n",
index);
break;
}
}
/*
* Validate the buffer allocation preferences. If we find
* issues, log a warning and fall back using the hard-coded
* values.
*/
/* Host router must report baMaxHI */
if (!tb_route(sw) && max_dma < 0) {
tb_sw_warn(sw, "host router is missing baMaxHI\n");
goto err_invalid;
}
nports = 0;
tb_switch_for_each_port(sw, port) {
if (tb_port_is_null(port))
nports++;
}
/* Must have DP buffer allocation (multiple USB4 ports) */
if (nports > 2 && (min_dp_aux < 0 || min_dp_main < 0)) {
tb_sw_warn(sw, "multiple USB4 ports require baMinDPaux/baMinDPmain\n");
goto err_invalid;
}
tb_switch_for_each_port(sw, port) {
if (tb_port_is_dpout(port) && min_dp_main < 0) {
tb_sw_warn(sw, "missing baMinDPmain");
goto err_invalid;
}
if ((tb_port_is_dpin(port) || tb_port_is_dpout(port)) &&
min_dp_aux < 0) {
tb_sw_warn(sw, "missing baMinDPaux");
goto err_invalid;
}
if ((tb_port_is_usb3_down(port) || tb_port_is_usb3_up(port)) &&
max_usb3 < 0) {
tb_sw_warn(sw, "missing baMaxUSB3");
goto err_invalid;
}
if ((tb_port_is_pcie_down(port) || tb_port_is_pcie_up(port)) &&
max_pcie < 0) {
tb_sw_warn(sw, "missing baMaxPCIe");
goto err_invalid;
}
}
/*
* Buffer allocation passed the validation so we can use it in
* path creation.
*/
sw->credit_allocation = true;
if (max_usb3 > 0)
sw->max_usb3_credits = max_usb3;
if (min_dp_aux > 0)
sw->min_dp_aux_credits = min_dp_aux;
if (min_dp_main > 0)
sw->min_dp_main_credits = min_dp_main;
if (max_pcie > 0)
sw->max_pcie_credits = max_pcie;
if (max_dma > 0)
sw->max_dma_credits = max_dma;
return 0;
err_invalid:
return -EINVAL;
}
/**
* usb4_switch_query_dp_resource() - Query availability of DP IN resource
* @sw: USB4 router
* @in: DP IN adapter
*
* For DP tunneling this function can be used to query availability of
* DP IN resource. Returns true if the resource is available for DP
* tunneling, false otherwise.
*/
bool usb4_switch_query_dp_resource(struct tb_switch *sw, struct tb_port *in)
{
u32 metadata = in->port;
u8 status;
int ret;
ret = usb4_switch_op(sw, USB4_SWITCH_OP_QUERY_DP_RESOURCE, &metadata,
&status);
/*
* If DP resource allocation is not supported assume it is
* always available.
*/
if (ret == -EOPNOTSUPP)
return true;
if (ret)
return false;
return !status;
}
/**
* usb4_switch_alloc_dp_resource() - Allocate DP IN resource
* @sw: USB4 router
* @in: DP IN adapter
*
* Allocates DP IN resource for DP tunneling using USB4 router
* operations. If the resource was allocated returns %0. Otherwise
* returns negative errno, in particular %-EBUSY if the resource is
* already allocated.
*/
int usb4_switch_alloc_dp_resource(struct tb_switch *sw, struct tb_port *in)
{
u32 metadata = in->port;
u8 status;
int ret;
ret = usb4_switch_op(sw, USB4_SWITCH_OP_ALLOC_DP_RESOURCE, &metadata,
&status);
if (ret == -EOPNOTSUPP)
return 0;
if (ret)
return ret;
return status ? -EBUSY : 0;
}
/**
* usb4_switch_dealloc_dp_resource() - Releases allocated DP IN resource
* @sw: USB4 router
* @in: DP IN adapter
*
 * Releases the previously allocated DP IN resource. Returns %0 in case
 * of success and negative errno otherwise.
*/
int usb4_switch_dealloc_dp_resource(struct tb_switch *sw, struct tb_port *in)
{
u32 metadata = in->port;
u8 status;
int ret;
ret = usb4_switch_op(sw, USB4_SWITCH_OP_DEALLOC_DP_RESOURCE, &metadata,
&status);
if (ret == -EOPNOTSUPP)
return 0;
if (ret)
return ret;
return status ? -EIO : 0;
}
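/*
 * Returns the zero-based index of @port among the router's USB4 ports
 * (lane 0 adapters that are not the upstream port). Used to map a USB4
 * port to the matching protocol adapter below.
 */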
static int usb4_port_idx(const struct tb_switch *sw, const struct tb_port *port)
{
struct tb_port *p;
int usb4_idx = 0;
/* Assume port is primary */
tb_switch_for_each_port(sw, p) {
if (!tb_port_is_null(p))
continue;
if (tb_is_upstream_port(p))
continue;
if (!p->link_nr) {
if (p == port)
break;
usb4_idx++;
}
}
return usb4_idx;
}
/**
* usb4_switch_map_pcie_down() - Map USB4 port to a PCIe downstream adapter
* @sw: USB4 router
* @port: USB4 port
*
* USB4 routers have direct mapping between USB4 ports and PCIe
* downstream adapters where the PCIe topology is extended. This
* function returns the corresponding downstream PCIe adapter or %NULL
* if no such mapping was possible.
*/
struct tb_port *usb4_switch_map_pcie_down(struct tb_switch *sw,
const struct tb_port *port)
{
int usb4_idx = usb4_port_idx(sw, port);
struct tb_port *p;
int pcie_idx = 0;
/* Find PCIe down port matching usb4_port */
tb_switch_for_each_port(sw, p) {
if (!tb_port_is_pcie_down(p))
continue;
if (pcie_idx == usb4_idx)
return p;
pcie_idx++;
}
return NULL;
}
/**
* usb4_switch_map_usb3_down() - Map USB4 port to a USB3 downstream adapter
* @sw: USB4 router
* @port: USB4 port
*
* USB4 routers have direct mapping between USB4 ports and USB 3.x
* downstream adapters where the USB 3.x topology is extended. This
* function returns the corresponding downstream USB 3.x adapter or
* %NULL if no such mapping was possible.
*/
struct tb_port *usb4_switch_map_usb3_down(struct tb_switch *sw,
const struct tb_port *port)
{
int usb4_idx = usb4_port_idx(sw, port);
struct tb_port *p;
int usb_idx = 0;
/* Find USB3 down port matching usb4_port */
tb_switch_for_each_port(sw, p) {
if (!tb_port_is_usb3_down(p))
continue;
if (usb_idx == usb4_idx)
return p;
usb_idx++;
}
return NULL;
}
/**
* usb4_switch_add_ports() - Add USB4 ports for this router
* @sw: USB4 router
*
 * For a USB4 router, finds all USB4 ports and registers devices for
 * each. Can be called for any router.
*
* Return %0 in case of success and negative errno in case of failure.
*/
int usb4_switch_add_ports(struct tb_switch *sw)
{
struct tb_port *port;
if (tb_switch_is_icm(sw) || !tb_switch_is_usb4(sw))
return 0;
tb_switch_for_each_port(sw, port) {
struct usb4_port *usb4;
if (!tb_port_is_null(port))
continue;
if (!port->cap_usb4)
continue;
usb4 = usb4_port_device_add(port);
if (IS_ERR(usb4)) {
usb4_switch_remove_ports(sw);
return PTR_ERR(usb4);
}
port->usb4 = usb4;
}
return 0;
}
/**
* usb4_switch_remove_ports() - Removes USB4 ports from this router
* @sw: USB4 router
*
* Unregisters previously registered USB4 ports.
*/
void usb4_switch_remove_ports(struct tb_switch *sw)
{
struct tb_port *port;
tb_switch_for_each_port(sw, port) {
if (port->usb4) {
usb4_port_device_remove(port->usb4);
port->usb4 = NULL;
}
}
}
/**
* usb4_port_unlock() - Unlock USB4 downstream port
* @port: USB4 port to unlock
*
* Unlocks USB4 downstream port so that the connection manager can
* access the router below this port.
*/
int usb4_port_unlock(struct tb_port *port)
{
int ret;
u32 val;
ret = tb_port_read(port, &val, TB_CFG_PORT, ADP_CS_4, 1);
if (ret)
return ret;
val &= ~ADP_CS_4_LCK;
return tb_port_write(port, &val, TB_CFG_PORT, ADP_CS_4, 1);
}
/**
* usb4_port_hotplug_enable() - Enables hotplug for a port
* @port: USB4 port to operate on
*
* Enables hot plug events on a given port. This is only intended
* to be used on lane, DP-IN, and DP-OUT adapters.
*/
int usb4_port_hotplug_enable(struct tb_port *port)
{
int ret;
u32 val;
ret = tb_port_read(port, &val, TB_CFG_PORT, ADP_CS_5, 1);
if (ret)
return ret;
val &= ~ADP_CS_5_DHP;
return tb_port_write(port, &val, TB_CFG_PORT, ADP_CS_5, 1);
}
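/* Sets or clears the Port Configured (PC) bit in PORT_CS_19 of @port */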
static int usb4_port_set_configured(struct tb_port *port, bool configured)
{
int ret;
u32 val;
if (!port->cap_usb4)
return -EINVAL;
ret = tb_port_read(port, &val, TB_CFG_PORT,
port->cap_usb4 + PORT_CS_19, 1);
if (ret)
return ret;
if (configured)
val |= PORT_CS_19_PC;
else
val &= ~PORT_CS_19_PC;
return tb_port_write(port, &val, TB_CFG_PORT,
port->cap_usb4 + PORT_CS_19, 1);
}
/**
* usb4_port_configure() - Set USB4 port configured
 * @port: USB4 port
*
* Sets the USB4 link to be configured for power management purposes.
*/
int usb4_port_configure(struct tb_port *port)
{
return usb4_port_set_configured(port, true);
}
/**
* usb4_port_unconfigure() - Set USB4 port unconfigured
 * @port: USB4 port
*
* Sets the USB4 link to be unconfigured for power management purposes.
*/
void usb4_port_unconfigure(struct tb_port *port)
{
usb4_port_set_configured(port, false);
}
static int usb4_set_xdomain_configured(struct tb_port *port, bool configured)
{
int ret;
u32 val;
if (!port->cap_usb4)
return -EINVAL;
ret = tb_port_read(port, &val, TB_CFG_PORT,
port->cap_usb4 + PORT_CS_19, 1);
if (ret)
return ret;
if (configured)
val |= PORT_CS_19_PID;
else
val &= ~PORT_CS_19_PID;
return tb_port_write(port, &val, TB_CFG_PORT,
port->cap_usb4 + PORT_CS_19, 1);
}
/**
* usb4_port_configure_xdomain() - Configure port for XDomain
* @port: USB4 port connected to another host
* @xd: XDomain that is connected to the port
*
* Marks the USB4 port as being connected to another host and updates
* the link type. Returns %0 in success and negative errno in failure.
*/
int usb4_port_configure_xdomain(struct tb_port *port, struct tb_xdomain *xd)
{
xd->link_usb4 = link_is_usb4(port);
return usb4_set_xdomain_configured(port, true);
}
/**
* usb4_port_unconfigure_xdomain() - Unconfigure port for XDomain
* @port: USB4 port that was connected to another host
*
* Clears USB4 port from being marked as XDomain.
*/
void usb4_port_unconfigure_xdomain(struct tb_port *port)
{
usb4_set_xdomain_configured(port, false);
}
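/*
 * Polls the given port register until the masked bits match @value or
 * @timeout_msec expires. Returns %0 on match and %-ETIMEDOUT otherwise.
 */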
static int usb4_port_wait_for_bit(struct tb_port *port, u32 offset, u32 bit,
u32 value, int timeout_msec)
{
ktime_t timeout = ktime_add_ms(ktime_get(), timeout_msec);
do {
u32 val;
int ret;
ret = tb_port_read(port, &val, TB_CFG_PORT, offset, 1);
if (ret)
return ret;
if ((val & bit) == value)
return 0;
usleep_range(50, 100);
} while (ktime_before(ktime_get(), timeout));
return -ETIMEDOUT;
}
static int usb4_port_read_data(struct tb_port *port, void *data, size_t dwords)
{
if (dwords > USB4_DATA_DWORDS)
return -EINVAL;
return tb_port_read(port, data, TB_CFG_PORT, port->cap_usb4 + PORT_CS_2,
dwords);
}
static int usb4_port_write_data(struct tb_port *port, const void *data,
size_t dwords)
{
if (dwords > USB4_DATA_DWORDS)
return -EINVAL;
return tb_port_write(port, data, TB_CFG_PORT, port->cap_usb4 + PORT_CS_2,
dwords);
}
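/*
 * Performs a USB4 port sideband register read: programs PORT_CS_1 with
 * the target, register and length, sets the pending bit and waits for
 * it to clear. NR (no response) maps to %-ENODEV and RC (result code)
 * to %-EIO. The data, if any, is then read from PORT_CS_2.
 */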
static int usb4_port_sb_read(struct tb_port *port, enum usb4_sb_target target,
u8 index, u8 reg, void *buf, u8 size)
{
size_t dwords = DIV_ROUND_UP(size, 4);
int ret;
u32 val;
if (!port->cap_usb4)
return -EINVAL;
val = reg;
val |= size << PORT_CS_1_LENGTH_SHIFT;
val |= (target << PORT_CS_1_TARGET_SHIFT) & PORT_CS_1_TARGET_MASK;
if (target == USB4_SB_TARGET_RETIMER)
val |= (index << PORT_CS_1_RETIMER_INDEX_SHIFT);
val |= PORT_CS_1_PND;
ret = tb_port_write(port, &val, TB_CFG_PORT,
port->cap_usb4 + PORT_CS_1, 1);
if (ret)
return ret;
ret = usb4_port_wait_for_bit(port, port->cap_usb4 + PORT_CS_1,
PORT_CS_1_PND, 0, 500);
if (ret)
return ret;
ret = tb_port_read(port, &val, TB_CFG_PORT,
port->cap_usb4 + PORT_CS_1, 1);
if (ret)
return ret;
if (val & PORT_CS_1_NR)
return -ENODEV;
if (val & PORT_CS_1_RC)
return -EIO;
return buf ? usb4_port_read_data(port, buf, dwords) : 0;
}
static int usb4_port_sb_write(struct tb_port *port, enum usb4_sb_target target,
u8 index, u8 reg, const void *buf, u8 size)
{
size_t dwords = DIV_ROUND_UP(size, 4);
int ret;
u32 val;
if (!port->cap_usb4)
return -EINVAL;
if (buf) {
ret = usb4_port_write_data(port, buf, dwords);
if (ret)
return ret;
}
val = reg;
val |= size << PORT_CS_1_LENGTH_SHIFT;
val |= PORT_CS_1_WNR_WRITE;
val |= (target << PORT_CS_1_TARGET_SHIFT) & PORT_CS_1_TARGET_MASK;
if (target == USB4_SB_TARGET_RETIMER)
val |= (index << PORT_CS_1_RETIMER_INDEX_SHIFT);
val |= PORT_CS_1_PND;
ret = tb_port_write(port, &val, TB_CFG_PORT,
port->cap_usb4 + PORT_CS_1, 1);
if (ret)
return ret;
ret = usb4_port_wait_for_bit(port, port->cap_usb4 + PORT_CS_1,
PORT_CS_1_PND, 0, 500);
if (ret)
return ret;
ret = tb_port_read(port, &val, TB_CFG_PORT,
port->cap_usb4 + PORT_CS_1, 1);
if (ret)
return ret;
if (val & PORT_CS_1_NR)
return -ENODEV;
if (val & PORT_CS_1_RC)
return -EIO;
return 0;
}
static int usb4_port_sb_opcode_err_to_errno(u32 val)
{
switch (val) {
case 0:
return 0;
case USB4_SB_OPCODE_ERR:
return -EAGAIN;
case USB4_SB_OPCODE_ONS:
return -EOPNOTSUPP;
default:
return -EIO;
}
}
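/*
 * Runs a sideband operation: writes @opcode to the USB4_SB_OPCODE
 * register and polls it until the target overwrites the opcode with a
 * result (%0 on success) or @timeout_msec expires.
 */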
static int usb4_port_sb_op(struct tb_port *port, enum usb4_sb_target target,
u8 index, enum usb4_sb_opcode opcode, int timeout_msec)
{
ktime_t timeout;
u32 val;
int ret;
val = opcode;
ret = usb4_port_sb_write(port, target, index, USB4_SB_OPCODE, &val,
sizeof(val));
if (ret)
return ret;
timeout = ktime_add_ms(ktime_get(), timeout_msec);
do {
/* Check results */
ret = usb4_port_sb_read(port, target, index, USB4_SB_OPCODE,
&val, sizeof(val));
if (ret)
return ret;
if (val != opcode)
return usb4_port_sb_opcode_err_to_errno(val);
} while (ktime_before(ktime_get(), timeout));
return -ETIMEDOUT;
}
static int usb4_port_set_router_offline(struct tb_port *port, bool offline)
{
u32 val = !offline;
int ret;
ret = usb4_port_sb_write(port, USB4_SB_TARGET_ROUTER, 0,
USB4_SB_METADATA, &val, sizeof(val));
if (ret)
return ret;
val = USB4_SB_OPCODE_ROUTER_OFFLINE;
return usb4_port_sb_write(port, USB4_SB_TARGET_ROUTER, 0,
USB4_SB_OPCODE, &val, sizeof(val));
}
/**
* usb4_port_router_offline() - Put the USB4 port to offline mode
* @port: USB4 port
*
* This function puts the USB4 port into offline mode. In this mode the
 * port does not react to hotplug events anymore. This needs to be
 * called before retimer access is done when the USB4 link is not up.
*
* Returns %0 in case of success and negative errno if there was an
* error.
*/
int usb4_port_router_offline(struct tb_port *port)
{
return usb4_port_set_router_offline(port, true);
}
/**
* usb4_port_router_online() - Put the USB4 port back to online
* @port: USB4 port
*
* Makes the USB4 port functional again.
*/
int usb4_port_router_online(struct tb_port *port)
{
return usb4_port_set_router_offline(port, false);
}
/**
* usb4_port_enumerate_retimers() - Send RT broadcast transaction
* @port: USB4 port
*
 * This forces the USB4 port to send a broadcast RT transaction which
 * makes the retimers on the link assign an index to themselves. Returns
* %0 in case of success and negative errno if there was an error.
*/
int usb4_port_enumerate_retimers(struct tb_port *port)
{
u32 val;
val = USB4_SB_OPCODE_ENUMERATE_RETIMERS;
return usb4_port_sb_write(port, USB4_SB_TARGET_ROUTER, 0,
USB4_SB_OPCODE, &val, sizeof(val));
}
/**
* usb4_port_clx_supported() - Check if CLx is supported by the link
* @port: Port to check for CLx support for
*
* PORT_CS_18_CPS bit reflects if the link supports CLx including
* active cables (if connected on the link).
*/
bool usb4_port_clx_supported(struct tb_port *port)
{
int ret;
u32 val;
ret = tb_port_read(port, &val, TB_CFG_PORT,
port->cap_usb4 + PORT_CS_18, 1);
if (ret)
return false;
return !!(val & PORT_CS_18_CPS);
}
/**
 * usb4_port_margining_caps() - Read USB4 port margining capabilities
* @port: USB4 port
* @caps: Array with at least two elements to hold the results
*
* Reads the USB4 port lane margining capabilities into @caps.
*/
int usb4_port_margining_caps(struct tb_port *port, u32 *caps)
{
int ret;
ret = usb4_port_sb_op(port, USB4_SB_TARGET_ROUTER, 0,
USB4_SB_OPCODE_READ_LANE_MARGINING_CAP, 500);
if (ret)
return ret;
return usb4_port_sb_read(port, USB4_SB_TARGET_ROUTER, 0,
USB4_SB_DATA, caps, sizeof(*caps) * 2);
}
/**
* usb4_port_hw_margin() - Run hardware lane margining on port
* @port: USB4 port
* @lanes: Which lanes to run (must match the port capabilities). Can be
* %0, %1 or %7.
* @ber_level: BER level contour value
* @timing: Perform timing margining instead of voltage
* @right_high: Use Right/high margin instead of left/low
* @results: Array with at least two elements to hold the results
*
* Runs hardware lane margining on USB4 port and returns the result in
* @results.
*/
int usb4_port_hw_margin(struct tb_port *port, unsigned int lanes,
unsigned int ber_level, bool timing, bool right_high,
u32 *results)
{
u32 val;
int ret;
val = lanes;
if (timing)
val |= USB4_MARGIN_HW_TIME;
if (right_high)
val |= USB4_MARGIN_HW_RH;
if (ber_level)
val |= (ber_level << USB4_MARGIN_HW_BER_SHIFT) &
USB4_MARGIN_HW_BER_MASK;
ret = usb4_port_sb_write(port, USB4_SB_TARGET_ROUTER, 0,
USB4_SB_METADATA, &val, sizeof(val));
if (ret)
return ret;
ret = usb4_port_sb_op(port, USB4_SB_TARGET_ROUTER, 0,
USB4_SB_OPCODE_RUN_HW_LANE_MARGINING, 2500);
if (ret)
return ret;
return usb4_port_sb_read(port, USB4_SB_TARGET_ROUTER, 0,
USB4_SB_DATA, results, sizeof(*results) * 2);
}
/**
* usb4_port_sw_margin() - Run software lane margining on port
* @port: USB4 port
* @lanes: Which lanes to run (must match the port capabilities). Can be
* %0, %1 or %7.
* @timing: Perform timing margining instead of voltage
* @right_high: Use Right/high margin instead of left/low
* @counter: What to do with the error counter
*
* Runs software lane margining on USB4 port. Read back the error
* counters by calling usb4_port_sw_margin_errors(). Returns %0 in
* success and negative errno otherwise.
*/
int usb4_port_sw_margin(struct tb_port *port, unsigned int lanes, bool timing,
bool right_high, u32 counter)
{
u32 val;
int ret;
val = lanes;
if (timing)
val |= USB4_MARGIN_SW_TIME;
if (right_high)
val |= USB4_MARGIN_SW_RH;
val |= (counter << USB4_MARGIN_SW_COUNTER_SHIFT) &
USB4_MARGIN_SW_COUNTER_MASK;
ret = usb4_port_sb_write(port, USB4_SB_TARGET_ROUTER, 0,
USB4_SB_METADATA, &val, sizeof(val));
if (ret)
return ret;
return usb4_port_sb_op(port, USB4_SB_TARGET_ROUTER, 0,
USB4_SB_OPCODE_RUN_SW_LANE_MARGINING, 2500);
}
/**
* usb4_port_sw_margin_errors() - Read the software margining error counters
* @port: USB4 port
* @errors: Error metadata is copied here.
*
* This reads back the software margining error counters from the port.
* Returns %0 in success and negative errno otherwise.
*/
int usb4_port_sw_margin_errors(struct tb_port *port, u32 *errors)
{
int ret;
ret = usb4_port_sb_op(port, USB4_SB_TARGET_ROUTER, 0,
USB4_SB_OPCODE_READ_SW_MARGIN_ERR, 150);
if (ret)
return ret;
return usb4_port_sb_read(port, USB4_SB_TARGET_ROUTER, 0,
USB4_SB_METADATA, errors, sizeof(*errors));
}
static inline int usb4_port_retimer_op(struct tb_port *port, u8 index,
enum usb4_sb_opcode opcode,
int timeout_msec)
{
return usb4_port_sb_op(port, USB4_SB_TARGET_RETIMER, index, opcode,
timeout_msec);
}
/**
* usb4_port_retimer_set_inbound_sbtx() - Enable sideband channel transactions
* @port: USB4 port
* @index: Retimer index
*
 * Enables sideband channel transactions on SBTX. Can be used when the
 * USB4 link does not go up, for example if there is no device connected.
*/
int usb4_port_retimer_set_inbound_sbtx(struct tb_port *port, u8 index)
{
int ret;
ret = usb4_port_retimer_op(port, index, USB4_SB_OPCODE_SET_INBOUND_SBTX,
500);
if (ret != -ENODEV)
return ret;
/*
* Per the USB4 retimer spec, the retimer is not required to
* send an RT (Retimer Transaction) response for the first
* SET_INBOUND_SBTX command
*/
return usb4_port_retimer_op(port, index, USB4_SB_OPCODE_SET_INBOUND_SBTX,
500);
}
/**
* usb4_port_retimer_unset_inbound_sbtx() - Disable sideband channel transactions
* @port: USB4 port
* @index: Retimer index
*
 * Disables sideband channel transactions on SBTX. The reverse of
* usb4_port_retimer_set_inbound_sbtx().
*/
int usb4_port_retimer_unset_inbound_sbtx(struct tb_port *port, u8 index)
{
return usb4_port_retimer_op(port, index,
USB4_SB_OPCODE_UNSET_INBOUND_SBTX, 500);
}
/**
* usb4_port_retimer_read() - Read from retimer sideband registers
* @port: USB4 port
* @index: Retimer index
* @reg: Sideband register to read
* @buf: Data from @reg is stored here
* @size: Number of bytes to read
*
* Function reads retimer sideband registers starting from @reg. The
* retimer is connected to @port at @index. Returns %0 in case of
* success, and read data is copied to @buf. If there is no retimer
* present at given @index returns %-ENODEV. In any other failure
* returns negative errno.
*/
int usb4_port_retimer_read(struct tb_port *port, u8 index, u8 reg, void *buf,
u8 size)
{
return usb4_port_sb_read(port, USB4_SB_TARGET_RETIMER, index, reg, buf,
size);
}
/**
* usb4_port_retimer_write() - Write to retimer sideband registers
* @port: USB4 port
* @index: Retimer index
* @reg: Sideband register to write
* @buf: Data that is written starting from @reg
* @size: Number of bytes to write
*
* Writes retimer sideband registers starting from @reg. The retimer is
* connected to @port at @index. Returns %0 in case of success. If there
* is no retimer present at given @index returns %-ENODEV. In any other
* failure returns negative errno.
*/
int usb4_port_retimer_write(struct tb_port *port, u8 index, u8 reg,
const void *buf, u8 size)
{
return usb4_port_sb_write(port, USB4_SB_TARGET_RETIMER, index, reg, buf,
size);
}
/**
* usb4_port_retimer_is_last() - Is the retimer last on-board retimer
* @port: USB4 port
* @index: Retimer index
*
 * If the retimer at @index is the last one (connected directly to the
 * Type-C port) this function returns %1. If it is not, returns %0. If
* the retimer is not present returns %-ENODEV. Otherwise returns
* negative errno.
*/
int usb4_port_retimer_is_last(struct tb_port *port, u8 index)
{
u32 metadata;
int ret;
ret = usb4_port_retimer_op(port, index, USB4_SB_OPCODE_QUERY_LAST_RETIMER,
500);
if (ret)
return ret;
ret = usb4_port_retimer_read(port, index, USB4_SB_METADATA, &metadata,
sizeof(metadata));
return ret ? ret : metadata & 1;
}
/**
* usb4_port_retimer_nvm_sector_size() - Read retimer NVM sector size
* @port: USB4 port
* @index: Retimer index
*
* Reads NVM sector size (in bytes) of a retimer at @index. This
* operation can be used to determine whether the retimer supports NVM
* upgrade for example. Returns sector size in bytes or negative errno
* in case of error. Specifically returns %-ENODEV if there is no
* retimer at @index.
*/
int usb4_port_retimer_nvm_sector_size(struct tb_port *port, u8 index)
{
u32 metadata;
int ret;
ret = usb4_port_retimer_op(port, index, USB4_SB_OPCODE_GET_NVM_SECTOR_SIZE,
500);
if (ret)
return ret;
ret = usb4_port_retimer_read(port, index, USB4_SB_METADATA, &metadata,
sizeof(metadata));
return ret ? ret : metadata & USB4_NVM_SECTOR_SIZE_MASK;
}
/**
* usb4_port_retimer_nvm_set_offset() - Set NVM write offset
* @port: USB4 port
* @index: Retimer index
* @address: Start offset
*
 * Explicitly sets NVM write offset. Normally when writing to NVM this is
* done automatically by usb4_port_retimer_nvm_write().
*
* Returns %0 in success and negative errno if there was a failure.
*/
int usb4_port_retimer_nvm_set_offset(struct tb_port *port, u8 index,
unsigned int address)
{
u32 metadata, dwaddress;
int ret;
dwaddress = address / 4;
metadata = (dwaddress << USB4_NVM_SET_OFFSET_SHIFT) &
USB4_NVM_SET_OFFSET_MASK;
ret = usb4_port_retimer_write(port, index, USB4_SB_METADATA, &metadata,
sizeof(metadata));
if (ret)
return ret;
return usb4_port_retimer_op(port, index, USB4_SB_OPCODE_NVM_SET_OFFSET,
500);
}
struct retimer_info {
struct tb_port *port;
u8 index;
};
static int usb4_port_retimer_nvm_write_next_block(void *data,
unsigned int dwaddress, const void *buf, size_t dwords)
{
const struct retimer_info *info = data;
struct tb_port *port = info->port;
u8 index = info->index;
int ret;
ret = usb4_port_retimer_write(port, index, USB4_SB_DATA,
buf, dwords * 4);
if (ret)
return ret;
return usb4_port_retimer_op(port, index,
USB4_SB_OPCODE_NVM_BLOCK_WRITE, 1000);
}
/**
* usb4_port_retimer_nvm_write() - Write to retimer NVM
* @port: USB4 port
* @index: Retimer index
* @address: Byte address where to start the write
* @buf: Data to write
* @size: Size in bytes how much to write
*
* Writes @size bytes from @buf to the retimer NVM. Used for NVM
* upgrade. Returns %0 if the data was written successfully and negative
* errno in case of failure. Specifically returns %-ENODEV if there is
* no retimer at @index.
*/
int usb4_port_retimer_nvm_write(struct tb_port *port, u8 index, unsigned int address,
const void *buf, size_t size)
{
struct retimer_info info = { .port = port, .index = index };
int ret;
ret = usb4_port_retimer_nvm_set_offset(port, index, address);
if (ret)
return ret;
return tb_nvm_write_data(address, buf, size, USB4_DATA_RETRIES,
usb4_port_retimer_nvm_write_next_block, &info);
}
/**
* usb4_port_retimer_nvm_authenticate() - Start retimer NVM upgrade
* @port: USB4 port
* @index: Retimer index
*
* After the new NVM image has been written via usb4_port_retimer_nvm_write()
* this function can be used to trigger the NVM upgrade process. If
 * successful the retimer restarts with the new NVM and may not have the
 * index set anymore, so one needs to call usb4_port_enumerate_retimers()
 * to force an index to be assigned.
*/
int usb4_port_retimer_nvm_authenticate(struct tb_port *port, u8 index)
{
u32 val;
/*
* We need to use the raw operation here because once the
* authentication completes the retimer index is not set anymore
* so we do not get back the status now.
*/
val = USB4_SB_OPCODE_NVM_AUTH_WRITE;
return usb4_port_sb_write(port, USB4_SB_TARGET_RETIMER, index,
USB4_SB_OPCODE, &val, sizeof(val));
}
/**
* usb4_port_retimer_nvm_authenticate_status() - Read status of NVM upgrade
* @port: USB4 port
* @index: Retimer index
* @status: Raw status code read from metadata
*
* This can be called after usb4_port_retimer_nvm_authenticate() and
* usb4_port_enumerate_retimers() to fetch status of the NVM upgrade.
*
* Returns %0 if the authentication status was successfully read. The
* completion metadata (the result) is then stored into @status. If
* reading the status fails, returns negative errno.
*/
int usb4_port_retimer_nvm_authenticate_status(struct tb_port *port, u8 index,
u32 *status)
{
u32 metadata, val;
int ret;
ret = usb4_port_retimer_read(port, index, USB4_SB_OPCODE, &val,
sizeof(val));
if (ret)
return ret;
ret = usb4_port_sb_opcode_err_to_errno(val);
switch (ret) {
case 0:
*status = 0;
return 0;
case -EAGAIN:
ret = usb4_port_retimer_read(port, index, USB4_SB_METADATA,
&metadata, sizeof(metadata));
if (ret)
return ret;
*status = metadata & USB4_SB_METADATA_NVM_AUTH_WRITE_MASK;
return 0;
default:
return ret;
}
}
static int usb4_port_retimer_nvm_read_block(void *data, unsigned int dwaddress,
void *buf, size_t dwords)
{
const struct retimer_info *info = data;
struct tb_port *port = info->port;
u8 index = info->index;
u32 metadata;
int ret;
metadata = dwaddress << USB4_NVM_READ_OFFSET_SHIFT;
if (dwords < USB4_DATA_DWORDS)
metadata |= dwords << USB4_NVM_READ_LENGTH_SHIFT;
ret = usb4_port_retimer_write(port, index, USB4_SB_METADATA, &metadata,
sizeof(metadata));
if (ret)
return ret;
ret = usb4_port_retimer_op(port, index, USB4_SB_OPCODE_NVM_READ, 500);
if (ret)
return ret;
return usb4_port_retimer_read(port, index, USB4_SB_DATA, buf,
dwords * 4);
}
/**
* usb4_port_retimer_nvm_read() - Read contents of retimer NVM
* @port: USB4 port
* @index: Retimer index
* @address: NVM address (in bytes) to start reading
* @buf: Data read from NVM is stored here
* @size: Number of bytes to read
*
* Reads retimer NVM and copies the contents to @buf. Returns %0 if the
* read was successful and negative errno in case of failure.
* Specifically returns %-ENODEV if there is no retimer at @index.
*/
int usb4_port_retimer_nvm_read(struct tb_port *port, u8 index,
unsigned int address, void *buf, size_t size)
{
struct retimer_info info = { .port = port, .index = index };
return tb_nvm_read_data(address, buf, size, USB4_DATA_RETRIES,
usb4_port_retimer_nvm_read_block, &info);
}
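/*
 * Rough sketch of how a caller might sequence the retimer NVM upgrade
 * helpers above (illustrative only, the in-tree retimer code may order
 * things differently):
 *
 *	usb4_port_retimer_set_inbound_sbtx(port, index);
 *	size = usb4_port_retimer_nvm_sector_size(port, index);
 *	usb4_port_retimer_nvm_write(port, index, 0, image, image_size);
 *	usb4_port_retimer_nvm_authenticate(port, index);
 *	usb4_port_enumerate_retimers(port);
 *	usb4_port_retimer_nvm_authenticate_status(port, index, &status);
 *	usb4_port_retimer_unset_inbound_sbtx(port, index);
 *
 * Here image and image_size stand for the new NVM image and are not
 * defined in this file.
 */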
static inline unsigned int
usb4_usb3_port_max_bandwidth(const struct tb_port *port, unsigned int bw)
{
/* Take the possible bandwidth limitation into account */
if (port->max_bw)
return min(bw, port->max_bw);
return bw;
}
/**
 * usb4_usb3_port_max_link_rate() - Maximum supported USB3 link rate
* @port: USB3 adapter port
*
* Return maximum supported link rate of a USB3 adapter in Mb/s.
* Negative errno in case of error.
*/
int usb4_usb3_port_max_link_rate(struct tb_port *port)
{
int ret, lr;
u32 val;
if (!tb_port_is_usb3_down(port) && !tb_port_is_usb3_up(port))
return -EINVAL;
ret = tb_port_read(port, &val, TB_CFG_PORT,
port->cap_adap + ADP_USB3_CS_4, 1);
if (ret)
return ret;
lr = (val & ADP_USB3_CS_4_MSLR_MASK) >> ADP_USB3_CS_4_MSLR_SHIFT;
ret = lr == ADP_USB3_CS_4_MSLR_20G ? 20000 : 10000;
return usb4_usb3_port_max_bandwidth(port, ret);
}
/**
* usb4_usb3_port_actual_link_rate() - Established USB3 link rate
* @port: USB3 adapter port
*
 * Returns the actual established link rate of a USB3 adapter in Mb/s.
 * If the link is not up returns %0, and negative errno in case of failure.
*/
int usb4_usb3_port_actual_link_rate(struct tb_port *port)
{
int ret, lr;
u32 val;
if (!tb_port_is_usb3_down(port) && !tb_port_is_usb3_up(port))
return -EINVAL;
ret = tb_port_read(port, &val, TB_CFG_PORT,
port->cap_adap + ADP_USB3_CS_4, 1);
if (ret)
return ret;
if (!(val & ADP_USB3_CS_4_ULV))
return 0;
lr = val & ADP_USB3_CS_4_ALR_MASK;
ret = lr == ADP_USB3_CS_4_ALR_20G ? 20000 : 10000;
return usb4_usb3_port_max_bandwidth(port, ret);
}
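/*
 * Sets or clears the CM Request (CMR) bit on a host router USB3
 * downstream adapter and waits for the adapter to mirror the value in
 * the HCA bit of ADP_USB3_CS_1. The allocated bandwidth helpers below
 * take this request before touching the bandwidth registers.
 */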
static int usb4_usb3_port_cm_request(struct tb_port *port, bool request)
{
int ret;
u32 val;
if (!tb_port_is_usb3_down(port))
return -EINVAL;
if (tb_route(port->sw))
return -EINVAL;
ret = tb_port_read(port, &val, TB_CFG_PORT,
port->cap_adap + ADP_USB3_CS_2, 1);
if (ret)
return ret;
if (request)
val |= ADP_USB3_CS_2_CMR;
else
val &= ~ADP_USB3_CS_2_CMR;
ret = tb_port_write(port, &val, TB_CFG_PORT,
port->cap_adap + ADP_USB3_CS_2, 1);
if (ret)
return ret;
/*
* We can use val here directly as the CMR bit is in the same place
* as HCA. Just mask out others.
*/
val &= ADP_USB3_CS_2_CMR;
return usb4_port_wait_for_bit(port, port->cap_adap + ADP_USB3_CS_1,
ADP_USB3_CS_1_HCA, val, 1500);
}
static inline int usb4_usb3_port_set_cm_request(struct tb_port *port)
{
return usb4_usb3_port_cm_request(port, true);
}
static inline int usb4_usb3_port_clear_cm_request(struct tb_port *port)
{
return usb4_usb3_port_cm_request(port, false);
}
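/*
 * The adapter registers express bandwidth in units of (512 << scale)
 * bits per 125 us microframe (8000 microframes per second). As an
 * illustrative example: bw = 1 with scale = 4 is 8192 bits/uframe,
 * i.e. 65536000 bits/s, which usb3_bw_to_mbps() rounds to 66 Mb/s.
 */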
static unsigned int usb3_bw_to_mbps(u32 bw, u8 scale)
{
unsigned long uframes;
uframes = bw * 512UL << scale;
return DIV_ROUND_CLOSEST(uframes * 8000, MEGA);
}
static u32 mbps_to_usb3_bw(unsigned int mbps, u8 scale)
{
unsigned long uframes;
/* 1 uframe is 1/8 ms (125 us) -> 1 / 8000 s */
uframes = ((unsigned long)mbps * MEGA) / 8000;
return DIV_ROUND_UP(uframes, 512UL << scale);
}
static int usb4_usb3_port_read_allocated_bandwidth(struct tb_port *port,
int *upstream_bw,
int *downstream_bw)
{
u32 val, bw, scale;
int ret;
ret = tb_port_read(port, &val, TB_CFG_PORT,
port->cap_adap + ADP_USB3_CS_2, 1);
if (ret)
return ret;
ret = tb_port_read(port, &scale, TB_CFG_PORT,
port->cap_adap + ADP_USB3_CS_3, 1);
if (ret)
return ret;
scale &= ADP_USB3_CS_3_SCALE_MASK;
bw = val & ADP_USB3_CS_2_AUBW_MASK;
*upstream_bw = usb3_bw_to_mbps(bw, scale);
bw = (val & ADP_USB3_CS_2_ADBW_MASK) >> ADP_USB3_CS_2_ADBW_SHIFT;
*downstream_bw = usb3_bw_to_mbps(bw, scale);
return 0;
}
/**
* usb4_usb3_port_allocated_bandwidth() - Bandwidth allocated for USB3
* @port: USB3 adapter port
* @upstream_bw: Allocated upstream bandwidth is stored here
* @downstream_bw: Allocated downstream bandwidth is stored here
*
* Stores currently allocated USB3 bandwidth into @upstream_bw and
* @downstream_bw in Mb/s. Returns %0 in case of success and negative
* errno in failure.
*/
int usb4_usb3_port_allocated_bandwidth(struct tb_port *port, int *upstream_bw,
int *downstream_bw)
{
int ret;
ret = usb4_usb3_port_set_cm_request(port);
if (ret)
return ret;
ret = usb4_usb3_port_read_allocated_bandwidth(port, upstream_bw,
downstream_bw);
usb4_usb3_port_clear_cm_request(port);
return ret;
}
static int usb4_usb3_port_read_consumed_bandwidth(struct tb_port *port,
int *upstream_bw,
int *downstream_bw)
{
u32 val, bw, scale;
int ret;
ret = tb_port_read(port, &val, TB_CFG_PORT,
port->cap_adap + ADP_USB3_CS_1, 1);
if (ret)
return ret;
ret = tb_port_read(port, &scale, TB_CFG_PORT,
port->cap_adap + ADP_USB3_CS_3, 1);
if (ret)
return ret;
scale &= ADP_USB3_CS_3_SCALE_MASK;
bw = val & ADP_USB3_CS_1_CUBW_MASK;
*upstream_bw = usb3_bw_to_mbps(bw, scale);
bw = (val & ADP_USB3_CS_1_CDBW_MASK) >> ADP_USB3_CS_1_CDBW_SHIFT;
*downstream_bw = usb3_bw_to_mbps(bw, scale);
return 0;
}
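/*
 * Programs the allocated upstream/downstream bandwidth. Picks the
 * smallest scale for which both values fit in the 12-bit bandwidth
 * fields, writes the scale to ADP_USB3_CS_3 and the scaled values to
 * ADP_USB3_CS_2.
 */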
static int usb4_usb3_port_write_allocated_bandwidth(struct tb_port *port,
int upstream_bw,
int downstream_bw)
{
u32 val, ubw, dbw, scale;
int ret, max_bw;
/* Figure out suitable scale */
scale = 0;
max_bw = max(upstream_bw, downstream_bw);
while (scale < 64) {
if (mbps_to_usb3_bw(max_bw, scale) < 4096)
break;
scale++;
}
if (WARN_ON(scale >= 64))
return -EINVAL;
ret = tb_port_write(port, &scale, TB_CFG_PORT,
port->cap_adap + ADP_USB3_CS_3, 1);
if (ret)
return ret;
ubw = mbps_to_usb3_bw(upstream_bw, scale);
dbw = mbps_to_usb3_bw(downstream_bw, scale);
tb_port_dbg(port, "scaled bandwidth %u/%u, scale %u\n", ubw, dbw, scale);
ret = tb_port_read(port, &val, TB_CFG_PORT,
port->cap_adap + ADP_USB3_CS_2, 1);
if (ret)
return ret;
val &= ~(ADP_USB3_CS_2_AUBW_MASK | ADP_USB3_CS_2_ADBW_MASK);
val |= dbw << ADP_USB3_CS_2_ADBW_SHIFT;
val |= ubw;
return tb_port_write(port, &val, TB_CFG_PORT,
port->cap_adap + ADP_USB3_CS_2, 1);
}
/**
* usb4_usb3_port_allocate_bandwidth() - Allocate bandwidth for USB3
* @port: USB3 adapter port
* @upstream_bw: New upstream bandwidth
* @downstream_bw: New downstream bandwidth
*
* This can be used to set how much bandwidth is allocated for the USB3
* tunneled isochronous traffic. @upstream_bw and @downstream_bw are the
* new values programmed to the USB3 adapter allocation registers. If
* the values are lower than what is currently consumed the allocation
* is set to what is currently consumed instead (consumed bandwidth
* cannot be taken away by CM). The actual new values are returned in
* @upstream_bw and @downstream_bw.
*
* Returns %0 in case of success and negative errno if there was a
* failure.
*/
int usb4_usb3_port_allocate_bandwidth(struct tb_port *port, int *upstream_bw,
int *downstream_bw)
{
int ret, consumed_up, consumed_down, allocate_up, allocate_down;
ret = usb4_usb3_port_set_cm_request(port);
if (ret)
return ret;
ret = usb4_usb3_port_read_consumed_bandwidth(port, &consumed_up,
&consumed_down);
if (ret)
goto err_request;
/* Don't allow it go lower than what is consumed */
allocate_up = max(*upstream_bw, consumed_up);
allocate_down = max(*downstream_bw, consumed_down);
ret = usb4_usb3_port_write_allocated_bandwidth(port, allocate_up,
allocate_down);
if (ret)
goto err_request;
*upstream_bw = allocate_up;
*downstream_bw = allocate_down;
err_request:
usb4_usb3_port_clear_cm_request(port);
return ret;
}
/**
* usb4_usb3_port_release_bandwidth() - Release allocated USB3 bandwidth
* @port: USB3 adapter port
* @upstream_bw: New allocated upstream bandwidth
* @downstream_bw: New allocated downstream bandwidth
*
* Releases USB3 allocated bandwidth down to what is actually consumed.
* The new bandwidth is returned in @upstream_bw and @downstream_bw.
*
 * Returns %0 in case of success and negative errno in case of failure.
*/
int usb4_usb3_port_release_bandwidth(struct tb_port *port, int *upstream_bw,
int *downstream_bw)
{
int ret, consumed_up, consumed_down;
ret = usb4_usb3_port_set_cm_request(port);
if (ret)
return ret;
ret = usb4_usb3_port_read_consumed_bandwidth(port, &consumed_up,
&consumed_down);
if (ret)
goto err_request;
/*
* Always keep 1000 Mb/s to make sure xHCI has at least some
* bandwidth available for isochronous traffic.
*/
if (consumed_up < 1000)
consumed_up = 1000;
if (consumed_down < 1000)
consumed_down = 1000;
ret = usb4_usb3_port_write_allocated_bandwidth(port, consumed_up,
consumed_down);
if (ret)
goto err_request;
*upstream_bw = consumed_up;
*downstream_bw = consumed_down;
err_request:
usb4_usb3_port_clear_cm_request(port);
return ret;
}
static bool is_usb4_dpin(const struct tb_port *port)
{
if (!tb_port_is_dpin(port))
return false;
if (!tb_switch_is_usb4(port->sw))
return false;
return true;
}
/**
* usb4_dp_port_set_cm_id() - Assign CM ID to the DP IN adapter
* @port: DP IN adapter
* @cm_id: CM ID to assign
*
* Sets CM ID for the @port. Returns %0 on success and negative errno
 * otherwise. Specifically returns %-EOPNOTSUPP if the @port does not
* support this.
*/
int usb4_dp_port_set_cm_id(struct tb_port *port, int cm_id)
{
u32 val;
int ret;
if (!is_usb4_dpin(port))
return -EOPNOTSUPP;
ret = tb_port_read(port, &val, TB_CFG_PORT,
port->cap_adap + ADP_DP_CS_2, 1);
if (ret)
return ret;
val &= ~ADP_DP_CS_2_CM_ID_MASK;
val |= cm_id << ADP_DP_CS_2_CM_ID_SHIFT;
return tb_port_write(port, &val, TB_CFG_PORT,
port->cap_adap + ADP_DP_CS_2, 1);
}
/**
* usb4_dp_port_bandwidth_mode_supported() - Is the bandwidth allocation mode
* supported
* @port: DP IN adapter to check
*
 * Can be called for any DP IN adapter. Returns true if the adapter
* supports USB4 bandwidth allocation mode, false otherwise.
*/
bool usb4_dp_port_bandwidth_mode_supported(struct tb_port *port)
{
int ret;
u32 val;
if (!is_usb4_dpin(port))
return false;
ret = tb_port_read(port, &val, TB_CFG_PORT,
port->cap_adap + DP_LOCAL_CAP, 1);
if (ret)
return false;
return !!(val & DP_COMMON_CAP_BW_MODE);
}
/**
* usb4_dp_port_bandwidth_mode_enabled() - Is the bandwidth allocation mode
* enabled
* @port: DP IN adapter to check
*
 * Can be called for any DP IN adapter. Returns true if the bandwidth
* allocation mode has been enabled, false otherwise.
*/
bool usb4_dp_port_bandwidth_mode_enabled(struct tb_port *port)
{
int ret;
u32 val;
if (!is_usb4_dpin(port))
return false;
ret = tb_port_read(port, &val, TB_CFG_PORT,
port->cap_adap + ADP_DP_CS_8, 1);
if (ret)
return false;
return !!(val & ADP_DP_CS_8_DPME);
}
/**
* usb4_dp_port_set_cm_bandwidth_mode_supported() - Set/clear CM support for
* bandwidth allocation mode
* @port: DP IN adapter
* @supported: Does the CM support bandwidth allocation mode
*
 * Can be called for any DP IN adapter. Sets or clears the CM support bit
 * of the DP IN adapter. Returns %0 in case of success and negative errno
 * otherwise. Specifically returns %-EOPNOTSUPP if the passed in adapter
* does not support this.
*/
int usb4_dp_port_set_cm_bandwidth_mode_supported(struct tb_port *port,
bool supported)
{
u32 val;
int ret;
if (!is_usb4_dpin(port))
return -EOPNOTSUPP;
ret = tb_port_read(port, &val, TB_CFG_PORT,
port->cap_adap + ADP_DP_CS_2, 1);
if (ret)
return ret;
if (supported)
val |= ADP_DP_CS_2_CMMS;
else
val &= ~ADP_DP_CS_2_CMMS;
return tb_port_write(port, &val, TB_CFG_PORT,
port->cap_adap + ADP_DP_CS_2, 1);
}
/**
* usb4_dp_port_group_id() - Return Group ID assigned for the adapter
* @port: DP IN adapter
*
* Reads bandwidth allocation Group ID from the DP IN adapter and
 * returns it. If the adapter does not support setting the Group ID,
 * %-EOPNOTSUPP is returned.
*/
int usb4_dp_port_group_id(struct tb_port *port)
{
u32 val;
int ret;
if (!is_usb4_dpin(port))
return -EOPNOTSUPP;
ret = tb_port_read(port, &val, TB_CFG_PORT,
port->cap_adap + ADP_DP_CS_2, 1);
if (ret)
return ret;
return (val & ADP_DP_CS_2_GROUP_ID_MASK) >> ADP_DP_CS_2_GROUP_ID_SHIFT;
}
/**
* usb4_dp_port_set_group_id() - Set adapter Group ID
* @port: DP IN adapter
* @group_id: Group ID for the adapter
*
* Sets bandwidth allocation mode Group ID for the DP IN adapter.
* Returns %0 in case of success and negative errno otherwise.
* Specifically returns %-EOPNOTSUPP if the adapter does not support
* this.
*/
int usb4_dp_port_set_group_id(struct tb_port *port, int group_id)
{
u32 val;
int ret;
if (!is_usb4_dpin(port))
return -EOPNOTSUPP;
ret = tb_port_read(port, &val, TB_CFG_PORT,
port->cap_adap + ADP_DP_CS_2, 1);
if (ret)
return ret;
val &= ~ADP_DP_CS_2_GROUP_ID_MASK;
val |= group_id << ADP_DP_CS_2_GROUP_ID_SHIFT;
return tb_port_write(port, &val, TB_CFG_PORT,
port->cap_adap + ADP_DP_CS_2, 1);
}
/**
* usb4_dp_port_nrd() - Read non-reduced rate and lanes
* @port: DP IN adapter
* @rate: Non-reduced rate in Mb/s is placed here
* @lanes: Non-reduced lanes are placed here
*
* Reads the non-reduced rate and lanes from the DP IN adapter. Returns
* %0 in success and negative errno otherwise. Specifically returns
* %-EOPNOTSUPP if the adapter does not support this.
*/
int usb4_dp_port_nrd(struct tb_port *port, int *rate, int *lanes)
{
u32 val, tmp;
int ret;
if (!is_usb4_dpin(port))
return -EOPNOTSUPP;
ret = tb_port_read(port, &val, TB_CFG_PORT,
port->cap_adap + ADP_DP_CS_2, 1);
if (ret)
return ret;
tmp = (val & ADP_DP_CS_2_NRD_MLR_MASK) >> ADP_DP_CS_2_NRD_MLR_SHIFT;
switch (tmp) {
case DP_COMMON_CAP_RATE_RBR:
*rate = 1620;
break;
case DP_COMMON_CAP_RATE_HBR:
*rate = 2700;
break;
case DP_COMMON_CAP_RATE_HBR2:
*rate = 5400;
break;
case DP_COMMON_CAP_RATE_HBR3:
*rate = 8100;
break;
}
tmp = val & ADP_DP_CS_2_NRD_MLC_MASK;
switch (tmp) {
case DP_COMMON_CAP_1_LANE:
*lanes = 1;
break;
case DP_COMMON_CAP_2_LANES:
*lanes = 2;
break;
case DP_COMMON_CAP_4_LANES:
*lanes = 4;
break;
}
return 0;
}
/**
* usb4_dp_port_set_nrd() - Set non-reduced rate and lanes
* @port: DP IN adapter
* @rate: Non-reduced rate in Mb/s
* @lanes: Non-reduced lanes
*
* Before the capabilities reduction this function can be used to set
* the non-reduced values for the DP IN adapter. Returns %0 in success
* and negative errno otherwise. If the adapter does not support this
* %-EOPNOTSUPP is returned.
*/
int usb4_dp_port_set_nrd(struct tb_port *port, int rate, int lanes)
{
u32 val;
int ret;
if (!is_usb4_dpin(port))
return -EOPNOTSUPP;
ret = tb_port_read(port, &val, TB_CFG_PORT,
port->cap_adap + ADP_DP_CS_2, 1);
if (ret)
return ret;
val &= ~ADP_DP_CS_2_NRD_MLR_MASK;
switch (rate) {
case 1620:
break;
case 2700:
val |= (DP_COMMON_CAP_RATE_HBR << ADP_DP_CS_2_NRD_MLR_SHIFT)
& ADP_DP_CS_2_NRD_MLR_MASK;
break;
case 5400:
val |= (DP_COMMON_CAP_RATE_HBR2 << ADP_DP_CS_2_NRD_MLR_SHIFT)
& ADP_DP_CS_2_NRD_MLR_MASK;
break;
case 8100:
val |= (DP_COMMON_CAP_RATE_HBR3 << ADP_DP_CS_2_NRD_MLR_SHIFT)
& ADP_DP_CS_2_NRD_MLR_MASK;
break;
default:
return -EINVAL;
}
val &= ~ADP_DP_CS_2_NRD_MLC_MASK;
switch (lanes) {
case 1:
break;
case 2:
val |= DP_COMMON_CAP_2_LANES;
break;
case 4:
val |= DP_COMMON_CAP_4_LANES;
break;
default:
return -EINVAL;
}
return tb_port_write(port, &val, TB_CFG_PORT,
port->cap_adap + ADP_DP_CS_2, 1);
}
/**
* usb4_dp_port_granularity() - Return granularity for the bandwidth values
* @port: DP IN adapter
*
* Reads the programmed granularity from @port. If the DP IN adapter does
* not support bandwidth allocation mode returns %-EOPNOTSUPP and negative
* errno in other error cases.
*/
int usb4_dp_port_granularity(struct tb_port *port)
{
u32 val;
int ret;
if (!is_usb4_dpin(port))
return -EOPNOTSUPP;
ret = tb_port_read(port, &val, TB_CFG_PORT,
port->cap_adap + ADP_DP_CS_2, 1);
if (ret)
return ret;
val &= ADP_DP_CS_2_GR_MASK;
val >>= ADP_DP_CS_2_GR_SHIFT;
switch (val) {
case ADP_DP_CS_2_GR_0_25G:
return 250;
case ADP_DP_CS_2_GR_0_5G:
return 500;
case ADP_DP_CS_2_GR_1G:
return 1000;
}
return -EINVAL;
}
/**
* usb4_dp_port_set_granularity() - Set granularity for the bandwidth values
* @port: DP IN adapter
* @granularity: Granularity in Mb/s. Supported values: 1000, 500 and 250.
*
* Sets the granularity used with the estimated, allocated and requested
* bandwidth. Returns %0 in success and negative errno otherwise. If the
* adapter does not support this %-EOPNOTSUPP is returned.
*/
int usb4_dp_port_set_granularity(struct tb_port *port, int granularity)
{
u32 val;
int ret;
if (!is_usb4_dpin(port))
return -EOPNOTSUPP;
ret = tb_port_read(port, &val, TB_CFG_PORT,
port->cap_adap + ADP_DP_CS_2, 1);
if (ret)
return ret;
val &= ~ADP_DP_CS_2_GR_MASK;
switch (granularity) {
case 250:
val |= ADP_DP_CS_2_GR_0_25G << ADP_DP_CS_2_GR_SHIFT;
break;
case 500:
val |= ADP_DP_CS_2_GR_0_5G << ADP_DP_CS_2_GR_SHIFT;
break;
case 1000:
val |= ADP_DP_CS_2_GR_1G << ADP_DP_CS_2_GR_SHIFT;
break;
default:
return -EINVAL;
}
return tb_port_write(port, &val, TB_CFG_PORT,
port->cap_adap + ADP_DP_CS_2, 1);
}
/**
* usb4_dp_port_set_estimated_bandwidth() - Set estimated bandwidth
* @port: DP IN adapter
* @bw: Estimated bandwidth in Mb/s.
*
* Sets the estimated bandwidth to @bw. Set the granularity by calling
 * usb4_dp_port_set_granularity() before calling this. The @bw is rounded
 * down to the closest granularity multiple. Returns %0 in success
* and negative errno otherwise. Specifically returns %-EOPNOTSUPP if
* the adapter does not support this.
*/
int usb4_dp_port_set_estimated_bandwidth(struct tb_port *port, int bw)
{
u32 val, granularity;
int ret;
if (!is_usb4_dpin(port))
return -EOPNOTSUPP;
ret = usb4_dp_port_granularity(port);
if (ret < 0)
return ret;
granularity = ret;
ret = tb_port_read(port, &val, TB_CFG_PORT,
port->cap_adap + ADP_DP_CS_2, 1);
if (ret)
return ret;
val &= ~ADP_DP_CS_2_ESTIMATED_BW_MASK;
val |= (bw / granularity) << ADP_DP_CS_2_ESTIMATED_BW_SHIFT;
return tb_port_write(port, &val, TB_CFG_PORT,
port->cap_adap + ADP_DP_CS_2, 1);
}
/**
* usb4_dp_port_allocated_bandwidth() - Return allocated bandwidth
* @port: DP IN adapter
*
* Reads and returns allocated bandwidth for @port in Mb/s (taking into
* account the programmed granularity). Returns negative errno in case
* of error.
*/
int usb4_dp_port_allocated_bandwidth(struct tb_port *port)
{
u32 val, granularity;
int ret;
if (!is_usb4_dpin(port))
return -EOPNOTSUPP;
ret = usb4_dp_port_granularity(port);
if (ret < 0)
return ret;
granularity = ret;
ret = tb_port_read(port, &val, TB_CFG_PORT,
port->cap_adap + DP_STATUS, 1);
if (ret)
return ret;
val &= DP_STATUS_ALLOCATED_BW_MASK;
val >>= DP_STATUS_ALLOCATED_BW_SHIFT;
return val * granularity;
}
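/* Sets or clears the CM ack (CA) bit in ADP_DP_CS_2 of the DP IN adapter */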
static int __usb4_dp_port_set_cm_ack(struct tb_port *port, bool ack)
{
u32 val;
int ret;
ret = tb_port_read(port, &val, TB_CFG_PORT,
port->cap_adap + ADP_DP_CS_2, 1);
if (ret)
return ret;
if (ack)
val |= ADP_DP_CS_2_CA;
else
val &= ~ADP_DP_CS_2_CA;
return tb_port_write(port, &val, TB_CFG_PORT,
port->cap_adap + ADP_DP_CS_2, 1);
}
static inline int usb4_dp_port_set_cm_ack(struct tb_port *port)
{
return __usb4_dp_port_set_cm_ack(port, true);
}
static int usb4_dp_port_wait_and_clear_cm_ack(struct tb_port *port,
int timeout_msec)
{
ktime_t end;
u32 val;
int ret;
ret = __usb4_dp_port_set_cm_ack(port, false);
if (ret)
return ret;
end = ktime_add_ms(ktime_get(), timeout_msec);
do {
ret = tb_port_read(port, &val, TB_CFG_PORT,
port->cap_adap + ADP_DP_CS_8, 1);
if (ret)
return ret;
if (!(val & ADP_DP_CS_8_DR))
break;
usleep_range(50, 100);
} while (ktime_before(ktime_get(), end));
if (val & ADP_DP_CS_8_DR)
return -ETIMEDOUT;
ret = tb_port_read(port, &val, TB_CFG_PORT,
port->cap_adap + ADP_DP_CS_2, 1);
if (ret)
return ret;
val &= ~ADP_DP_CS_2_CA;
return tb_port_write(port, &val, TB_CFG_PORT,
port->cap_adap + ADP_DP_CS_2, 1);
}
/**
* usb4_dp_port_allocate_bandwidth() - Set allocated bandwidth
* @port: DP IN adapter
* @bw: New allocated bandwidth in Mb/s
*
* Communicates the new allocated bandwidth with the DPCD (graphics
* driver). Takes into account the programmed granularity. Returns %0 in
* success and negative errno in case of error.
*/
int usb4_dp_port_allocate_bandwidth(struct tb_port *port, int bw)
{
u32 val, granularity;
int ret;
if (!is_usb4_dpin(port))
return -EOPNOTSUPP;
ret = usb4_dp_port_granularity(port);
if (ret < 0)
return ret;
granularity = ret;
ret = tb_port_read(port, &val, TB_CFG_PORT,
port->cap_adap + DP_STATUS, 1);
if (ret)
return ret;
val &= ~DP_STATUS_ALLOCATED_BW_MASK;
val |= (bw / granularity) << DP_STATUS_ALLOCATED_BW_SHIFT;
ret = tb_port_write(port, &val, TB_CFG_PORT,
port->cap_adap + DP_STATUS, 1);
if (ret)
return ret;
ret = usb4_dp_port_set_cm_ack(port);
if (ret)
return ret;
return usb4_dp_port_wait_and_clear_cm_ack(port, 500);
}
/**
* usb4_dp_port_requested_bandwidth() - Read requested bandwidth
* @port: DP IN adapter
*
* Reads the DPCD (graphics driver) requested bandwidth and returns it
* in Mb/s. Takes the programmed granularity into account. In case of
* error returns negative errno. Specifically returns %-EOPNOTSUPP if
 * the adapter does not support bandwidth allocation mode, and %-ENODATA
* if there is no active bandwidth request from the graphics driver.
*/
int usb4_dp_port_requested_bandwidth(struct tb_port *port)
{
u32 val, granularity;
int ret;
if (!is_usb4_dpin(port))
return -EOPNOTSUPP;
ret = usb4_dp_port_granularity(port);
if (ret < 0)
return ret;
granularity = ret;
ret = tb_port_read(port, &val, TB_CFG_PORT,
port->cap_adap + ADP_DP_CS_8, 1);
if (ret)
return ret;
if (!(val & ADP_DP_CS_8_DR))
return -ENODATA;
return (val & ADP_DP_CS_8_REQUESTED_BW_MASK) * granularity;
}
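/*
 * Rough sketch of how a connection manager might drive the DP
 * bandwidth allocation mode helpers above (illustrative only, the
 * in-tree connection manager may sequence this differently):
 *
 *	if (usb4_dp_port_bandwidth_mode_supported(in)) {
 *		usb4_dp_port_set_cm_id(in, 1);
 *		usb4_dp_port_set_group_id(in, group_id);
 *		usb4_dp_port_set_granularity(in, 250);
 *		usb4_dp_port_set_cm_bandwidth_mode_supported(in, true);
 *		usb4_dp_port_set_estimated_bandwidth(in, estimated_bw);
 *	}
 *	...
 *	req = usb4_dp_port_requested_bandwidth(in);
 *	if (req > 0)
 *		usb4_dp_port_allocate_bandwidth(in, req);
 *
 * Here in, group_id and estimated_bw are placeholders, not values
 * defined in this file.
 */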
/**
* usb4_pci_port_set_ext_encapsulation() - Enable/disable extended encapsulation
* @port: PCIe adapter
* @enable: Enable/disable extended encapsulation
*
* Enables or disables extended encapsulation used in PCIe tunneling. Caller
* needs to make sure both adapters support this before enabling. Returns %0 on
* success and negative errno otherwise.
*/
int usb4_pci_port_set_ext_encapsulation(struct tb_port *port, bool enable)
{
u32 val;
int ret;
if (!tb_port_is_pcie_up(port) && !tb_port_is_pcie_down(port))
return -EINVAL;
ret = tb_port_read(port, &val, TB_CFG_PORT,
port->cap_adap + ADP_PCIE_CS_1, 1);
if (ret)
return ret;
if (enable)
val |= ADP_PCIE_CS_1_EE;
else
val &= ~ADP_PCIE_CS_1_EE;
return tb_port_write(port, &val, TB_CFG_PORT,
port->cap_adap + ADP_PCIE_CS_1, 1);
}
| linux-master | drivers/thunderbolt/usb4.c |
// SPDX-License-Identifier: GPL-2.0
/*
* KUnit tests
*
* Copyright (C) 2020, Intel Corporation
* Author: Mika Westerberg <[email protected]>
*/
#include <kunit/test.h>
#include <linux/idr.h>
#include "tb.h"
#include "tunnel.h"
static int __ida_init(struct kunit_resource *res, void *context)
{
struct ida *ida = context;
ida_init(ida);
res->data = ida;
return 0;
}
static void __ida_destroy(struct kunit_resource *res)
{
struct ida *ida = res->data;
ida_destroy(ida);
}
static void kunit_ida_init(struct kunit *test, struct ida *ida)
{
kunit_alloc_resource(test, __ida_init, __ida_destroy, GFP_KERNEL, ida);
}
static struct tb_switch *alloc_switch(struct kunit *test, u64 route,
u8 upstream_port, u8 max_port_number)
{
struct tb_switch *sw;
size_t size;
int i;
sw = kunit_kzalloc(test, sizeof(*sw), GFP_KERNEL);
if (!sw)
return NULL;
sw->config.upstream_port_number = upstream_port;
sw->config.depth = tb_route_length(route);
sw->config.route_hi = upper_32_bits(route);
sw->config.route_lo = lower_32_bits(route);
sw->config.enabled = 0;
sw->config.max_port_number = max_port_number;
size = (sw->config.max_port_number + 1) * sizeof(*sw->ports);
sw->ports = kunit_kzalloc(test, size, GFP_KERNEL);
if (!sw->ports)
return NULL;
for (i = 0; i <= sw->config.max_port_number; i++) {
sw->ports[i].sw = sw;
sw->ports[i].port = i;
sw->ports[i].config.port_number = i;
if (i) {
kunit_ida_init(test, &sw->ports[i].in_hopids);
kunit_ida_init(test, &sw->ports[i].out_hopids);
}
}
return sw;
}
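/*
 * Emulates a host router with two dual-link lane adapter pairs
 * (ports 1-4), two DP IN adapters (5, 6), an NHI (7), two PCIe
 * downstream adapters (8, 9) and two USB3 downstream adapters (12, 13).
 */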
static struct tb_switch *alloc_host(struct kunit *test)
{
struct tb_switch *sw;
sw = alloc_switch(test, 0, 7, 13);
if (!sw)
return NULL;
sw->config.vendor_id = 0x8086;
sw->config.device_id = 0x9a1b;
sw->ports[0].config.type = TB_TYPE_PORT;
sw->ports[0].config.max_in_hop_id = 7;
sw->ports[0].config.max_out_hop_id = 7;
sw->ports[1].config.type = TB_TYPE_PORT;
sw->ports[1].config.max_in_hop_id = 19;
sw->ports[1].config.max_out_hop_id = 19;
sw->ports[1].total_credits = 60;
sw->ports[1].ctl_credits = 2;
sw->ports[1].dual_link_port = &sw->ports[2];
sw->ports[2].config.type = TB_TYPE_PORT;
sw->ports[2].config.max_in_hop_id = 19;
sw->ports[2].config.max_out_hop_id = 19;
sw->ports[2].total_credits = 60;
sw->ports[2].ctl_credits = 2;
sw->ports[2].dual_link_port = &sw->ports[1];
sw->ports[2].link_nr = 1;
sw->ports[3].config.type = TB_TYPE_PORT;
sw->ports[3].config.max_in_hop_id = 19;
sw->ports[3].config.max_out_hop_id = 19;
sw->ports[3].total_credits = 60;
sw->ports[3].ctl_credits = 2;
sw->ports[3].dual_link_port = &sw->ports[4];
sw->ports[4].config.type = TB_TYPE_PORT;
sw->ports[4].config.max_in_hop_id = 19;
sw->ports[4].config.max_out_hop_id = 19;
sw->ports[4].total_credits = 60;
sw->ports[4].ctl_credits = 2;
sw->ports[4].dual_link_port = &sw->ports[3];
sw->ports[4].link_nr = 1;
sw->ports[5].config.type = TB_TYPE_DP_HDMI_IN;
sw->ports[5].config.max_in_hop_id = 9;
sw->ports[5].config.max_out_hop_id = 9;
sw->ports[5].cap_adap = -1;
sw->ports[6].config.type = TB_TYPE_DP_HDMI_IN;
sw->ports[6].config.max_in_hop_id = 9;
sw->ports[6].config.max_out_hop_id = 9;
sw->ports[6].cap_adap = -1;
sw->ports[7].config.type = TB_TYPE_NHI;
sw->ports[7].config.max_in_hop_id = 11;
sw->ports[7].config.max_out_hop_id = 11;
sw->ports[7].config.nfc_credits = 0x41800000;
sw->ports[8].config.type = TB_TYPE_PCIE_DOWN;
sw->ports[8].config.max_in_hop_id = 8;
sw->ports[8].config.max_out_hop_id = 8;
sw->ports[9].config.type = TB_TYPE_PCIE_DOWN;
sw->ports[9].config.max_in_hop_id = 8;
sw->ports[9].config.max_out_hop_id = 8;
sw->ports[10].disabled = true;
sw->ports[11].disabled = true;
sw->ports[12].config.type = TB_TYPE_USB3_DOWN;
sw->ports[12].config.max_in_hop_id = 8;
sw->ports[12].config.max_out_hop_id = 8;
sw->ports[13].config.type = TB_TYPE_USB3_DOWN;
sw->ports[13].config.max_in_hop_id = 8;
sw->ports[13].config.max_out_hop_id = 8;
return sw;
}
static struct tb_switch *alloc_host_usb4(struct kunit *test)
{
struct tb_switch *sw;
sw = alloc_host(test);
if (!sw)
return NULL;
sw->generation = 4;
sw->credit_allocation = true;
sw->max_usb3_credits = 32;
sw->min_dp_aux_credits = 1;
sw->min_dp_main_credits = 0;
sw->max_pcie_credits = 64;
sw->max_dma_credits = 14;
return sw;
}
static struct tb_switch *alloc_host_br(struct kunit *test)
{
struct tb_switch *sw;
sw = alloc_host_usb4(test);
if (!sw)
return NULL;
sw->ports[10].config.type = TB_TYPE_DP_HDMI_IN;
sw->ports[10].config.max_in_hop_id = 9;
sw->ports[10].config.max_out_hop_id = 9;
sw->ports[10].cap_adap = -1;
sw->ports[10].disabled = false;
return sw;
}
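/*
 * Emulates a device router with four dual-link lane adapter pairs
 * (ports 1-8), a PCIe upstream adapter (9), three PCIe downstream
 * adapters (10-12), two DP OUT adapters (13, 14), a USB3 upstream
 * adapter (16) and three USB3 downstream adapters (17-19). If @parent
 * is given the new device is linked to it at @route, bonding the lanes
 * when @bonded is true.
 */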
static struct tb_switch *alloc_dev_default(struct kunit *test,
struct tb_switch *parent,
u64 route, bool bonded)
{
struct tb_port *port, *upstream_port;
struct tb_switch *sw;
sw = alloc_switch(test, route, 1, 19);
if (!sw)
return NULL;
sw->config.vendor_id = 0x8086;
sw->config.device_id = 0x15ef;
sw->ports[0].config.type = TB_TYPE_PORT;
sw->ports[0].config.max_in_hop_id = 8;
sw->ports[0].config.max_out_hop_id = 8;
sw->ports[1].config.type = TB_TYPE_PORT;
sw->ports[1].config.max_in_hop_id = 19;
sw->ports[1].config.max_out_hop_id = 19;
sw->ports[1].total_credits = 60;
sw->ports[1].ctl_credits = 2;
sw->ports[1].dual_link_port = &sw->ports[2];
sw->ports[2].config.type = TB_TYPE_PORT;
sw->ports[2].config.max_in_hop_id = 19;
sw->ports[2].config.max_out_hop_id = 19;
sw->ports[2].total_credits = 60;
sw->ports[2].ctl_credits = 2;
sw->ports[2].dual_link_port = &sw->ports[1];
sw->ports[2].link_nr = 1;
sw->ports[3].config.type = TB_TYPE_PORT;
sw->ports[3].config.max_in_hop_id = 19;
sw->ports[3].config.max_out_hop_id = 19;
sw->ports[3].total_credits = 60;
sw->ports[3].ctl_credits = 2;
sw->ports[3].dual_link_port = &sw->ports[4];
sw->ports[4].config.type = TB_TYPE_PORT;
sw->ports[4].config.max_in_hop_id = 19;
sw->ports[4].config.max_out_hop_id = 19;
sw->ports[4].total_credits = 60;
sw->ports[4].ctl_credits = 2;
sw->ports[4].dual_link_port = &sw->ports[3];
sw->ports[4].link_nr = 1;
sw->ports[5].config.type = TB_TYPE_PORT;
sw->ports[5].config.max_in_hop_id = 19;
sw->ports[5].config.max_out_hop_id = 19;
sw->ports[5].total_credits = 60;
sw->ports[5].ctl_credits = 2;
sw->ports[5].dual_link_port = &sw->ports[6];
sw->ports[6].config.type = TB_TYPE_PORT;
sw->ports[6].config.max_in_hop_id = 19;
sw->ports[6].config.max_out_hop_id = 19;
sw->ports[6].total_credits = 60;
sw->ports[6].ctl_credits = 2;
sw->ports[6].dual_link_port = &sw->ports[5];
sw->ports[6].link_nr = 1;
sw->ports[7].config.type = TB_TYPE_PORT;
sw->ports[7].config.max_in_hop_id = 19;
sw->ports[7].config.max_out_hop_id = 19;
sw->ports[7].total_credits = 60;
sw->ports[7].ctl_credits = 2;
sw->ports[7].dual_link_port = &sw->ports[8];
sw->ports[8].config.type = TB_TYPE_PORT;
sw->ports[8].config.max_in_hop_id = 19;
sw->ports[8].config.max_out_hop_id = 19;
sw->ports[8].total_credits = 60;
sw->ports[8].ctl_credits = 2;
sw->ports[8].dual_link_port = &sw->ports[7];
sw->ports[8].link_nr = 1;
sw->ports[9].config.type = TB_TYPE_PCIE_UP;
sw->ports[9].config.max_in_hop_id = 8;
sw->ports[9].config.max_out_hop_id = 8;
sw->ports[10].config.type = TB_TYPE_PCIE_DOWN;
sw->ports[10].config.max_in_hop_id = 8;
sw->ports[10].config.max_out_hop_id = 8;
sw->ports[11].config.type = TB_TYPE_PCIE_DOWN;
sw->ports[11].config.max_in_hop_id = 8;
sw->ports[11].config.max_out_hop_id = 8;
sw->ports[12].config.type = TB_TYPE_PCIE_DOWN;
sw->ports[12].config.max_in_hop_id = 8;
sw->ports[12].config.max_out_hop_id = 8;
sw->ports[13].config.type = TB_TYPE_DP_HDMI_OUT;
sw->ports[13].config.max_in_hop_id = 9;
sw->ports[13].config.max_out_hop_id = 9;
sw->ports[13].cap_adap = -1;
sw->ports[14].config.type = TB_TYPE_DP_HDMI_OUT;
sw->ports[14].config.max_in_hop_id = 9;
sw->ports[14].config.max_out_hop_id = 9;
sw->ports[14].cap_adap = -1;
sw->ports[15].disabled = true;
sw->ports[16].config.type = TB_TYPE_USB3_UP;
sw->ports[16].config.max_in_hop_id = 8;
sw->ports[16].config.max_out_hop_id = 8;
sw->ports[17].config.type = TB_TYPE_USB3_DOWN;
sw->ports[17].config.max_in_hop_id = 8;
sw->ports[17].config.max_out_hop_id = 8;
sw->ports[18].config.type = TB_TYPE_USB3_DOWN;
sw->ports[18].config.max_in_hop_id = 8;
sw->ports[18].config.max_out_hop_id = 8;
sw->ports[19].config.type = TB_TYPE_USB3_DOWN;
sw->ports[19].config.max_in_hop_id = 8;
sw->ports[19].config.max_out_hop_id = 8;
if (!parent)
return sw;
/* Link them */
upstream_port = tb_upstream_port(sw);
port = tb_port_at(route, parent);
port->remote = upstream_port;
upstream_port->remote = port;
if (port->dual_link_port && upstream_port->dual_link_port) {
port->dual_link_port->remote = upstream_port->dual_link_port;
upstream_port->dual_link_port->remote = port->dual_link_port;
if (bonded) {
/* Bonding is used */
port->bonded = true;
port->total_credits *= 2;
port->dual_link_port->bonded = true;
port->dual_link_port->total_credits = 0;
upstream_port->bonded = true;
upstream_port->total_credits *= 2;
upstream_port->dual_link_port->bonded = true;
upstream_port->dual_link_port->total_credits = 0;
}
}
return sw;
}
static struct tb_switch *alloc_dev_with_dpin(struct kunit *test,
struct tb_switch *parent,
u64 route, bool bonded)
{
struct tb_switch *sw;
sw = alloc_dev_default(test, parent, route, bonded);
if (!sw)
return NULL;
sw->ports[13].config.type = TB_TYPE_DP_HDMI_IN;
sw->ports[13].config.max_in_hop_id = 9;
sw->ports[13].config.max_out_hop_id = 9;
sw->ports[14].config.type = TB_TYPE_DP_HDMI_IN;
sw->ports[14].config.max_in_hop_id = 9;
sw->ports[14].config.max_out_hop_id = 9;
return sw;
}
static struct tb_switch *alloc_dev_without_dp(struct kunit *test,
struct tb_switch *parent,
u64 route, bool bonded)
{
struct tb_switch *sw;
int i;
sw = alloc_dev_default(test, parent, route, bonded);
if (!sw)
return NULL;
/*
* Device with:
* 2x USB4 Adapters (adapters 1,2 and 3,4),
* 1x PCIe Upstream (adapter 9),
* 1x PCIe Downstream (adapter 10),
* 1x USB3 Upstream (adapter 16),
* 1x USB3 Downstream (adapter 17)
*/
for (i = 5; i <= 8; i++)
sw->ports[i].disabled = true;
for (i = 11; i <= 14; i++)
sw->ports[i].disabled = true;
sw->ports[13].cap_adap = 0;
sw->ports[14].cap_adap = 0;
for (i = 18; i <= 19; i++)
sw->ports[i].disabled = true;
sw->generation = 4;
sw->credit_allocation = true;
sw->max_usb3_credits = 109;
sw->min_dp_aux_credits = 0;
sw->min_dp_main_credits = 0;
sw->max_pcie_credits = 30;
sw->max_dma_credits = 1;
return sw;
}
static struct tb_switch *alloc_dev_usb4(struct kunit *test,
struct tb_switch *parent,
u64 route, bool bonded)
{
struct tb_switch *sw;
sw = alloc_dev_default(test, parent, route, bonded);
if (!sw)
return NULL;
sw->generation = 4;
sw->credit_allocation = true;
sw->max_usb3_credits = 14;
sw->min_dp_aux_credits = 1;
sw->min_dp_main_credits = 18;
sw->max_pcie_credits = 32;
sw->max_dma_credits = 14;
return sw;
}
static void tb_test_path_basic(struct kunit *test)
{
struct tb_port *src_port, *dst_port, *p;
struct tb_switch *host;
host = alloc_host(test);
src_port = &host->ports[5];
dst_port = src_port;
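/* With src == dst the walk returns that single port and then terminates */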
p = tb_next_port_on_path(src_port, dst_port, NULL);
KUNIT_EXPECT_PTR_EQ(test, p, dst_port);
p = tb_next_port_on_path(src_port, dst_port, p);
KUNIT_EXPECT_TRUE(test, !p);
}
static void tb_test_path_not_connected_walk(struct kunit *test)
{
struct tb_port *src_port, *dst_port, *p;
struct tb_switch *host, *dev;
host = alloc_host(test);
/* No connection between host and dev */
dev = alloc_dev_default(test, NULL, 3, true);
src_port = &host->ports[12];
dst_port = &dev->ports[16];
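/*
 * The walk can only proceed as far as the last port on the source
 * switch because there is no link towards the other switch.
 */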
p = tb_next_port_on_path(src_port, dst_port, NULL);
KUNIT_EXPECT_PTR_EQ(test, p, src_port);
p = tb_next_port_on_path(src_port, dst_port, p);
KUNIT_EXPECT_PTR_EQ(test, p, &host->ports[3]);
p = tb_next_port_on_path(src_port, dst_port, p);
KUNIT_EXPECT_TRUE(test, !p);
/* Other direction */
p = tb_next_port_on_path(dst_port, src_port, NULL);
KUNIT_EXPECT_PTR_EQ(test, p, dst_port);
p = tb_next_port_on_path(dst_port, src_port, p);
KUNIT_EXPECT_PTR_EQ(test, p, &dev->ports[1]);
p = tb_next_port_on_path(dst_port, src_port, p);
KUNIT_EXPECT_TRUE(test, !p);
}
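/* Expected switch route, adapter number and type for each port on a walk */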
struct port_expectation {
u64 route;
u8 port;
enum tb_port_type type;
};
static void tb_test_path_single_hop_walk(struct kunit *test)
{
/*
* Walks from Host PCIe downstream port to Device #1 PCIe
* upstream port.
*
* [Host]
* 1 |
* 1 |
* [Device]
*/
static const struct port_expectation test_data[] = {
{ .route = 0x0, .port = 8, .type = TB_TYPE_PCIE_DOWN },
{ .route = 0x0, .port = 1, .type = TB_TYPE_PORT },
{ .route = 0x1, .port = 1, .type = TB_TYPE_PORT },
{ .route = 0x1, .port = 9, .type = TB_TYPE_PCIE_UP },
};
struct tb_port *src_port, *dst_port, *p;
struct tb_switch *host, *dev;
int i;
host = alloc_host(test);
dev = alloc_dev_default(test, host, 1, true);
src_port = &host->ports[8];
dst_port = &dev->ports[9];
/* Walk both directions */
i = 0;
tb_for_each_port_on_path(src_port, dst_port, p) {
KUNIT_EXPECT_TRUE(test, i < ARRAY_SIZE(test_data));
KUNIT_EXPECT_EQ(test, tb_route(p->sw), test_data[i].route);
KUNIT_EXPECT_EQ(test, p->port, test_data[i].port);
KUNIT_EXPECT_EQ(test, (enum tb_port_type)p->config.type,
test_data[i].type);
i++;
}
KUNIT_EXPECT_EQ(test, i, ARRAY_SIZE(test_data));
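/* Walking in the reverse direction must visit the same ports backwards */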
i = ARRAY_SIZE(test_data) - 1;
tb_for_each_port_on_path(dst_port, src_port, p) {
KUNIT_EXPECT_TRUE(test, i < ARRAY_SIZE(test_data));
KUNIT_EXPECT_EQ(test, tb_route(p->sw), test_data[i].route);
KUNIT_EXPECT_EQ(test, p->port, test_data[i].port);
KUNIT_EXPECT_EQ(test, (enum tb_port_type)p->config.type,
test_data[i].type);
i--;
}
KUNIT_EXPECT_EQ(test, i, -1);
}
static void tb_test_path_daisy_chain_walk(struct kunit *test)
{
/*
* Walks from Host DP IN to Device #2 DP OUT.
*
* [Host]
* 1 |
* 1 |
* [Device #1]
* 3 /
* 1 /
* [Device #2]
*/
static const struct port_expectation test_data[] = {
{ .route = 0x0, .port = 5, .type = TB_TYPE_DP_HDMI_IN },
{ .route = 0x0, .port = 1, .type = TB_TYPE_PORT },
{ .route = 0x1, .port = 1, .type = TB_TYPE_PORT },
{ .route = 0x1, .port = 3, .type = TB_TYPE_PORT },
{ .route = 0x301, .port = 1, .type = TB_TYPE_PORT },
{ .route = 0x301, .port = 13, .type = TB_TYPE_DP_HDMI_OUT },
};
struct tb_port *src_port, *dst_port, *p;
struct tb_switch *host, *dev1, *dev2;
int i;
host = alloc_host(test);
dev1 = alloc_dev_default(test, host, 0x1, true);
dev2 = alloc_dev_default(test, dev1, 0x301, true);
src_port = &host->ports[5];
dst_port = &dev2->ports[13];
/* Walk both directions */
i = 0;
tb_for_each_port_on_path(src_port, dst_port, p) {
KUNIT_EXPECT_TRUE(test, i < ARRAY_SIZE(test_data));
KUNIT_EXPECT_EQ(test, tb_route(p->sw), test_data[i].route);
KUNIT_EXPECT_EQ(test, p->port, test_data[i].port);
KUNIT_EXPECT_EQ(test, (enum tb_port_type)p->config.type,
test_data[i].type);
i++;
}
KUNIT_EXPECT_EQ(test, i, ARRAY_SIZE(test_data));
i = ARRAY_SIZE(test_data) - 1;
tb_for_each_port_on_path(dst_port, src_port, p) {
KUNIT_EXPECT_TRUE(test, i < ARRAY_SIZE(test_data));
KUNIT_EXPECT_EQ(test, tb_route(p->sw), test_data[i].route);
KUNIT_EXPECT_EQ(test, p->port, test_data[i].port);
KUNIT_EXPECT_EQ(test, (enum tb_port_type)p->config.type,
test_data[i].type);
i--;
}
KUNIT_EXPECT_EQ(test, i, -1);
}
static void tb_test_path_simple_tree_walk(struct kunit *test)
{
/*
* Walks from Host DP IN to Device #3 DP OUT.
*
* [Host]
* 1 |
* 1 |
* [Device #1]
* 3 / | 5 \ 7
* 1 / | \ 1
* [Device #2] | [Device #4]
* | 1
* [Device #3]
*/
static const struct port_expectation test_data[] = {
{ .route = 0x0, .port = 5, .type = TB_TYPE_DP_HDMI_IN },
{ .route = 0x0, .port = 1, .type = TB_TYPE_PORT },
{ .route = 0x1, .port = 1, .type = TB_TYPE_PORT },
{ .route = 0x1, .port = 5, .type = TB_TYPE_PORT },
{ .route = 0x501, .port = 1, .type = TB_TYPE_PORT },
{ .route = 0x501, .port = 13, .type = TB_TYPE_DP_HDMI_OUT },
};
struct tb_port *src_port, *dst_port, *p;
struct tb_switch *host, *dev1, *dev3;
int i;
host = alloc_host(test);
dev1 = alloc_dev_default(test, host, 0x1, true);
alloc_dev_default(test, dev1, 0x301, true);
dev3 = alloc_dev_default(test, dev1, 0x501, true);
alloc_dev_default(test, dev1, 0x701, true);
src_port = &host->ports[5];
dst_port = &dev3->ports[13];
/* Walk both directions */
i = 0;
tb_for_each_port_on_path(src_port, dst_port, p) {
KUNIT_EXPECT_TRUE(test, i < ARRAY_SIZE(test_data));
KUNIT_EXPECT_EQ(test, tb_route(p->sw), test_data[i].route);
KUNIT_EXPECT_EQ(test, p->port, test_data[i].port);
KUNIT_EXPECT_EQ(test, (enum tb_port_type)p->config.type,
test_data[i].type);
i++;
}
KUNIT_EXPECT_EQ(test, i, ARRAY_SIZE(test_data));
i = ARRAY_SIZE(test_data) - 1;
tb_for_each_port_on_path(dst_port, src_port, p) {
KUNIT_EXPECT_TRUE(test, i < ARRAY_SIZE(test_data));
KUNIT_EXPECT_EQ(test, tb_route(p->sw), test_data[i].route);
KUNIT_EXPECT_EQ(test, p->port, test_data[i].port);
KUNIT_EXPECT_EQ(test, (enum tb_port_type)p->config.type,
test_data[i].type);
i--;
}
KUNIT_EXPECT_EQ(test, i, -1);
}
static void tb_test_path_complex_tree_walk(struct kunit *test)
{
/*
* Walks from Device #3 DP IN to Device #9 DP OUT.
*
* [Host]
* 1 |
* 1 |
* [Device #1]
* 3 / | 5 \ 7
* 1 / | \ 1
* [Device #2] | [Device #5]
* 5 | | 1 \ 7
* 1 | [Device #4] \ 1
* [Device #3] [Device #6]
* 3 /
* 1 /
* [Device #7]
* 3 / | 5
* 1 / |
* [Device #8] | 1
* [Device #9]
*/
static const struct port_expectation test_data[] = {
{ .route = 0x50301, .port = 13, .type = TB_TYPE_DP_HDMI_IN },
{ .route = 0x50301, .port = 1, .type = TB_TYPE_PORT },
{ .route = 0x301, .port = 5, .type = TB_TYPE_PORT },
{ .route = 0x301, .port = 1, .type = TB_TYPE_PORT },
{ .route = 0x1, .port = 3, .type = TB_TYPE_PORT },
{ .route = 0x1, .port = 7, .type = TB_TYPE_PORT },
{ .route = 0x701, .port = 1, .type = TB_TYPE_PORT },
{ .route = 0x701, .port = 7, .type = TB_TYPE_PORT },
{ .route = 0x70701, .port = 1, .type = TB_TYPE_PORT },
{ .route = 0x70701, .port = 3, .type = TB_TYPE_PORT },
{ .route = 0x3070701, .port = 1, .type = TB_TYPE_PORT },
{ .route = 0x3070701, .port = 5, .type = TB_TYPE_PORT },
{ .route = 0x503070701, .port = 1, .type = TB_TYPE_PORT },
{ .route = 0x503070701, .port = 14, .type = TB_TYPE_DP_HDMI_OUT },
};
struct tb_switch *host, *dev1, *dev2, *dev3, *dev5, *dev6, *dev7, *dev9;
struct tb_port *src_port, *dst_port, *p;
int i;
host = alloc_host(test);
dev1 = alloc_dev_default(test, host, 0x1, true);
dev2 = alloc_dev_default(test, dev1, 0x301, true);
dev3 = alloc_dev_with_dpin(test, dev2, 0x50301, true);
alloc_dev_default(test, dev1, 0x501, true);
dev5 = alloc_dev_default(test, dev1, 0x701, true);
dev6 = alloc_dev_default(test, dev5, 0x70701, true);
dev7 = alloc_dev_default(test, dev6, 0x3070701, true);
alloc_dev_default(test, dev7, 0x303070701, true);
dev9 = alloc_dev_default(test, dev7, 0x503070701, true);
src_port = &dev3->ports[13];
dst_port = &dev9->ports[14];
/* Walk both directions */
i = 0;
tb_for_each_port_on_path(src_port, dst_port, p) {
KUNIT_EXPECT_TRUE(test, i < ARRAY_SIZE(test_data));
KUNIT_EXPECT_EQ(test, tb_route(p->sw), test_data[i].route);
KUNIT_EXPECT_EQ(test, p->port, test_data[i].port);
KUNIT_EXPECT_EQ(test, (enum tb_port_type)p->config.type,
test_data[i].type);
i++;
}
KUNIT_EXPECT_EQ(test, i, ARRAY_SIZE(test_data));
i = ARRAY_SIZE(test_data) - 1;
tb_for_each_port_on_path(dst_port, src_port, p) {
KUNIT_EXPECT_TRUE(test, i < ARRAY_SIZE(test_data));
KUNIT_EXPECT_EQ(test, tb_route(p->sw), test_data[i].route);
KUNIT_EXPECT_EQ(test, p->port, test_data[i].port);
KUNIT_EXPECT_EQ(test, (enum tb_port_type)p->config.type,
test_data[i].type);
i--;
}
KUNIT_EXPECT_EQ(test, i, -1);
}
static void tb_test_path_max_length_walk(struct kunit *test)
{
struct tb_switch *host, *dev1, *dev2, *dev3, *dev4, *dev5, *dev6;
struct tb_switch *dev7, *dev8, *dev9, *dev10, *dev11, *dev12;
struct tb_port *src_port, *dst_port, *p;
int i;
/*
* Walks from Device #6 DP IN to Device #12 DP OUT.
*
* [Host]
* 1 / \ 3
* 1 / \ 1
* [Device #1] [Device #7]
* 3 | | 3
* 1 | | 1
* [Device #2] [Device #8]
* 3 | | 3
* 1 | | 1
* [Device #3] [Device #9]
* 3 | | 3
* 1 | | 1
* [Device #4] [Device #10]
* 3 | | 3
* 1 | | 1
* [Device #5] [Device #11]
* 3 | | 3
* 1 | | 1
* [Device #6] [Device #12]
*/
static const struct port_expectation test_data[] = {
{ .route = 0x30303030301, .port = 13, .type = TB_TYPE_DP_HDMI_IN },
{ .route = 0x30303030301, .port = 1, .type = TB_TYPE_PORT },
{ .route = 0x303030301, .port = 3, .type = TB_TYPE_PORT },
{ .route = 0x303030301, .port = 1, .type = TB_TYPE_PORT },
{ .route = 0x3030301, .port = 3, .type = TB_TYPE_PORT },
{ .route = 0x3030301, .port = 1, .type = TB_TYPE_PORT },
{ .route = 0x30301, .port = 3, .type = TB_TYPE_PORT },
{ .route = 0x30301, .port = 1, .type = TB_TYPE_PORT },
{ .route = 0x301, .port = 3, .type = TB_TYPE_PORT },
{ .route = 0x301, .port = 1, .type = TB_TYPE_PORT },
{ .route = 0x1, .port = 3, .type = TB_TYPE_PORT },
{ .route = 0x1, .port = 1, .type = TB_TYPE_PORT },
{ .route = 0x0, .port = 1, .type = TB_TYPE_PORT },
{ .route = 0x0, .port = 3, .type = TB_TYPE_PORT },
{ .route = 0x3, .port = 1, .type = TB_TYPE_PORT },
{ .route = 0x3, .port = 3, .type = TB_TYPE_PORT },
{ .route = 0x303, .port = 1, .type = TB_TYPE_PORT },
{ .route = 0x303, .port = 3, .type = TB_TYPE_PORT },
{ .route = 0x30303, .port = 1, .type = TB_TYPE_PORT },
{ .route = 0x30303, .port = 3, .type = TB_TYPE_PORT },
{ .route = 0x3030303, .port = 1, .type = TB_TYPE_PORT },
{ .route = 0x3030303, .port = 3, .type = TB_TYPE_PORT },
{ .route = 0x303030303, .port = 1, .type = TB_TYPE_PORT },
{ .route = 0x303030303, .port = 3, .type = TB_TYPE_PORT },
{ .route = 0x30303030303, .port = 1, .type = TB_TYPE_PORT },
{ .route = 0x30303030303, .port = 13, .type = TB_TYPE_DP_HDMI_OUT },
};
host = alloc_host(test);
dev1 = alloc_dev_default(test, host, 0x1, true);
dev2 = alloc_dev_default(test, dev1, 0x301, true);
dev3 = alloc_dev_default(test, dev2, 0x30301, true);
dev4 = alloc_dev_default(test, dev3, 0x3030301, true);
dev5 = alloc_dev_default(test, dev4, 0x303030301, true);
dev6 = alloc_dev_with_dpin(test, dev5, 0x30303030301, true);
dev7 = alloc_dev_default(test, host, 0x3, true);
dev8 = alloc_dev_default(test, dev7, 0x303, true);
dev9 = alloc_dev_default(test, dev8, 0x30303, true);
dev10 = alloc_dev_default(test, dev9, 0x3030303, true);
dev11 = alloc_dev_default(test, dev10, 0x303030303, true);
dev12 = alloc_dev_default(test, dev11, 0x30303030303, true);
src_port = &dev6->ports[13];
dst_port = &dev12->ports[13];
/* Walk both directions */
i = 0;
tb_for_each_port_on_path(src_port, dst_port, p) {
KUNIT_EXPECT_TRUE(test, i < ARRAY_SIZE(test_data));
KUNIT_EXPECT_EQ(test, tb_route(p->sw), test_data[i].route);
KUNIT_EXPECT_EQ(test, p->port, test_data[i].port);
KUNIT_EXPECT_EQ(test, (enum tb_port_type)p->config.type,
test_data[i].type);
i++;
}
KUNIT_EXPECT_EQ(test, i, ARRAY_SIZE(test_data));
i = ARRAY_SIZE(test_data) - 1;
tb_for_each_port_on_path(dst_port, src_port, p) {
KUNIT_EXPECT_TRUE(test, i < ARRAY_SIZE(test_data));
KUNIT_EXPECT_EQ(test, tb_route(p->sw), test_data[i].route);
KUNIT_EXPECT_EQ(test, p->port, test_data[i].port);
KUNIT_EXPECT_EQ(test, (enum tb_port_type)p->config.type,
test_data[i].type);
i--;
}
KUNIT_EXPECT_EQ(test, i, -1);
}
static void tb_test_path_not_connected(struct kunit *test)
{
struct tb_switch *host, *dev1, *dev2;
struct tb_port *down, *up;
struct tb_path *path;
host = alloc_host(test);
dev1 = alloc_dev_default(test, host, 0x3, false);
/* Not connected to anything */
dev2 = alloc_dev_default(test, NULL, 0x303, false);
down = &dev1->ports[10];
up = &dev2->ports[9];
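/* Both lane 0 and lane 1 path allocations must fail as there is no route */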
path = tb_path_alloc(NULL, down, 8, up, 8, 0, "PCIe Down");
KUNIT_ASSERT_NULL(test, path);
path = tb_path_alloc(NULL, down, 8, up, 8, 1, "PCIe Down");
KUNIT_ASSERT_NULL(test, path);
}
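/* Expected ingress and egress adapter for each hop of an allocated path */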
struct hop_expectation {
u64 route;
u8 in_port;
enum tb_port_type in_type;
u8 out_port;
enum tb_port_type out_type;
};
static void tb_test_path_not_bonded_lane0(struct kunit *test)
{
/*
* PCIe path from host to device using lane 0.
*
* [Host]
* 3 |: 4
* 1 |: 2
* [Device]
*
* ('|' marks the lane the path uses, ':' the unbonded second lane)
*/
static const struct hop_expectation test_data[] = {
{
.route = 0x0,
.in_port = 9,
.in_type = TB_TYPE_PCIE_DOWN,
.out_port = 3,
.out_type = TB_TYPE_PORT,
},
{
.route = 0x3,
.in_port = 1,
.in_type = TB_TYPE_PORT,
.out_port = 9,
.out_type = TB_TYPE_PCIE_UP,
},
};
struct tb_switch *host, *dev;
struct tb_port *down, *up;
struct tb_path *path;
int i;
host = alloc_host(test);
dev = alloc_dev_default(test, host, 0x3, false);
down = &host->ports[9];
up = &dev->ports[9];
path = tb_path_alloc(NULL, down, 8, up, 8, 0, "PCIe Down");
KUNIT_ASSERT_NOT_NULL(test, path);
KUNIT_ASSERT_EQ(test, path->path_length, ARRAY_SIZE(test_data));
for (i = 0; i < ARRAY_SIZE(test_data); i++) {
const struct tb_port *in_port, *out_port;
in_port = path->hops[i].in_port;
out_port = path->hops[i].out_port;
KUNIT_EXPECT_EQ(test, tb_route(in_port->sw), test_data[i].route);
KUNIT_EXPECT_EQ(test, in_port->port, test_data[i].in_port);
KUNIT_EXPECT_EQ(test, (enum tb_port_type)in_port->config.type,
test_data[i].in_type);
KUNIT_EXPECT_EQ(test, tb_route(out_port->sw), test_data[i].route);
KUNIT_EXPECT_EQ(test, out_port->port, test_data[i].out_port);
KUNIT_EXPECT_EQ(test, (enum tb_port_type)out_port->config.type,
test_data[i].out_type);
}
tb_path_free(path);
}
static void tb_test_path_not_bonded_lane1(struct kunit *test)
{
/*
* DP Video path from host to device using lane 1. Paths like
* these are only used with Thunderbolt 1 devices where lane
* bonding is not possible. USB4 specifically does not allow
* paths like this (you either use lane 0 where lane 1 is
* disabled or both lanes are bonded).
*
* [Host]
* 1 :| 2
* 1 :| 2
* [Device]
*/
static const struct hop_expectation test_data[] = {
{
.route = 0x0,
.in_port = 5,
.in_type = TB_TYPE_DP_HDMI_IN,
.out_port = 2,
.out_type = TB_TYPE_PORT,
},
{
.route = 0x1,
.in_port = 2,
.in_type = TB_TYPE_PORT,
.out_port = 13,
.out_type = TB_TYPE_DP_HDMI_OUT,
},
};
struct tb_switch *host, *dev;
struct tb_port *in, *out;
struct tb_path *path;
int i;
host = alloc_host(test);
dev = alloc_dev_default(test, host, 0x1, false);
in = &host->ports[5];
out = &dev->ports[13];
path = tb_path_alloc(NULL, in, 9, out, 9, 1, "Video");
KUNIT_ASSERT_NOT_NULL(test, path);
KUNIT_ASSERT_EQ(test, path->path_length, ARRAY_SIZE(test_data));
for (i = 0; i < ARRAY_SIZE(test_data); i++) {
const struct tb_port *in_port, *out_port;
in_port = path->hops[i].in_port;
out_port = path->hops[i].out_port;
KUNIT_EXPECT_EQ(test, tb_route(in_port->sw), test_data[i].route);
KUNIT_EXPECT_EQ(test, in_port->port, test_data[i].in_port);
KUNIT_EXPECT_EQ(test, (enum tb_port_type)in_port->config.type,
test_data[i].in_type);
KUNIT_EXPECT_EQ(test, tb_route(out_port->sw), test_data[i].route);
KUNIT_EXPECT_EQ(test, out_port->port, test_data[i].out_port);
KUNIT_EXPECT_EQ(test, (enum tb_port_type)out_port->config.type,
test_data[i].out_type);
}
tb_path_free(path);
}
static void tb_test_path_not_bonded_lane1_chain(struct kunit *test)
{
/*
* DP Video path from host to device 3 using lane 1.
*
* [Host]
* 1 :| 2
* 1 :| 2
* [Device #1]
* 7 :| 8
* 1 :| 2
* [Device #2]
* 5 :| 6
* 1 :| 2
* [Device #3]
*/
static const struct hop_expectation test_data[] = {
{
.route = 0x0,
.in_port = 5,
.in_type = TB_TYPE_DP_HDMI_IN,
.out_port = 2,
.out_type = TB_TYPE_PORT,
},
{
.route = 0x1,
.in_port = 2,
.in_type = TB_TYPE_PORT,
.out_port = 8,
.out_type = TB_TYPE_PORT,
},
{
.route = 0x701,
.in_port = 2,
.in_type = TB_TYPE_PORT,
.out_port = 6,
.out_type = TB_TYPE_PORT,
},
{
.route = 0x50701,
.in_port = 2,
.in_type = TB_TYPE_PORT,
.out_port = 13,
.out_type = TB_TYPE_DP_HDMI_OUT,
},
};
struct tb_switch *host, *dev1, *dev2, *dev3;
struct tb_port *in, *out;
struct tb_path *path;
int i;
host = alloc_host(test);
dev1 = alloc_dev_default(test, host, 0x1, false);
dev2 = alloc_dev_default(test, dev1, 0x701, false);
dev3 = alloc_dev_default(test, dev2, 0x50701, false);
in = &host->ports[5];
out = &dev3->ports[13];
path = tb_path_alloc(NULL, in, 9, out, 9, 1, "Video");
KUNIT_ASSERT_NOT_NULL(test, path);
KUNIT_ASSERT_EQ(test, path->path_length, ARRAY_SIZE(test_data));
for (i = 0; i < ARRAY_SIZE(test_data); i++) {
const struct tb_port *in_port, *out_port;
in_port = path->hops[i].in_port;
out_port = path->hops[i].out_port;
KUNIT_EXPECT_EQ(test, tb_route(in_port->sw), test_data[i].route);
KUNIT_EXPECT_EQ(test, in_port->port, test_data[i].in_port);
KUNIT_EXPECT_EQ(test, (enum tb_port_type)in_port->config.type,
test_data[i].in_type);
KUNIT_EXPECT_EQ(test, tb_route(out_port->sw), test_data[i].route);
KUNIT_EXPECT_EQ(test, out_port->port, test_data[i].out_port);
KUNIT_EXPECT_EQ(test, (enum tb_port_type)out_port->config.type,
test_data[i].out_type);
}
tb_path_free(path);
}
static void tb_test_path_not_bonded_lane1_chain_reverse(struct kunit *test)
{
/*
* DP Video path from device 3 to host using lane 1.
*
* [Host]
* 1 :| 2
* 1 :| 2
* [Device #1]
* 7 :| 8
* 1 :| 2
* [Device #2]
* 5 :| 6
* 1 :| 2
* [Device #3]
*/
static const struct hop_expectation test_data[] = {
{
.route = 0x50701,
.in_port = 13,
.in_type = TB_TYPE_DP_HDMI_IN,
.out_port = 2,
.out_type = TB_TYPE_PORT,
},
{
.route = 0x701,
.in_port = 6,
.in_type = TB_TYPE_PORT,
.out_port = 2,
.out_type = TB_TYPE_PORT,
},
{
.route = 0x1,
.in_port = 8,
.in_type = TB_TYPE_PORT,
.out_port = 2,
.out_type = TB_TYPE_PORT,
},
{
.route = 0x0,
.in_port = 2,
.in_type = TB_TYPE_PORT,
.out_port = 5,
.out_type = TB_TYPE_DP_HDMI_IN,
},
};
struct tb_switch *host, *dev1, *dev2, *dev3;
struct tb_port *in, *out;
struct tb_path *path;
int i;
host = alloc_host(test);
dev1 = alloc_dev_default(test, host, 0x1, false);
dev2 = alloc_dev_default(test, dev1, 0x701, false);
dev3 = alloc_dev_with_dpin(test, dev2, 0x50701, false);
in = &dev3->ports[13];
out = &host->ports[5];
path = tb_path_alloc(NULL, in, 9, out, 9, 1, "Video");
KUNIT_ASSERT_NOT_NULL(test, path);
KUNIT_ASSERT_EQ(test, path->path_length, ARRAY_SIZE(test_data));
for (i = 0; i < ARRAY_SIZE(test_data); i++) {
const struct tb_port *in_port, *out_port;
in_port = path->hops[i].in_port;
out_port = path->hops[i].out_port;
KUNIT_EXPECT_EQ(test, tb_route(in_port->sw), test_data[i].route);
KUNIT_EXPECT_EQ(test, in_port->port, test_data[i].in_port);
KUNIT_EXPECT_EQ(test, (enum tb_port_type)in_port->config.type,
test_data[i].in_type);
KUNIT_EXPECT_EQ(test, tb_route(out_port->sw), test_data[i].route);
KUNIT_EXPECT_EQ(test, out_port->port, test_data[i].out_port);
KUNIT_EXPECT_EQ(test, (enum tb_port_type)out_port->config.type,
test_data[i].out_type);
}
tb_path_free(path);
}
static void tb_test_path_mixed_chain(struct kunit *test)
{
/*
* DP Video path from host to device 4 where first and last link
* is bonded.
*
* [Host]
* 1 |
* 1 |
* [Device #1]
* 7 :| 8
* 1 :| 2
* [Device #2]
* 5 :| 6
* 1 :| 2
* [Device #3]
* 3 |
* 1 |
* [Device #4]
*/
static const struct hop_expectation test_data[] = {
{
.route = 0x0,
.in_port = 5,
.in_type = TB_TYPE_DP_HDMI_IN,
.out_port = 1,
.out_type = TB_TYPE_PORT,
},
{
.route = 0x1,
.in_port = 1,
.in_type = TB_TYPE_PORT,
.out_port = 8,
.out_type = TB_TYPE_PORT,
},
{
.route = 0x701,
.in_port = 2,
.in_type = TB_TYPE_PORT,
.out_port = 6,
.out_type = TB_TYPE_PORT,
},
{
.route = 0x50701,
.in_port = 2,
.in_type = TB_TYPE_PORT,
.out_port = 3,
.out_type = TB_TYPE_PORT,
},
{
.route = 0x3050701,
.in_port = 1,
.in_type = TB_TYPE_PORT,
.out_port = 13,
.out_type = TB_TYPE_DP_HDMI_OUT,
},
};
struct tb_switch *host, *dev1, *dev2, *dev3, *dev4;
struct tb_port *in, *out;
struct tb_path *path;
int i;
host = alloc_host(test);
dev1 = alloc_dev_default(test, host, 0x1, true);
dev2 = alloc_dev_default(test, dev1, 0x701, false);
dev3 = alloc_dev_default(test, dev2, 0x50701, false);
dev4 = alloc_dev_default(test, dev3, 0x3050701, true);
in = &host->ports[5];
out = &dev4->ports[13];
path = tb_path_alloc(NULL, in, 9, out, 9, 1, "Video");
KUNIT_ASSERT_NOT_NULL(test, path);
KUNIT_ASSERT_EQ(test, path->path_length, ARRAY_SIZE(test_data));
for (i = 0; i < ARRAY_SIZE(test_data); i++) {
const struct tb_port *in_port, *out_port;
in_port = path->hops[i].in_port;
out_port = path->hops[i].out_port;
KUNIT_EXPECT_EQ(test, tb_route(in_port->sw), test_data[i].route);
KUNIT_EXPECT_EQ(test, in_port->port, test_data[i].in_port);
KUNIT_EXPECT_EQ(test, (enum tb_port_type)in_port->config.type,
test_data[i].in_type);
KUNIT_EXPECT_EQ(test, tb_route(out_port->sw), test_data[i].route);
KUNIT_EXPECT_EQ(test, out_port->port, test_data[i].out_port);
KUNIT_EXPECT_EQ(test, (enum tb_port_type)out_port->config.type,
test_data[i].out_type);
}
tb_path_free(path);
}
static void tb_test_path_mixed_chain_reverse(struct kunit *test)
{
/*
* DP Video path from device 4 to host where first and last link
* is bonded.
*
* [Host]
* 1 |
* 1 |
* [Device #1]
* 7 :| 8
* 1 :| 2
* [Device #2]
* 5 :| 6
* 1 :| 2
* [Device #3]
* 3 |
* 1 |
* [Device #4]
*/
static const struct hop_expectation test_data[] = {
{
.route = 0x3050701,
.in_port = 13,
.in_type = TB_TYPE_DP_HDMI_OUT,
.out_port = 1,
.out_type = TB_TYPE_PORT,
},
{
.route = 0x50701,
.in_port = 3,
.in_type = TB_TYPE_PORT,
.out_port = 2,
.out_type = TB_TYPE_PORT,
},
{
.route = 0x701,
.in_port = 6,
.in_type = TB_TYPE_PORT,
.out_port = 2,
.out_type = TB_TYPE_PORT,
},
{
.route = 0x1,
.in_port = 8,
.in_type = TB_TYPE_PORT,
.out_port = 1,
.out_type = TB_TYPE_PORT,
},
{
.route = 0x0,
.in_port = 1,
.in_type = TB_TYPE_PORT,
.out_port = 5,
.out_type = TB_TYPE_DP_HDMI_IN,
},
};
struct tb_switch *host, *dev1, *dev2, *dev3, *dev4;
struct tb_port *in, *out;
struct tb_path *path;
int i;
host = alloc_host(test);
dev1 = alloc_dev_default(test, host, 0x1, true);
dev2 = alloc_dev_default(test, dev1, 0x701, false);
dev3 = alloc_dev_default(test, dev2, 0x50701, false);
dev4 = alloc_dev_default(test, dev3, 0x3050701, true);
in = &dev4->ports[13];
out = &host->ports[5];
path = tb_path_alloc(NULL, in, 9, out, 9, 1, "Video");
KUNIT_ASSERT_NOT_NULL(test, path);
KUNIT_ASSERT_EQ(test, path->path_length, ARRAY_SIZE(test_data));
for (i = 0; i < ARRAY_SIZE(test_data); i++) {
const struct tb_port *in_port, *out_port;
in_port = path->hops[i].in_port;
out_port = path->hops[i].out_port;
KUNIT_EXPECT_EQ(test, tb_route(in_port->sw), test_data[i].route);
KUNIT_EXPECT_EQ(test, in_port->port, test_data[i].in_port);
KUNIT_EXPECT_EQ(test, (enum tb_port_type)in_port->config.type,
test_data[i].in_type);
KUNIT_EXPECT_EQ(test, tb_route(out_port->sw), test_data[i].route);
KUNIT_EXPECT_EQ(test, out_port->port, test_data[i].out_port);
KUNIT_EXPECT_EQ(test, (enum tb_port_type)out_port->config.type,
test_data[i].out_type);
}
tb_path_free(path);
}
static void tb_test_tunnel_pcie(struct kunit *test)
{
struct tb_switch *host, *dev1, *dev2;
struct tb_tunnel *tunnel1, *tunnel2;
struct tb_port *down, *up;
/*
* Create PCIe tunnel between host and two devices.
*
* [Host]
* 1 |
* 1 |
* [Device #1]
* 5 |
* 1 |
* [Device #2]
*/
host = alloc_host(test);
dev1 = alloc_dev_default(test, host, 0x1, true);
dev2 = alloc_dev_default(test, dev1, 0x501, true);
down = &host->ports[8];
up = &dev1->ports[9];
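/* Tunnel #1: host PCIe downstream adapter to Device #1 upstream adapter */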
tunnel1 = tb_tunnel_alloc_pci(NULL, up, down);
KUNIT_ASSERT_NOT_NULL(test, tunnel1);
KUNIT_EXPECT_EQ(test, tunnel1->type, TB_TUNNEL_PCI);
KUNIT_EXPECT_PTR_EQ(test, tunnel1->src_port, down);
KUNIT_EXPECT_PTR_EQ(test, tunnel1->dst_port, up);
KUNIT_ASSERT_EQ(test, tunnel1->npaths, 2);
KUNIT_ASSERT_EQ(test, tunnel1->paths[0]->path_length, 2);
KUNIT_EXPECT_PTR_EQ(test, tunnel1->paths[0]->hops[0].in_port, down);
KUNIT_EXPECT_PTR_EQ(test, tunnel1->paths[0]->hops[1].out_port, up);
KUNIT_ASSERT_EQ(test, tunnel1->paths[1]->path_length, 2);
KUNIT_EXPECT_PTR_EQ(test, tunnel1->paths[1]->hops[0].in_port, up);
KUNIT_EXPECT_PTR_EQ(test, tunnel1->paths[1]->hops[1].out_port, down);
down = &dev1->ports[10];
up = &dev2->ports[9];
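/* Tunnel #2: Device #1 PCIe downstream adapter to Device #2 upstream adapter */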
tunnel2 = tb_tunnel_alloc_pci(NULL, up, down);
KUNIT_ASSERT_NOT_NULL(test, tunnel2);
KUNIT_EXPECT_EQ(test, tunnel2->type, TB_TUNNEL_PCI);
KUNIT_EXPECT_PTR_EQ(test, tunnel2->src_port, down);
KUNIT_EXPECT_PTR_EQ(test, tunnel2->dst_port, up);
KUNIT_ASSERT_EQ(test, tunnel2->npaths, 2);
KUNIT_ASSERT_EQ(test, tunnel2->paths[0]->path_length, 2);
KUNIT_EXPECT_PTR_EQ(test, tunnel2->paths[0]->hops[0].in_port, down);
KUNIT_EXPECT_PTR_EQ(test, tunnel2->paths[0]->hops[1].out_port, up);
KUNIT_ASSERT_EQ(test, tunnel2->paths[1]->path_length, 2);
KUNIT_EXPECT_PTR_EQ(test, tunnel2->paths[1]->hops[0].in_port, up);
KUNIT_EXPECT_PTR_EQ(test, tunnel2->paths[1]->hops[1].out_port, down);
tb_tunnel_free(tunnel2);
tb_tunnel_free(tunnel1);
}
static void tb_test_tunnel_dp(struct kunit *test)
{
struct tb_switch *host, *dev;
struct tb_port *in, *out;
struct tb_tunnel *tunnel;
/*
* Create DP tunnel between Host and Device
*
* [Host]
* 1 |
* 1 |
* [Device]
*/
host = alloc_host(test);
dev = alloc_dev_default(test, host, 0x3, true);
in = &host->ports[5];
out = &dev->ports[13];
tunnel = tb_tunnel_alloc_dp(NULL, in, out, 1, 0, 0);
KUNIT_ASSERT_NOT_NULL(test, tunnel);
KUNIT_EXPECT_EQ(test, tunnel->type, TB_TUNNEL_DP);
KUNIT_EXPECT_PTR_EQ(test, tunnel->src_port, in);
KUNIT_EXPECT_PTR_EQ(test, tunnel->dst_port, out);
KUNIT_ASSERT_EQ(test, tunnel->npaths, 3);
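/* Video (main) path */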
KUNIT_ASSERT_EQ(test, tunnel->paths[0]->path_length, 2);
KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[0].in_port, in);
KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[1].out_port, out);
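/* AUX TX path */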
KUNIT_ASSERT_EQ(test, tunnel->paths[1]->path_length, 2);
KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[1]->hops[0].in_port, in);
KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[1]->hops[1].out_port, out);
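/* AUX RX path */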
KUNIT_ASSERT_EQ(test, tunnel->paths[2]->path_length, 2);
KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[2]->hops[0].in_port, out);
KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[2]->hops[1].out_port, in);
tb_tunnel_free(tunnel);
}
static void tb_test_tunnel_dp_chain(struct kunit *test)
{
struct tb_switch *host, *dev1, *dev4;
struct tb_port *in, *out;
struct tb_tunnel *tunnel;
/*
* Create DP tunnel from Host DP IN to Device #4 DP OUT.
*
* [Host]
* 1 |
* 1 |
* [Device #1]
* 3 / | 5 \ 7
* 1 / | \ 1
* [Device #2] | [Device #4]
* | 1
* [Device #3]
*/
host = alloc_host(test);
dev1 = alloc_dev_default(test, host, 0x1, true);
alloc_dev_default(test, dev1, 0x301, true);
alloc_dev_default(test, dev1, 0x501, true);
dev4 = alloc_dev_default(test, dev1, 0x701, true);
in = &host->ports[5];
out = &dev4->ports[14];
tunnel = tb_tunnel_alloc_dp(NULL, in, out, 1, 0, 0);
KUNIT_ASSERT_NOT_NULL(test, tunnel);
KUNIT_EXPECT_EQ(test, tunnel->type, TB_TUNNEL_DP);
KUNIT_EXPECT_PTR_EQ(test, tunnel->src_port, in);
KUNIT_EXPECT_PTR_EQ(test, tunnel->dst_port, out);
KUNIT_ASSERT_EQ(test, tunnel->npaths, 3);
KUNIT_ASSERT_EQ(test, tunnel->paths[0]->path_length, 3);
KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[0].in_port, in);
KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[2].out_port, out);
KUNIT_ASSERT_EQ(test, tunnel->paths[1]->path_length, 3);
KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[1]->hops[0].in_port, in);
KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[1]->hops[2].out_port, out);
KUNIT_ASSERT_EQ(test, tunnel->paths[2]->path_length, 3);
KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[2]->hops[0].in_port, out);
KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[2]->hops[2].out_port, in);
tb_tunnel_free(tunnel);
}
static void tb_test_tunnel_dp_tree(struct kunit *test)
{
struct tb_switch *host, *dev1, *dev2, *dev3, *dev5;
struct tb_port *in, *out;
struct tb_tunnel *tunnel;
/*
* Create DP tunnel from Device #2 DP IN to Device #5 DP OUT.
*
* [Host]
* 3 |
* 1 |
* [Device #1]
* 3 / | 5 \ 7
* 1 / | \ 1
* [Device #2] | [Device #4]
* | 1
* [Device #3]
* | 5
* | 1
* [Device #5]
*/
host = alloc_host(test);
dev1 = alloc_dev_default(test, host, 0x3, true);
dev2 = alloc_dev_with_dpin(test, dev1, 0x303, true);
dev3 = alloc_dev_default(test, dev1, 0x503, true);
alloc_dev_default(test, dev1, 0x703, true);
dev5 = alloc_dev_default(test, dev3, 0x50503, true);
in = &dev2->ports[13];
out = &dev5->ports[13];
tunnel = tb_tunnel_alloc_dp(NULL, in, out, 1, 0, 0);
KUNIT_ASSERT_NOT_NULL(test, tunnel);
KUNIT_EXPECT_EQ(test, tunnel->type, TB_TUNNEL_DP);
KUNIT_EXPECT_PTR_EQ(test, tunnel->src_port, in);
KUNIT_EXPECT_PTR_EQ(test, tunnel->dst_port, out);
KUNIT_ASSERT_EQ(test, tunnel->npaths, 3);
KUNIT_ASSERT_EQ(test, tunnel->paths[0]->path_length, 4);
KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[0].in_port, in);
KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[3].out_port, out);
KUNIT_ASSERT_EQ(test, tunnel->paths[1]->path_length, 4);
KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[1]->hops[0].in_port, in);
KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[1]->hops[3].out_port, out);
KUNIT_ASSERT_EQ(test, tunnel->paths[2]->path_length, 4);
KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[2]->hops[0].in_port, out);
KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[2]->hops[3].out_port, in);
tb_tunnel_free(tunnel);
}
static void tb_test_tunnel_dp_max_length(struct kunit *test)
{
struct tb_switch *host, *dev1, *dev2, *dev3, *dev4, *dev5, *dev6;
struct tb_switch *dev7, *dev8, *dev9, *dev10, *dev11, *dev12;
struct tb_port *in, *out;
struct tb_tunnel *tunnel;
/*
* Creates DP tunnel from Device #6 to Device #12.
*
* [Host]
* 1 / \ 3
* 1 / \ 1
* [Device #1] [Device #7]
* 3 | | 3
* 1 | | 1
* [Device #2] [Device #8]
* 3 | | 3
* 1 | | 1
* [Device #3] [Device #9]
* 3 | | 3
* 1 | | 1
* [Device #4] [Device #10]
* 3 | | 3
* 1 | | 1
* [Device #5] [Device #11]
* 3 | | 3
* 1 | | 1
* [Device #6] [Device #12]
*/
host = alloc_host(test);
dev1 = alloc_dev_default(test, host, 0x1, true);
dev2 = alloc_dev_default(test, dev1, 0x301, true);
dev3 = alloc_dev_default(test, dev2, 0x30301, true);
dev4 = alloc_dev_default(test, dev3, 0x3030301, true);
dev5 = alloc_dev_default(test, dev4, 0x303030301, true);
dev6 = alloc_dev_with_dpin(test, dev5, 0x30303030301, true);
dev7 = alloc_dev_default(test, host, 0x3, true);
dev8 = alloc_dev_default(test, dev7, 0x303, true);
dev9 = alloc_dev_default(test, dev8, 0x30303, true);
dev10 = alloc_dev_default(test, dev9, 0x3030303, true);
dev11 = alloc_dev_default(test, dev10, 0x303030303, true);
dev12 = alloc_dev_default(test, dev11, 0x30303030303, true);
in = &dev6->ports[13];
out = &dev12->ports[13];
tunnel = tb_tunnel_alloc_dp(NULL, in, out, 1, 0, 0);
KUNIT_ASSERT_NOT_NULL(test, tunnel);
KUNIT_EXPECT_EQ(test, tunnel->type, TB_TUNNEL_DP);
KUNIT_EXPECT_PTR_EQ(test, tunnel->src_port, in);
KUNIT_EXPECT_PTR_EQ(test, tunnel->dst_port, out);
KUNIT_ASSERT_EQ(test, tunnel->npaths, 3);
KUNIT_ASSERT_EQ(test, tunnel->paths[0]->path_length, 13);
/* First hop */
KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[0].in_port, in);
/* Middle */
KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[6].in_port,
&host->ports[1]);
KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[6].out_port,
&host->ports[3]);
/* Last */
KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[12].out_port, out);
KUNIT_ASSERT_EQ(test, tunnel->paths[1]->path_length, 13);
KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[1]->hops[0].in_port, in);
KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[1]->hops[6].in_port,
&host->ports[1]);
KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[1]->hops[6].out_port,
&host->ports[3]);
KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[1]->hops[12].out_port, out);
KUNIT_ASSERT_EQ(test, tunnel->paths[2]->path_length, 13);
KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[2]->hops[0].in_port, out);
KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[2]->hops[6].in_port,
&host->ports[3]);
KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[2]->hops[6].out_port,
&host->ports[1]);
KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[2]->hops[12].out_port, in);
tb_tunnel_free(tunnel);
}
static void tb_test_tunnel_3dp(struct kunit *test)
{
struct tb_switch *host, *dev1, *dev2, *dev3, *dev4, *dev5;
struct tb_port *in1, *in2, *in3, *out1, *out2, *out3;
struct tb_tunnel *tunnel1, *tunnel2, *tunnel3;
/*
* Create 3 DP tunnels from Host to Devices #2, #5 and #4.
*
* [Host]
* 3 |
* 1 |
* [Device #1]
* 3 / | 5 \ 7
* 1 / | \ 1
* [Device #2] | [Device #4]
* | 1
* [Device #3]
* | 5
* | 1
* [Device #5]
*/
host = alloc_host_br(test);
dev1 = alloc_dev_default(test, host, 0x3, true);
dev2 = alloc_dev_default(test, dev1, 0x303, true);
dev3 = alloc_dev_default(test, dev1, 0x503, true);
dev4 = alloc_dev_default(test, dev1, 0x703, true);
dev5 = alloc_dev_default(test, dev3, 0x50503, true);
in1 = &host->ports[5];
in2 = &host->ports[6];
in3 = &host->ports[10];
out1 = &dev2->ports[13];
out2 = &dev5->ports[13];
out3 = &dev4->ports[14];
tunnel1 = tb_tunnel_alloc_dp(NULL, in1, out1, 1, 0, 0);
KUNIT_ASSERT_NOT_NULL(test, tunnel1);
KUNIT_EXPECT_EQ(test, tunnel1->type, TB_TUNNEL_DP);
KUNIT_EXPECT_PTR_EQ(test, tunnel1->src_port, in1);
KUNIT_EXPECT_PTR_EQ(test, tunnel1->dst_port, out1);
KUNIT_ASSERT_EQ(test, tunnel1->npaths, 3);
KUNIT_ASSERT_EQ(test, tunnel1->paths[0]->path_length, 3);
tunnel2 = tb_tunnel_alloc_dp(NULL, in2, out2, 1, 0, 0);
KUNIT_ASSERT_NOT_NULL(test, tunnel2);
KUNIT_EXPECT_EQ(test, tunnel2->type, TB_TUNNEL_DP);
KUNIT_EXPECT_PTR_EQ(test, tunnel2->src_port, in2);
KUNIT_EXPECT_PTR_EQ(test, tunnel2->dst_port, out2);
KUNIT_ASSERT_EQ(test, tunnel2->npaths, 3);
KUNIT_ASSERT_EQ(test, tunnel2->paths[0]->path_length, 4);
tunnel3 = tb_tunnel_alloc_dp(NULL, in3, out3, 1, 0, 0);
KUNIT_ASSERT_NOT_NULL(test, tunnel3);
KUNIT_EXPECT_EQ(test, tunnel3->type, TB_TUNNEL_DP);
KUNIT_EXPECT_PTR_EQ(test, tunnel3->src_port, in3);
KUNIT_EXPECT_PTR_EQ(test, tunnel3->dst_port, out3);
KUNIT_ASSERT_EQ(test, tunnel3->npaths, 3);
KUNIT_ASSERT_EQ(test, tunnel3->paths[0]->path_length, 3);
tb_tunnel_free(tunnel3);
tb_tunnel_free(tunnel2);
tb_tunnel_free(tunnel1);
}
static void tb_test_tunnel_usb3(struct kunit *test)
{
struct tb_switch *host, *dev1, *dev2;
struct tb_tunnel *tunnel1, *tunnel2;
struct tb_port *down, *up;
/*
* Create USB3 tunnel between host and two devices.
*
* [Host]
* 1 |
* 1 |
* [Device #1]
* \ 7
* \ 1
* [Device #2]
*/
host = alloc_host(test);
dev1 = alloc_dev_default(test, host, 0x1, true);
dev2 = alloc_dev_default(test, dev1, 0x701, true);
down = &host->ports[12];
up = &dev1->ports[16];
tunnel1 = tb_tunnel_alloc_usb3(NULL, up, down, 0, 0);
KUNIT_ASSERT_NOT_NULL(test, tunnel1);
KUNIT_EXPECT_EQ(test, tunnel1->type, TB_TUNNEL_USB3);
KUNIT_EXPECT_PTR_EQ(test, tunnel1->src_port, down);
KUNIT_EXPECT_PTR_EQ(test, tunnel1->dst_port, up);
KUNIT_ASSERT_EQ(test, tunnel1->npaths, 2);
KUNIT_ASSERT_EQ(test, tunnel1->paths[0]->path_length, 2);
KUNIT_EXPECT_PTR_EQ(test, tunnel1->paths[0]->hops[0].in_port, down);
KUNIT_EXPECT_PTR_EQ(test, tunnel1->paths[0]->hops[1].out_port, up);
KUNIT_ASSERT_EQ(test, tunnel1->paths[1]->path_length, 2);
KUNIT_EXPECT_PTR_EQ(test, tunnel1->paths[1]->hops[0].in_port, up);
KUNIT_EXPECT_PTR_EQ(test, tunnel1->paths[1]->hops[1].out_port, down);
down = &dev1->ports[17];
up = &dev2->ports[16];
tunnel2 = tb_tunnel_alloc_usb3(NULL, up, down, 0, 0);
KUNIT_ASSERT_NOT_NULL(test, tunnel2);
KUNIT_EXPECT_EQ(test, tunnel2->type, TB_TUNNEL_USB3);
KUNIT_EXPECT_PTR_EQ(test, tunnel2->src_port, down);
KUNIT_EXPECT_PTR_EQ(test, tunnel2->dst_port, up);
KUNIT_ASSERT_EQ(test, tunnel2->npaths, 2);
KUNIT_ASSERT_EQ(test, tunnel2->paths[0]->path_length, 2);
KUNIT_EXPECT_PTR_EQ(test, tunnel2->paths[0]->hops[0].in_port, down);
KUNIT_EXPECT_PTR_EQ(test, tunnel2->paths[0]->hops[1].out_port, up);
KUNIT_ASSERT_EQ(test, tunnel2->paths[1]->path_length, 2);
KUNIT_EXPECT_PTR_EQ(test, tunnel2->paths[1]->hops[0].in_port, up);
KUNIT_EXPECT_PTR_EQ(test, tunnel2->paths[1]->hops[1].out_port, down);
tb_tunnel_free(tunnel2);
tb_tunnel_free(tunnel1);
}
static void tb_test_tunnel_port_on_path(struct kunit *test)
{
struct tb_switch *host, *dev1, *dev2, *dev3, *dev4, *dev5;
struct tb_port *in, *out, *port;
struct tb_tunnel *dp_tunnel;
/*
* [Host]
* 3 |
* 1 |
* [Device #1]
* 3 / | 5 \ 7
* 1 / | \ 1
* [Device #2] | [Device #4]
* | 1
* [Device #3]
* | 5
* | 1
* [Device #5]
*/
host = alloc_host(test);
dev1 = alloc_dev_default(test, host, 0x3, true);
dev2 = alloc_dev_with_dpin(test, dev1, 0x303, true);
dev3 = alloc_dev_default(test, dev1, 0x503, true);
dev4 = alloc_dev_default(test, dev1, 0x703, true);
dev5 = alloc_dev_default(test, dev3, 0x50503, true);
in = &dev2->ports[13];
out = &dev5->ports[13];
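/*
 * Only adapters the DP tunnel actually traverses (Device #2 ->
 * Device #1 -> Device #3 -> Device #5) should report being on the
 * path; ports towards the host and Device #4 should not.
 */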
dp_tunnel = tb_tunnel_alloc_dp(NULL, in, out, 1, 0, 0);
KUNIT_ASSERT_NOT_NULL(test, dp_tunnel);
KUNIT_EXPECT_TRUE(test, tb_tunnel_port_on_path(dp_tunnel, in));
KUNIT_EXPECT_TRUE(test, tb_tunnel_port_on_path(dp_tunnel, out));
port = &host->ports[8];
KUNIT_EXPECT_FALSE(test, tb_tunnel_port_on_path(dp_tunnel, port));
port = &host->ports[3];
KUNIT_EXPECT_FALSE(test, tb_tunnel_port_on_path(dp_tunnel, port));
port = &dev1->ports[1];
KUNIT_EXPECT_FALSE(test, tb_tunnel_port_on_path(dp_tunnel, port));
port = &dev1->ports[3];
KUNIT_EXPECT_TRUE(test, tb_tunnel_port_on_path(dp_tunnel, port));
port = &dev1->ports[5];
KUNIT_EXPECT_TRUE(test, tb_tunnel_port_on_path(dp_tunnel, port));
port = &dev1->ports[7];
KUNIT_EXPECT_FALSE(test, tb_tunnel_port_on_path(dp_tunnel, port));
port = &dev3->ports[1];
KUNIT_EXPECT_TRUE(test, tb_tunnel_port_on_path(dp_tunnel, port));
port = &dev5->ports[1];
KUNIT_EXPECT_TRUE(test, tb_tunnel_port_on_path(dp_tunnel, port));
port = &dev4->ports[1];
KUNIT_EXPECT_FALSE(test, tb_tunnel_port_on_path(dp_tunnel, port));
tb_tunnel_free(dp_tunnel);
}
static void tb_test_tunnel_dma(struct kunit *test)
{
struct tb_port *nhi, *port;
struct tb_tunnel *tunnel;
struct tb_switch *host;
/*
* Create DMA tunnel from NHI to port 1 and back.
*
* [Host 1]
* 1 ^ In HopID 1 -> Out HopID 8
* |
* v In HopID 8 -> Out HopID 1
* ............ Domain border
* |
* [Host 2]
*/
host = alloc_host(test);
nhi = &host->ports[7];
port = &host->ports[1];
tunnel = tb_tunnel_alloc_dma(NULL, nhi, port, 8, 1, 8, 1);
KUNIT_ASSERT_NOT_NULL(test, tunnel);
KUNIT_EXPECT_EQ(test, tunnel->type, TB_TUNNEL_DMA);
KUNIT_EXPECT_PTR_EQ(test, tunnel->src_port, nhi);
KUNIT_EXPECT_PTR_EQ(test, tunnel->dst_port, port);
KUNIT_ASSERT_EQ(test, tunnel->npaths, 2);
/* RX path */
KUNIT_ASSERT_EQ(test, tunnel->paths[0]->path_length, 1);
KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[0].in_port, port);
KUNIT_EXPECT_EQ(test, tunnel->paths[0]->hops[0].in_hop_index, 8);
KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[0].out_port, nhi);
KUNIT_EXPECT_EQ(test, tunnel->paths[0]->hops[0].next_hop_index, 1);
/* TX path */
KUNIT_ASSERT_EQ(test, tunnel->paths[1]->path_length, 1);
KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[1]->hops[0].in_port, nhi);
KUNIT_EXPECT_EQ(test, tunnel->paths[1]->hops[0].in_hop_index, 1);
KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[1]->hops[0].out_port, port);
KUNIT_EXPECT_EQ(test, tunnel->paths[1]->hops[0].next_hop_index, 8);
tb_tunnel_free(tunnel);
}
static void tb_test_tunnel_dma_rx(struct kunit *test)
{
struct tb_port *nhi, *port;
struct tb_tunnel *tunnel;
struct tb_switch *host;
/*
* Create DMA RX tunnel from port 1 to NHI.
*
* [Host 1]
* 1 ^
* |
* | In HopID 15 -> Out HopID 2
* ............ Domain border
* |
* [Host 2]
*/
host = alloc_host(test);
nhi = &host->ports[7];
port = &host->ports[1];
tunnel = tb_tunnel_alloc_dma(NULL, nhi, port, -1, -1, 15, 2);
KUNIT_ASSERT_NOT_NULL(test, tunnel);
KUNIT_EXPECT_EQ(test, tunnel->type, TB_TUNNEL_DMA);
KUNIT_EXPECT_PTR_EQ(test, tunnel->src_port, nhi);
KUNIT_EXPECT_PTR_EQ(test, tunnel->dst_port, port);
KUNIT_ASSERT_EQ(test, tunnel->npaths, 1);
/* RX path */
KUNIT_ASSERT_EQ(test, tunnel->paths[0]->path_length, 1);
KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[0].in_port, port);
KUNIT_EXPECT_EQ(test, tunnel->paths[0]->hops[0].in_hop_index, 15);
KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[0].out_port, nhi);
KUNIT_EXPECT_EQ(test, tunnel->paths[0]->hops[0].next_hop_index, 2);
tb_tunnel_free(tunnel);
}
static void tb_test_tunnel_dma_tx(struct kunit *test)
{
struct tb_port *nhi, *port;
struct tb_tunnel *tunnel;
struct tb_switch *host;
/*
* Create DMA TX tunnel from NHI to port 1.
*
* [Host 1]
* 1 | In HopID 2 -> Out HopID 15
* |
* v
* ............ Domain border
* |
* [Host 2]
*/
host = alloc_host(test);
nhi = &host->ports[7];
port = &host->ports[1];
tunnel = tb_tunnel_alloc_dma(NULL, nhi, port, 15, 2, -1, -1);
KUNIT_ASSERT_NOT_NULL(test, tunnel);
KUNIT_EXPECT_EQ(test, tunnel->type, TB_TUNNEL_DMA);
KUNIT_EXPECT_PTR_EQ(test, tunnel->src_port, nhi);
KUNIT_EXPECT_PTR_EQ(test, tunnel->dst_port, port);
KUNIT_ASSERT_EQ(test, tunnel->npaths, 1);
/* TX path */
KUNIT_ASSERT_EQ(test, tunnel->paths[0]->path_length, 1);
KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[0].in_port, nhi);
KUNIT_EXPECT_EQ(test, tunnel->paths[0]->hops[0].in_hop_index, 2);
KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[0].out_port, port);
KUNIT_EXPECT_EQ(test, tunnel->paths[0]->hops[0].next_hop_index, 15);
tb_tunnel_free(tunnel);
}
static void tb_test_tunnel_dma_chain(struct kunit *test)
{
struct tb_switch *host, *dev1, *dev2;
struct tb_port *nhi, *port;
struct tb_tunnel *tunnel;
/*
* Create DMA tunnel from NHI to Device #2 port 3 and back.
*
* [Host 1]
* 1 ^ In HopID 1 -> Out HopID x
* |
* 1 | In HopID x -> Out HopID 1
* [Device #1]
* 7 \
* 1 \
* [Device #2]
* 3 | In HopID x -> Out HopID 8
* |
* v In HopID 8 -> Out HopID x
* ............ Domain border
* |
* [Host 2]
*/
host = alloc_host(test);
dev1 = alloc_dev_default(test, host, 0x1, true);
dev2 = alloc_dev_default(test, dev1, 0x701, true);
nhi = &host->ports[7];
port = &dev2->ports[3];
tunnel = tb_tunnel_alloc_dma(NULL, nhi, port, 8, 1, 8, 1);
KUNIT_ASSERT_NOT_NULL(test, tunnel);
KUNIT_EXPECT_EQ(test, tunnel->type, TB_TUNNEL_DMA);
KUNIT_EXPECT_PTR_EQ(test, tunnel->src_port, nhi);
KUNIT_EXPECT_PTR_EQ(test, tunnel->dst_port, port);
KUNIT_ASSERT_EQ(test, tunnel->npaths, 2);
/* RX path */
KUNIT_ASSERT_EQ(test, tunnel->paths[0]->path_length, 3);
KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[0].in_port, port);
KUNIT_EXPECT_EQ(test, tunnel->paths[0]->hops[0].in_hop_index, 8);
KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[0].out_port,
&dev2->ports[1]);
KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[1].in_port,
&dev1->ports[7]);
KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[1].out_port,
&dev1->ports[1]);
KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[2].in_port,
&host->ports[1]);
KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[2].out_port, nhi);
KUNIT_EXPECT_EQ(test, tunnel->paths[0]->hops[2].next_hop_index, 1);
/* TX path */
KUNIT_ASSERT_EQ(test, tunnel->paths[1]->path_length, 3);
KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[1]->hops[0].in_port, nhi);
KUNIT_EXPECT_EQ(test, tunnel->paths[1]->hops[0].in_hop_index, 1);
KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[1]->hops[1].in_port,
&dev1->ports[1]);
KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[1]->hops[1].out_port,
&dev1->ports[7]);
KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[1]->hops[2].in_port,
&dev2->ports[1]);
KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[1]->hops[2].out_port, port);
KUNIT_EXPECT_EQ(test, tunnel->paths[1]->hops[2].next_hop_index, 8);
tb_tunnel_free(tunnel);
}
static void tb_test_tunnel_dma_match(struct kunit *test)
{
struct tb_port *nhi, *port;
struct tb_tunnel *tunnel;
struct tb_switch *host;
host = alloc_host(test);
nhi = &host->ports[7];
port = &host->ports[1];
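/*
 * A negative HopID passed to tb_tunnel_match_dma() acts as a
 * wildcard, so only an explicitly conflicting HopID should make the
 * match fail.
 */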
tunnel = tb_tunnel_alloc_dma(NULL, nhi, port, 15, 1, 15, 1);
KUNIT_ASSERT_NOT_NULL(test, tunnel);
KUNIT_ASSERT_TRUE(test, tb_tunnel_match_dma(tunnel, 15, 1, 15, 1));
KUNIT_ASSERT_FALSE(test, tb_tunnel_match_dma(tunnel, 8, 1, 15, 1));
KUNIT_ASSERT_TRUE(test, tb_tunnel_match_dma(tunnel, -1, -1, 15, 1));
KUNIT_ASSERT_TRUE(test, tb_tunnel_match_dma(tunnel, 15, 1, -1, -1));
KUNIT_ASSERT_TRUE(test, tb_tunnel_match_dma(tunnel, 15, -1, -1, -1));
KUNIT_ASSERT_TRUE(test, tb_tunnel_match_dma(tunnel, -1, 1, -1, -1));
KUNIT_ASSERT_TRUE(test, tb_tunnel_match_dma(tunnel, -1, -1, 15, -1));
KUNIT_ASSERT_TRUE(test, tb_tunnel_match_dma(tunnel, -1, -1, -1, 1));
KUNIT_ASSERT_TRUE(test, tb_tunnel_match_dma(tunnel, -1, -1, -1, -1));
KUNIT_ASSERT_FALSE(test, tb_tunnel_match_dma(tunnel, 8, -1, 8, -1));
tb_tunnel_free(tunnel);
tunnel = tb_tunnel_alloc_dma(NULL, nhi, port, 15, 1, -1, -1);
KUNIT_ASSERT_NOT_NULL(test, tunnel);
KUNIT_ASSERT_TRUE(test, tb_tunnel_match_dma(tunnel, 15, 1, -1, -1));
KUNIT_ASSERT_TRUE(test, tb_tunnel_match_dma(tunnel, 15, -1, -1, -1));
KUNIT_ASSERT_TRUE(test, tb_tunnel_match_dma(tunnel, -1, 1, -1, -1));
KUNIT_ASSERT_TRUE(test, tb_tunnel_match_dma(tunnel, -1, -1, -1, -1));
KUNIT_ASSERT_FALSE(test, tb_tunnel_match_dma(tunnel, 15, 1, 15, 1));
KUNIT_ASSERT_FALSE(test, tb_tunnel_match_dma(tunnel, -1, -1, 15, 1));
KUNIT_ASSERT_FALSE(test, tb_tunnel_match_dma(tunnel, 15, 11, -1, -1));
tb_tunnel_free(tunnel);
tunnel = tb_tunnel_alloc_dma(NULL, nhi, port, -1, -1, 15, 11);
KUNIT_ASSERT_NOT_NULL(test, tunnel);
KUNIT_ASSERT_TRUE(test, tb_tunnel_match_dma(tunnel, -1, -1, 15, 11));
KUNIT_ASSERT_TRUE(test, tb_tunnel_match_dma(tunnel, -1, -1, 15, -1));
KUNIT_ASSERT_TRUE(test, tb_tunnel_match_dma(tunnel, -1, -1, -1, 11));
KUNIT_ASSERT_TRUE(test, tb_tunnel_match_dma(tunnel, -1, -1, -1, -1));
KUNIT_ASSERT_FALSE(test, tb_tunnel_match_dma(tunnel, -1, -1, 15, 1));
KUNIT_ASSERT_FALSE(test, tb_tunnel_match_dma(tunnel, -1, -1, 10, 11));
KUNIT_ASSERT_FALSE(test, tb_tunnel_match_dma(tunnel, 15, 11, -1, -1));
tb_tunnel_free(tunnel);
}
static void tb_test_credit_alloc_legacy_not_bonded(struct kunit *test)
{
struct tb_switch *host, *dev;
struct tb_port *up, *down;
struct tb_tunnel *tunnel;
struct tb_path *path;
host = alloc_host(test);
dev = alloc_dev_default(test, host, 0x1, false);
down = &host->ports[8];
up = &dev->ports[9];
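/*
 * The legacy device does not support credit allocation, so the
 * device side hop is expected to get 16 initial credits over the
 * single unbonded lane (doubled to 32 in the bonded case below).
 */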
tunnel = tb_tunnel_alloc_pci(NULL, up, down);
KUNIT_ASSERT_NOT_NULL(test, tunnel);
KUNIT_ASSERT_EQ(test, tunnel->npaths, (size_t)2);
path = tunnel->paths[0];
KUNIT_ASSERT_EQ(test, path->path_length, 2);
KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 7U);
KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 16U);
path = tunnel->paths[1];
KUNIT_ASSERT_EQ(test, path->path_length, 2);
KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 7U);
KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 16U);
tb_tunnel_free(tunnel);
}
static void tb_test_credit_alloc_legacy_bonded(struct kunit *test)
{
struct tb_switch *host, *dev;
struct tb_port *up, *down;
struct tb_tunnel *tunnel;
struct tb_path *path;
host = alloc_host(test);
dev = alloc_dev_default(test, host, 0x1, true);
down = &host->ports[8];
up = &dev->ports[9];
tunnel = tb_tunnel_alloc_pci(NULL, up, down);
KUNIT_ASSERT_NOT_NULL(test, tunnel);
KUNIT_ASSERT_EQ(test, tunnel->npaths, (size_t)2);
path = tunnel->paths[0];
KUNIT_ASSERT_EQ(test, path->path_length, 2);
KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 7U);
KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 32U);
path = tunnel->paths[1];
KUNIT_ASSERT_EQ(test, path->path_length, 2);
KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 7U);
KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 32U);
tb_tunnel_free(tunnel);
}
static void tb_test_credit_alloc_pcie(struct kunit *test)
{
struct tb_switch *host, *dev;
struct tb_port *up, *down;
struct tb_tunnel *tunnel;
struct tb_path *path;
host = alloc_host_usb4(test);
dev = alloc_dev_usb4(test, host, 0x1, true);
down = &host->ports[8];
up = &dev->ports[9];
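/*
 * Both ends are USB4 with credit allocation, so the PCIe hops are
 * expected to get 32 initial credits on the device side and 64 on
 * the host side.
 */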
tunnel = tb_tunnel_alloc_pci(NULL, up, down);
KUNIT_ASSERT_NOT_NULL(test, tunnel);
KUNIT_ASSERT_EQ(test, tunnel->npaths, (size_t)2);
path = tunnel->paths[0];
KUNIT_ASSERT_EQ(test, path->path_length, 2);
KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 7U);
KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 32U);
path = tunnel->paths[1];
KUNIT_ASSERT_EQ(test, path->path_length, 2);
KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 7U);
KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 64U);
tb_tunnel_free(tunnel);
}
static void tb_test_credit_alloc_without_dp(struct kunit *test)
{
struct tb_switch *host, *dev;
struct tb_port *up, *down;
struct tb_tunnel *tunnel;
struct tb_path *path;
host = alloc_host_usb4(test);
dev = alloc_dev_without_dp(test, host, 0x1, true);
/*
* The device has no DP, therefore baMinDPmain = baMinDPaux = 0.
*
* Create a PCIe path that is given fewer buffers than baMaxPCIe.
*
* For a device with the following buffer configuration:
* baMaxUSB3 = 109
* baMinDPaux = 0
* baMinDPmain = 0
* baMaxPCIe = 30
* baMaxHI = 1
* Remaining Buffers = Total - (CP + DP) = 120 - (2 + 0) = 118
* PCIe Credits = Max(6, Min(baMaxPCIe, Remaining Buffers - baMaxUSB3))
*		= Max(6, Min(30, 9)) = 9
*/
down = &host->ports[8];
up = &dev->ports[9];
tunnel = tb_tunnel_alloc_pci(NULL, up, down);
KUNIT_ASSERT_NOT_NULL(test, tunnel);
KUNIT_ASSERT_EQ(test, tunnel->npaths, (size_t)2);
/* PCIe downstream path */
path = tunnel->paths[0];
KUNIT_ASSERT_EQ(test, path->path_length, 2);
KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 7U);
KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 9U);
/* PCIe upstream path */
path = tunnel->paths[1];
KUNIT_ASSERT_EQ(test, path->path_length, 2);
KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 7U);
KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 64U);
tb_tunnel_free(tunnel);
}
static void tb_test_credit_alloc_dp(struct kunit *test)
{
struct tb_switch *host, *dev;
struct tb_port *in, *out;
struct tb_tunnel *tunnel;
struct tb_path *path;
host = alloc_host_usb4(test);
dev = alloc_dev_usb4(test, host, 0x1, true);
in = &host->ports[5];
out = &dev->ports[14];
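/*
 * The DP main path uses non-flow-controlled credits (baMinDPmain =
 * 18 on the device side) while both AUX paths get baMinDPaux = 1
 * credit per hop.
 */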
tunnel = tb_tunnel_alloc_dp(NULL, in, out, 1, 0, 0);
KUNIT_ASSERT_NOT_NULL(test, tunnel);
KUNIT_ASSERT_EQ(test, tunnel->npaths, (size_t)3);
/* Video (main) path */
path = tunnel->paths[0];
KUNIT_ASSERT_EQ(test, path->path_length, 2);
KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 12U);
KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 0U);
KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 18U);
KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 0U);
/* AUX TX */
path = tunnel->paths[1];
KUNIT_ASSERT_EQ(test, path->path_length, 2);
KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 1U);
KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 1U);
/* AUX RX */
path = tunnel->paths[2];
KUNIT_ASSERT_EQ(test, path->path_length, 2);
KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 1U);
KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 1U);
tb_tunnel_free(tunnel);
}
static void tb_test_credit_alloc_usb3(struct kunit *test)
{
struct tb_switch *host, *dev;
struct tb_port *up, *down;
struct tb_tunnel *tunnel;
struct tb_path *path;
host = alloc_host_usb4(test);
dev = alloc_dev_usb4(test, host, 0x1, true);
down = &host->ports[12];
up = &dev->ports[16];
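/* The device side hop is capped at baMaxUSB3 = 14 credits; the host side gets 32 */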
tunnel = tb_tunnel_alloc_usb3(NULL, up, down, 0, 0);
KUNIT_ASSERT_NOT_NULL(test, tunnel);
KUNIT_ASSERT_EQ(test, tunnel->npaths, (size_t)2);
path = tunnel->paths[0];
KUNIT_ASSERT_EQ(test, path->path_length, 2);
KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 7U);
KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 14U);
path = tunnel->paths[1];
KUNIT_ASSERT_EQ(test, path->path_length, 2);
KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 7U);
KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 32U);
tb_tunnel_free(tunnel);
}
static void tb_test_credit_alloc_dma(struct kunit *test)
{
struct tb_switch *host, *dev;
struct tb_port *nhi, *port;
struct tb_tunnel *tunnel;
struct tb_path *path;
host = alloc_host_usb4(test);
dev = alloc_dev_usb4(test, host, 0x1, true);
nhi = &host->ports[7];
port = &dev->ports[3];
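/*
 * Each lane adapter hop is expected to get 14 credits (the device
 * advertises baMaxHI = 14); the NHI adapter itself does not take any
 * link credits.
 */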
tunnel = tb_tunnel_alloc_dma(NULL, nhi, port, 8, 1, 8, 1);
KUNIT_ASSERT_NOT_NULL(test, tunnel);
KUNIT_ASSERT_EQ(test, tunnel->npaths, (size_t)2);
/* DMA RX */
path = tunnel->paths[0];
KUNIT_ASSERT_EQ(test, path->path_length, 2);
KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 14U);
KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 14U);
/* DMA TX */
path = tunnel->paths[1];
KUNIT_ASSERT_EQ(test, path->path_length, 2);
KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 0U);
KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 14U);
tb_tunnel_free(tunnel);
}
static void tb_test_credit_alloc_dma_multiple(struct kunit *test)
{
struct tb_tunnel *tunnel1, *tunnel2, *tunnel3;
struct tb_switch *host, *dev;
struct tb_port *nhi, *port;
struct tb_path *path;
host = alloc_host_usb4(test);
dev = alloc_dev_usb4(test, host, 0x1, true);
nhi = &host->ports[7];
port = &dev->ports[3];
/*
* Create three DMA tunnels through the same ports. With the
* default buffers we should be able to create two; the third
* one fails.
*
* For the default host we have the following buffers for DMA:
*
* 120 - (2 + 2 * (1 + 0) + 32 + 64 + spare) = 20
*
* For the device we have the following:
*
* 120 - (2 + 2 * (1 + 18) + 14 + 32 + spare) = 34
*
* spare = 14 + 1 = 15
*
* So on the host the first tunnel gets 14 credits and the second
* gets the remaining 1, and then we run out of buffers.
*/
tunnel1 = tb_tunnel_alloc_dma(NULL, nhi, port, 8, 1, 8, 1);
KUNIT_ASSERT_NOT_NULL(test, tunnel1);
KUNIT_ASSERT_EQ(test, tunnel1->npaths, (size_t)2);
path = tunnel1->paths[0];
KUNIT_ASSERT_EQ(test, path->path_length, 2);
KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 14U);
KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 14U);
path = tunnel1->paths[1];
KUNIT_ASSERT_EQ(test, path->path_length, 2);
KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 0U);
KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 14U);
tunnel2 = tb_tunnel_alloc_dma(NULL, nhi, port, 9, 2, 9, 2);
KUNIT_ASSERT_NOT_NULL(test, tunnel2);
KUNIT_ASSERT_EQ(test, tunnel2->npaths, (size_t)2);
path = tunnel2->paths[0];
KUNIT_ASSERT_EQ(test, path->path_length, 2);
KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 14U);
KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 1U);
path = tunnel2->paths[1];
KUNIT_ASSERT_EQ(test, path->path_length, 2);
KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 0U);
KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 1U);
tunnel3 = tb_tunnel_alloc_dma(NULL, nhi, port, 10, 3, 10, 3);
KUNIT_ASSERT_NULL(test, tunnel3);
/*
* Release the first DMA tunnel. That should make 14 buffers
* available for the next tunnel.
*/
tb_tunnel_free(tunnel1);
tunnel3 = tb_tunnel_alloc_dma(NULL, nhi, port, 10, 3, 10, 3);
KUNIT_ASSERT_NOT_NULL(test, tunnel3);
path = tunnel3->paths[0];
KUNIT_ASSERT_EQ(test, path->path_length, 2);
KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 14U);
KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 14U);
path = tunnel3->paths[1];
KUNIT_ASSERT_EQ(test, path->path_length, 2);
KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 0U);
KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 14U);
tb_tunnel_free(tunnel3);
tb_tunnel_free(tunnel2);
}
static struct tb_tunnel *TB_TEST_PCIE_TUNNEL(struct kunit *test,
struct tb_switch *host, struct tb_switch *dev)
{
struct tb_port *up, *down;
struct tb_tunnel *pcie_tunnel;
struct tb_path *path;
down = &host->ports[8];
up = &dev->ports[9];
pcie_tunnel = tb_tunnel_alloc_pci(NULL, up, down);
KUNIT_ASSERT_NOT_NULL(test, pcie_tunnel);
KUNIT_ASSERT_EQ(test, pcie_tunnel->npaths, (size_t)2);
path = pcie_tunnel->paths[0];
KUNIT_ASSERT_EQ(test, path->path_length, 2);
KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 7U);
KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 32U);
path = pcie_tunnel->paths[1];
KUNIT_ASSERT_EQ(test, path->path_length, 2);
KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 7U);
KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 64U);
return pcie_tunnel;
}
static struct tb_tunnel *TB_TEST_DP_TUNNEL1(struct kunit *test,
struct tb_switch *host, struct tb_switch *dev)
{
struct tb_port *in, *out;
struct tb_tunnel *dp_tunnel1;
struct tb_path *path;
in = &host->ports[5];
out = &dev->ports[13];
dp_tunnel1 = tb_tunnel_alloc_dp(NULL, in, out, 1, 0, 0);
KUNIT_ASSERT_NOT_NULL(test, dp_tunnel1);
KUNIT_ASSERT_EQ(test, dp_tunnel1->npaths, (size_t)3);
path = dp_tunnel1->paths[0];
KUNIT_ASSERT_EQ(test, path->path_length, 2);
KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 12U);
KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 0U);
KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 18U);
KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 0U);
path = dp_tunnel1->paths[1];
KUNIT_ASSERT_EQ(test, path->path_length, 2);
KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 1U);
KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 1U);
path = dp_tunnel1->paths[2];
KUNIT_ASSERT_EQ(test, path->path_length, 2);
KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 1U);
KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 1U);
return dp_tunnel1;
}
static struct tb_tunnel *TB_TEST_DP_TUNNEL2(struct kunit *test,
struct tb_switch *host, struct tb_switch *dev)
{
struct tb_port *in, *out;
struct tb_tunnel *dp_tunnel2;
struct tb_path *path;
in = &host->ports[6];
out = &dev->ports[14];
dp_tunnel2 = tb_tunnel_alloc_dp(NULL, in, out, 1, 0, 0);
KUNIT_ASSERT_NOT_NULL(test, dp_tunnel2);
KUNIT_ASSERT_EQ(test, dp_tunnel2->npaths, (size_t)3);
path = dp_tunnel2->paths[0];
KUNIT_ASSERT_EQ(test, path->path_length, 2);
KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 12U);
KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 0U);
KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 18U);
KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 0U);
path = dp_tunnel2->paths[1];
KUNIT_ASSERT_EQ(test, path->path_length, 2);
KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 1U);
KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 1U);
path = dp_tunnel2->paths[2];
KUNIT_ASSERT_EQ(test, path->path_length, 2);
KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 1U);
KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 1U);
return dp_tunnel2;
}
static struct tb_tunnel *TB_TEST_USB3_TUNNEL(struct kunit *test,
struct tb_switch *host, struct tb_switch *dev)
{
struct tb_port *up, *down;
struct tb_tunnel *usb3_tunnel;
struct tb_path *path;
down = &host->ports[12];
up = &dev->ports[16];
usb3_tunnel = tb_tunnel_alloc_usb3(NULL, up, down, 0, 0);
KUNIT_ASSERT_NOT_NULL(test, usb3_tunnel);
KUNIT_ASSERT_EQ(test, usb3_tunnel->npaths, (size_t)2);
path = usb3_tunnel->paths[0];
KUNIT_ASSERT_EQ(test, path->path_length, 2);
KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 7U);
KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 14U);
path = usb3_tunnel->paths[1];
KUNIT_ASSERT_EQ(test, path->path_length, 2);
KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 7U);
KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 32U);
return usb3_tunnel;
}
static struct tb_tunnel *TB_TEST_DMA_TUNNEL1(struct kunit *test,
struct tb_switch *host, struct tb_switch *dev)
{
struct tb_port *nhi, *port;
struct tb_tunnel *dma_tunnel1;
struct tb_path *path;
nhi = &host->ports[7];
port = &dev->ports[3];
dma_tunnel1 = tb_tunnel_alloc_dma(NULL, nhi, port, 8, 1, 8, 1);
KUNIT_ASSERT_NOT_NULL(test, dma_tunnel1);
KUNIT_ASSERT_EQ(test, dma_tunnel1->npaths, (size_t)2);
path = dma_tunnel1->paths[0];
KUNIT_ASSERT_EQ(test, path->path_length, 2);
KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 14U);
KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 14U);
path = dma_tunnel1->paths[1];
KUNIT_ASSERT_EQ(test, path->path_length, 2);
KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 0U);
KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 14U);
return dma_tunnel1;
}
static struct tb_tunnel *TB_TEST_DMA_TUNNEL2(struct kunit *test,
struct tb_switch *host, struct tb_switch *dev)
{
struct tb_port *nhi, *port;
struct tb_tunnel *dma_tunnel2;
struct tb_path *path;
nhi = &host->ports[7];
port = &dev->ports[3];
dma_tunnel2 = tb_tunnel_alloc_dma(NULL, nhi, port, 9, 2, 9, 2);
KUNIT_ASSERT_NOT_NULL(test, dma_tunnel2);
KUNIT_ASSERT_EQ(test, dma_tunnel2->npaths, (size_t)2);
path = dma_tunnel2->paths[0];
KUNIT_ASSERT_EQ(test, path->path_length, 2);
KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 14U);
KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 1U);
path = dma_tunnel2->paths[1];
KUNIT_ASSERT_EQ(test, path->path_length, 2);
KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 0U);
KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 1U);
return dma_tunnel2;
}
static void tb_test_credit_alloc_all(struct kunit *test)
{
struct tb_tunnel *pcie_tunnel, *dp_tunnel1, *dp_tunnel2, *usb3_tunnel;
struct tb_tunnel *dma_tunnel1, *dma_tunnel2;
struct tb_switch *host, *dev;
/*
* Create PCIe, 2 x DP, USB 3.x and two DMA tunnels from host to
* device. The expectation is that all of these can be established
* with the default credit allocation found in Intel hardware.
*/
host = alloc_host_usb4(test);
dev = alloc_dev_usb4(test, host, 0x1, true);
pcie_tunnel = TB_TEST_PCIE_TUNNEL(test, host, dev);
dp_tunnel1 = TB_TEST_DP_TUNNEL1(test, host, dev);
dp_tunnel2 = TB_TEST_DP_TUNNEL2(test, host, dev);
usb3_tunnel = TB_TEST_USB3_TUNNEL(test, host, dev);
dma_tunnel1 = TB_TEST_DMA_TUNNEL1(test, host, dev);
dma_tunnel2 = TB_TEST_DMA_TUNNEL2(test, host, dev);
tb_tunnel_free(dma_tunnel2);
tb_tunnel_free(dma_tunnel1);
tb_tunnel_free(usb3_tunnel);
tb_tunnel_free(dp_tunnel2);
tb_tunnel_free(dp_tunnel1);
tb_tunnel_free(pcie_tunnel);
}
static const u32 root_directory[] = {
0x55584401, /* "UXD" v1 */
0x00000018, /* Root directory length */
0x76656e64, /* "vend" */
0x6f726964, /* "orid" */
0x76000001, /* "v" R 1 */
0x00000a27, /* Immediate value, ! Vendor ID */
0x76656e64, /* "vend" */
0x6f726964, /* "orid" */
0x74000003, /* "t" R 3 */
0x0000001a, /* Text leaf offset, (“Apple Inc.”) */
0x64657669, /* "devi" */
0x63656964, /* "ceid" */
0x76000001, /* "v" R 1 */
0x0000000a, /* Immediate value, ! Device ID */
0x64657669, /* "devi" */
0x63656964, /* "ceid" */
0x74000003, /* "t" R 3 */
0x0000001d, /* Text leaf offset, (“Macintosh”) */
0x64657669, /* "devi" */
0x63657276, /* "cerv" */
0x76000001, /* "v" R 1 */
0x80000100, /* Immediate value, Device Revision */
0x6e657477, /* "netw" */
0x6f726b00, /* "ork" */
0x44000014, /* "D" R 20 */
0x00000021, /* Directory data offset, (Network Directory) */
0x4170706c, /* "Appl" */
0x6520496e, /* "e In" */
0x632e0000, /* "c." ! */
0x4d616369, /* "Maci" */
0x6e746f73, /* "ntos" */
0x68000000, /* "h" */
0x00000000, /* padding */
0xca8961c6, /* Directory UUID, Network Directory */
0x9541ce1c, /* Directory UUID, Network Directory */
0x5949b8bd, /* Directory UUID, Network Directory */
0x4f5a5f2e, /* Directory UUID, Network Directory */
0x70727463, /* "prtc" */
0x69640000, /* "id" */
0x76000001, /* "v" R 1 */
0x00000001, /* Immediate value, Network Protocol ID */
0x70727463, /* "prtc" */
0x76657273, /* "vers" */
0x76000001, /* "v" R 1 */
0x00000001, /* Immediate value, Network Protocol Version */
0x70727463, /* "prtc" */
0x72657673, /* "revs" */
0x76000001, /* "v" R 1 */
0x00000001, /* Immediate value, Network Protocol Revision */
0x70727463, /* "prtc" */
0x73746e73, /* "stns" */
0x76000001, /* "v" R 1 */
0x00000000, /* Immediate value, Network Protocol Settings */
};
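/*
 * A reading aid for the block above (inferred from the annotations, not
 * normative): each directory entry appears to occupy four dwords - two
 * dwords of ASCII key ("vend" + "orid"), one dword combining the type
 * character ('v' immediate value, 't' text leaf, 'D' sub-directory) with
 * a length or size, and one dword holding either the immediate value or
 * a dword offset to the leaf data. The declared root directory length of
 * 0x18 (24 dwords) matches the six 4-dword entries listed before the
 * text leaves start.
 */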
static const uuid_t network_dir_uuid =
UUID_INIT(0xc66189ca, 0x1cce, 0x4195,
0xbd, 0xb8, 0x49, 0x59, 0x2e, 0x5f, 0x5a, 0x4f);
static void tb_test_property_parse(struct kunit *test)
{
struct tb_property_dir *dir, *network_dir;
struct tb_property *p;
dir = tb_property_parse_dir(root_directory, ARRAY_SIZE(root_directory));
KUNIT_ASSERT_NOT_NULL(test, dir);
p = tb_property_find(dir, "foo", TB_PROPERTY_TYPE_TEXT);
KUNIT_ASSERT_NULL(test, p);
p = tb_property_find(dir, "vendorid", TB_PROPERTY_TYPE_TEXT);
KUNIT_ASSERT_NOT_NULL(test, p);
KUNIT_EXPECT_STREQ(test, p->value.text, "Apple Inc.");
p = tb_property_find(dir, "vendorid", TB_PROPERTY_TYPE_VALUE);
KUNIT_ASSERT_NOT_NULL(test, p);
KUNIT_EXPECT_EQ(test, p->value.immediate, 0xa27);
p = tb_property_find(dir, "deviceid", TB_PROPERTY_TYPE_TEXT);
KUNIT_ASSERT_NOT_NULL(test, p);
KUNIT_EXPECT_STREQ(test, p->value.text, "Macintosh");
p = tb_property_find(dir, "deviceid", TB_PROPERTY_TYPE_VALUE);
KUNIT_ASSERT_NOT_NULL(test, p);
KUNIT_EXPECT_EQ(test, p->value.immediate, 0xa);
p = tb_property_find(dir, "missing", TB_PROPERTY_TYPE_DIRECTORY);
KUNIT_ASSERT_NULL(test, p);
p = tb_property_find(dir, "network", TB_PROPERTY_TYPE_DIRECTORY);
KUNIT_ASSERT_NOT_NULL(test, p);
network_dir = p->value.dir;
KUNIT_EXPECT_TRUE(test, uuid_equal(network_dir->uuid, &network_dir_uuid));
p = tb_property_find(network_dir, "prtcid", TB_PROPERTY_TYPE_VALUE);
KUNIT_ASSERT_NOT_NULL(test, p);
KUNIT_EXPECT_EQ(test, p->value.immediate, 0x1);
p = tb_property_find(network_dir, "prtcvers", TB_PROPERTY_TYPE_VALUE);
KUNIT_ASSERT_NOT_NULL(test, p);
KUNIT_EXPECT_EQ(test, p->value.immediate, 0x1);
p = tb_property_find(network_dir, "prtcrevs", TB_PROPERTY_TYPE_VALUE);
KUNIT_ASSERT_NOT_NULL(test, p);
KUNIT_EXPECT_EQ(test, p->value.immediate, 0x1);
p = tb_property_find(network_dir, "prtcstns", TB_PROPERTY_TYPE_VALUE);
KUNIT_ASSERT_NOT_NULL(test, p);
KUNIT_EXPECT_EQ(test, p->value.immediate, 0x0);
p = tb_property_find(network_dir, "deviceid", TB_PROPERTY_TYPE_VALUE);
KUNIT_EXPECT_TRUE(test, !p);
p = tb_property_find(network_dir, "deviceid", TB_PROPERTY_TYPE_TEXT);
KUNIT_EXPECT_TRUE(test, !p);
tb_property_free_dir(dir);
}
static void tb_test_property_format(struct kunit *test)
{
struct tb_property_dir *dir;
ssize_t block_len;
u32 *block;
int ret, i;
dir = tb_property_parse_dir(root_directory, ARRAY_SIZE(root_directory));
KUNIT_ASSERT_NOT_NULL(test, dir);
ret = tb_property_format_dir(dir, NULL, 0);
KUNIT_ASSERT_EQ(test, ret, ARRAY_SIZE(root_directory));
block_len = ret;
block = kunit_kzalloc(test, block_len * sizeof(u32), GFP_KERNEL);
KUNIT_ASSERT_NOT_NULL(test, block);
ret = tb_property_format_dir(dir, block, block_len);
KUNIT_EXPECT_EQ(test, ret, 0);
for (i = 0; i < ARRAY_SIZE(root_directory); i++)
KUNIT_EXPECT_EQ(test, root_directory[i], block[i]);
tb_property_free_dir(dir);
}
static void compare_dirs(struct kunit *test, struct tb_property_dir *d1,
struct tb_property_dir *d2)
{
struct tb_property *p1, *p2, *tmp;
int n1, n2, i;
if (d1->uuid) {
KUNIT_ASSERT_NOT_NULL(test, d2->uuid);
KUNIT_ASSERT_TRUE(test, uuid_equal(d1->uuid, d2->uuid));
} else {
KUNIT_ASSERT_NULL(test, d2->uuid);
}
n1 = 0;
tb_property_for_each(d1, tmp)
n1++;
KUNIT_ASSERT_NE(test, n1, 0);
n2 = 0;
tb_property_for_each(d2, tmp)
n2++;
KUNIT_ASSERT_NE(test, n2, 0);
KUNIT_ASSERT_EQ(test, n1, n2);
p1 = NULL;
p2 = NULL;
for (i = 0; i < n1; i++) {
p1 = tb_property_get_next(d1, p1);
KUNIT_ASSERT_NOT_NULL(test, p1);
p2 = tb_property_get_next(d2, p2);
KUNIT_ASSERT_NOT_NULL(test, p2);
KUNIT_ASSERT_STREQ(test, &p1->key[0], &p2->key[0]);
KUNIT_ASSERT_EQ(test, p1->type, p2->type);
KUNIT_ASSERT_EQ(test, p1->length, p2->length);
switch (p1->type) {
case TB_PROPERTY_TYPE_DIRECTORY:
KUNIT_ASSERT_NOT_NULL(test, p1->value.dir);
KUNIT_ASSERT_NOT_NULL(test, p2->value.dir);
compare_dirs(test, p1->value.dir, p2->value.dir);
break;
case TB_PROPERTY_TYPE_DATA:
KUNIT_ASSERT_NOT_NULL(test, p1->value.data);
KUNIT_ASSERT_NOT_NULL(test, p2->value.data);
KUNIT_ASSERT_TRUE(test,
!memcmp(p1->value.data, p2->value.data,
p1->length * 4)
);
break;
case TB_PROPERTY_TYPE_TEXT:
KUNIT_ASSERT_NOT_NULL(test, p1->value.text);
KUNIT_ASSERT_NOT_NULL(test, p2->value.text);
KUNIT_ASSERT_STREQ(test, p1->value.text, p2->value.text);
break;
case TB_PROPERTY_TYPE_VALUE:
KUNIT_ASSERT_EQ(test, p1->value.immediate,
p2->value.immediate);
break;
default:
KUNIT_FAIL(test, "unexpected property type");
break;
}
}
}
static void tb_test_property_copy(struct kunit *test)
{
struct tb_property_dir *src, *dst;
u32 *block;
int ret, i;
src = tb_property_parse_dir(root_directory, ARRAY_SIZE(root_directory));
KUNIT_ASSERT_NOT_NULL(test, src);
dst = tb_property_copy_dir(src);
KUNIT_ASSERT_NOT_NULL(test, dst);
/* Compare the structures */
compare_dirs(test, src, dst);
/* Compare the resulting property block */
ret = tb_property_format_dir(dst, NULL, 0);
KUNIT_ASSERT_EQ(test, ret, ARRAY_SIZE(root_directory));
block = kunit_kzalloc(test, sizeof(root_directory), GFP_KERNEL);
KUNIT_ASSERT_NOT_NULL(test, block);
ret = tb_property_format_dir(dst, block, ARRAY_SIZE(root_directory));
KUNIT_EXPECT_TRUE(test, !ret);
for (i = 0; i < ARRAY_SIZE(root_directory); i++)
KUNIT_EXPECT_EQ(test, root_directory[i], block[i]);
tb_property_free_dir(dst);
tb_property_free_dir(src);
}
static struct kunit_case tb_test_cases[] = {
KUNIT_CASE(tb_test_path_basic),
KUNIT_CASE(tb_test_path_not_connected_walk),
KUNIT_CASE(tb_test_path_single_hop_walk),
KUNIT_CASE(tb_test_path_daisy_chain_walk),
KUNIT_CASE(tb_test_path_simple_tree_walk),
KUNIT_CASE(tb_test_path_complex_tree_walk),
KUNIT_CASE(tb_test_path_max_length_walk),
KUNIT_CASE(tb_test_path_not_connected),
KUNIT_CASE(tb_test_path_not_bonded_lane0),
KUNIT_CASE(tb_test_path_not_bonded_lane1),
KUNIT_CASE(tb_test_path_not_bonded_lane1_chain),
KUNIT_CASE(tb_test_path_not_bonded_lane1_chain_reverse),
KUNIT_CASE(tb_test_path_mixed_chain),
KUNIT_CASE(tb_test_path_mixed_chain_reverse),
KUNIT_CASE(tb_test_tunnel_pcie),
KUNIT_CASE(tb_test_tunnel_dp),
KUNIT_CASE(tb_test_tunnel_dp_chain),
KUNIT_CASE(tb_test_tunnel_dp_tree),
KUNIT_CASE(tb_test_tunnel_dp_max_length),
KUNIT_CASE(tb_test_tunnel_3dp),
KUNIT_CASE(tb_test_tunnel_port_on_path),
KUNIT_CASE(tb_test_tunnel_usb3),
KUNIT_CASE(tb_test_tunnel_dma),
KUNIT_CASE(tb_test_tunnel_dma_rx),
KUNIT_CASE(tb_test_tunnel_dma_tx),
KUNIT_CASE(tb_test_tunnel_dma_chain),
KUNIT_CASE(tb_test_tunnel_dma_match),
KUNIT_CASE(tb_test_credit_alloc_legacy_not_bonded),
KUNIT_CASE(tb_test_credit_alloc_legacy_bonded),
KUNIT_CASE(tb_test_credit_alloc_pcie),
KUNIT_CASE(tb_test_credit_alloc_without_dp),
KUNIT_CASE(tb_test_credit_alloc_dp),
KUNIT_CASE(tb_test_credit_alloc_usb3),
KUNIT_CASE(tb_test_credit_alloc_dma),
KUNIT_CASE(tb_test_credit_alloc_dma_multiple),
KUNIT_CASE(tb_test_credit_alloc_all),
KUNIT_CASE(tb_test_property_parse),
KUNIT_CASE(tb_test_property_format),
KUNIT_CASE(tb_test_property_copy),
{ }
};
static struct kunit_suite tb_test_suite = {
.name = "thunderbolt",
.test_cases = tb_test_cases,
};
kunit_test_suite(tb_test_suite);
| linux-master | drivers/thunderbolt/test.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Thunderbolt driver - control channel and configuration commands
*
* Copyright (c) 2014 Andreas Noever <[email protected]>
* Copyright (C) 2018, Intel Corporation
*/
#include <linux/crc32.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/dmapool.h>
#include <linux/workqueue.h>
#include "ctl.h"
#define TB_CTL_RX_PKG_COUNT 10
#define TB_CTL_RETRIES 4
/**
* struct tb_ctl - Thunderbolt control channel
* @nhi: Pointer to the NHI structure
* @tx: Transmit ring
* @rx: Receive ring
* @frame_pool: DMA pool for control messages
* @rx_packets: Received control messages
* @request_queue_lock: Lock protecting @request_queue
* @request_queue: List of outstanding requests
* @running: Is the control channel running at the moment
* @timeout_msec: Default timeout for non-raw control messages
* @callback: Callback called when hotplug message is received
* @callback_data: Data passed to @callback
*/
struct tb_ctl {
struct tb_nhi *nhi;
struct tb_ring *tx;
struct tb_ring *rx;
struct dma_pool *frame_pool;
struct ctl_pkg *rx_packets[TB_CTL_RX_PKG_COUNT];
struct mutex request_queue_lock;
struct list_head request_queue;
bool running;
int timeout_msec;
event_cb callback;
void *callback_data;
};
#define tb_ctl_WARN(ctl, format, arg...) \
dev_WARN(&(ctl)->nhi->pdev->dev, format, ## arg)
#define tb_ctl_err(ctl, format, arg...) \
dev_err(&(ctl)->nhi->pdev->dev, format, ## arg)
#define tb_ctl_warn(ctl, format, arg...) \
dev_warn(&(ctl)->nhi->pdev->dev, format, ## arg)
#define tb_ctl_info(ctl, format, arg...) \
dev_info(&(ctl)->nhi->pdev->dev, format, ## arg)
#define tb_ctl_dbg(ctl, format, arg...) \
dev_dbg(&(ctl)->nhi->pdev->dev, format, ## arg)
static DECLARE_WAIT_QUEUE_HEAD(tb_cfg_request_cancel_queue);
/* Serializes access to request kref_get/put */
static DEFINE_MUTEX(tb_cfg_request_lock);
/**
* tb_cfg_request_alloc() - Allocates a new config request
*
* This is a refcounted object, so when you are done with it, call
* tb_cfg_request_put() to release it.
*/
struct tb_cfg_request *tb_cfg_request_alloc(void)
{
struct tb_cfg_request *req;
req = kzalloc(sizeof(*req), GFP_KERNEL);
if (!req)
return NULL;
kref_init(&req->kref);
return req;
}
/**
* tb_cfg_request_get() - Increase refcount of a request
* @req: Request whose refcount is increased
*/
void tb_cfg_request_get(struct tb_cfg_request *req)
{
mutex_lock(&tb_cfg_request_lock);
kref_get(&req->kref);
mutex_unlock(&tb_cfg_request_lock);
}
static void tb_cfg_request_destroy(struct kref *kref)
{
struct tb_cfg_request *req = container_of(kref, typeof(*req), kref);
kfree(req);
}
/**
* tb_cfg_request_put() - Decrease refcount and possibly release the request
* @req: Request whose refcount is decreased
*
* Call this function when you are done with the request. When refcount
* goes to %0 the object is released.
*/
void tb_cfg_request_put(struct tb_cfg_request *req)
{
mutex_lock(&tb_cfg_request_lock);
kref_put(&req->kref, tb_cfg_request_destroy);
mutex_unlock(&tb_cfg_request_lock);
}
static int tb_cfg_request_enqueue(struct tb_ctl *ctl,
struct tb_cfg_request *req)
{
WARN_ON(test_bit(TB_CFG_REQUEST_ACTIVE, &req->flags));
WARN_ON(req->ctl);
mutex_lock(&ctl->request_queue_lock);
if (!ctl->running) {
mutex_unlock(&ctl->request_queue_lock);
return -ENOTCONN;
}
req->ctl = ctl;
list_add_tail(&req->list, &ctl->request_queue);
set_bit(TB_CFG_REQUEST_ACTIVE, &req->flags);
mutex_unlock(&ctl->request_queue_lock);
return 0;
}
static void tb_cfg_request_dequeue(struct tb_cfg_request *req)
{
struct tb_ctl *ctl = req->ctl;
mutex_lock(&ctl->request_queue_lock);
list_del(&req->list);
clear_bit(TB_CFG_REQUEST_ACTIVE, &req->flags);
if (test_bit(TB_CFG_REQUEST_CANCELED, &req->flags))
wake_up(&tb_cfg_request_cancel_queue);
mutex_unlock(&ctl->request_queue_lock);
}
static bool tb_cfg_request_is_active(struct tb_cfg_request *req)
{
return test_bit(TB_CFG_REQUEST_ACTIVE, &req->flags);
}
static struct tb_cfg_request *
tb_cfg_request_find(struct tb_ctl *ctl, struct ctl_pkg *pkg)
{
struct tb_cfg_request *req = NULL, *iter;
mutex_lock(&pkg->ctl->request_queue_lock);
list_for_each_entry(iter, &pkg->ctl->request_queue, list) {
tb_cfg_request_get(iter);
if (iter->match(iter, pkg)) {
req = iter;
break;
}
tb_cfg_request_put(iter);
}
mutex_unlock(&pkg->ctl->request_queue_lock);
return req;
}
/* utility functions */
static int check_header(const struct ctl_pkg *pkg, u32 len,
enum tb_cfg_pkg_type type, u64 route)
{
struct tb_cfg_header *header = pkg->buffer;
/* check frame, TODO: frame flags */
if (WARN(len != pkg->frame.size,
"wrong framesize (expected %#x, got %#x)\n",
len, pkg->frame.size))
return -EIO;
if (WARN(type != pkg->frame.eof, "wrong eof (expected %#x, got %#x)\n",
type, pkg->frame.eof))
return -EIO;
if (WARN(pkg->frame.sof, "wrong sof (expected 0x0, got %#x)\n",
pkg->frame.sof))
return -EIO;
/* check header */
if (WARN(header->unknown != 1 << 9,
"header->unknown is %#x\n", header->unknown))
return -EIO;
if (WARN(route != tb_cfg_get_route(header),
"wrong route (expected %llx, got %llx)",
route, tb_cfg_get_route(header)))
return -EIO;
return 0;
}
static int check_config_address(struct tb_cfg_address addr,
enum tb_cfg_space space, u32 offset,
u32 length)
{
if (WARN(addr.zero, "addr.zero is %#x\n", addr.zero))
return -EIO;
if (WARN(space != addr.space, "wrong space (expected %x, got %x)\n",
space, addr.space))
return -EIO;
if (WARN(offset != addr.offset, "wrong offset (expected %x, got %x)\n",
offset, addr.offset))
return -EIO;
if (WARN(length != addr.length, "wrong length (expected %x, got %x)\n",
length, addr.length))
return -EIO;
/*
* We cannot check addr->port as it is set to the upstream port of the
* sender.
*/
return 0;
}
static struct tb_cfg_result decode_error(const struct ctl_pkg *response)
{
struct cfg_error_pkg *pkg = response->buffer;
struct tb_cfg_result res = { 0 };
res.response_route = tb_cfg_get_route(&pkg->header);
res.response_port = 0;
res.err = check_header(response, sizeof(*pkg), TB_CFG_PKG_ERROR,
tb_cfg_get_route(&pkg->header));
if (res.err)
return res;
res.err = 1;
res.tb_error = pkg->error;
res.response_port = pkg->port;
return res;
}
static struct tb_cfg_result parse_header(const struct ctl_pkg *pkg, u32 len,
enum tb_cfg_pkg_type type, u64 route)
{
struct tb_cfg_header *header = pkg->buffer;
struct tb_cfg_result res = { 0 };
if (pkg->frame.eof == TB_CFG_PKG_ERROR)
return decode_error(pkg);
res.response_port = 0; /* will be updated later for cfg_read/write */
res.response_route = tb_cfg_get_route(header);
res.err = check_header(pkg, len, type, route);
return res;
}
static void tb_cfg_print_error(struct tb_ctl *ctl,
const struct tb_cfg_result *res)
{
WARN_ON(res->err != 1);
switch (res->tb_error) {
case TB_CFG_ERROR_PORT_NOT_CONNECTED:
/* Port is not connected. This can happen during surprise
* removal. Do not warn. */
return;
case TB_CFG_ERROR_INVALID_CONFIG_SPACE:
/*
* Invalid cfg_space/offset/length combination in
* cfg_read/cfg_write.
*/
tb_ctl_dbg(ctl, "%llx:%x: invalid config space or offset\n",
res->response_route, res->response_port);
return;
case TB_CFG_ERROR_NO_SUCH_PORT:
/*
* - The route contains a non-existent port.
* - The route contains a non-PHY port (e.g. PCIe).
* - The port in cfg_read/cfg_write does not exist.
*/
tb_ctl_WARN(ctl, "CFG_ERROR(%llx:%x): Invalid port\n",
res->response_route, res->response_port);
return;
case TB_CFG_ERROR_LOOP:
tb_ctl_WARN(ctl, "CFG_ERROR(%llx:%x): Route contains a loop\n",
res->response_route, res->response_port);
return;
case TB_CFG_ERROR_LOCK:
tb_ctl_warn(ctl, "%llx:%x: downstream port is locked\n",
res->response_route, res->response_port);
return;
default:
/* 5,6,7,9 and 11 are also valid error codes */
tb_ctl_WARN(ctl, "CFG_ERROR(%llx:%x): Unknown error\n",
res->response_route, res->response_port);
return;
}
}
static __be32 tb_crc(const void *data, size_t len)
{
return cpu_to_be32(~__crc32c_le(~0, data, len));
}
static void tb_ctl_pkg_free(struct ctl_pkg *pkg)
{
if (pkg) {
dma_pool_free(pkg->ctl->frame_pool,
pkg->buffer, pkg->frame.buffer_phy);
kfree(pkg);
}
}
static struct ctl_pkg *tb_ctl_pkg_alloc(struct tb_ctl *ctl)
{
struct ctl_pkg *pkg = kzalloc(sizeof(*pkg), GFP_KERNEL);
if (!pkg)
return NULL;
pkg->ctl = ctl;
pkg->buffer = dma_pool_alloc(ctl->frame_pool, GFP_KERNEL,
&pkg->frame.buffer_phy);
if (!pkg->buffer) {
kfree(pkg);
return NULL;
}
return pkg;
}
/* RX/TX handling */
static void tb_ctl_tx_callback(struct tb_ring *ring, struct ring_frame *frame,
bool canceled)
{
struct ctl_pkg *pkg = container_of(frame, typeof(*pkg), frame);
tb_ctl_pkg_free(pkg);
}
/*
* tb_ctl_tx() - transmit a packet on the control channel
*
* len must be a multiple of four.
*
* Return: Returns 0 on success or an error code on failure.
*/
static int tb_ctl_tx(struct tb_ctl *ctl, const void *data, size_t len,
enum tb_cfg_pkg_type type)
{
int res;
struct ctl_pkg *pkg;
if (len % 4 != 0) { /* required for le->be conversion */
tb_ctl_WARN(ctl, "TX: invalid size: %zu\n", len);
return -EINVAL;
}
if (len > TB_FRAME_SIZE - 4) { /* checksum is 4 bytes */
tb_ctl_WARN(ctl, "TX: packet too large: %zu/%d\n",
len, TB_FRAME_SIZE - 4);
return -EINVAL;
}
pkg = tb_ctl_pkg_alloc(ctl);
if (!pkg)
return -ENOMEM;
pkg->frame.callback = tb_ctl_tx_callback;
pkg->frame.size = len + 4;
pkg->frame.sof = type;
pkg->frame.eof = type;
cpu_to_be32_array(pkg->buffer, data, len / 4);
*(__be32 *) (pkg->buffer + len) = tb_crc(pkg->buffer, len);
res = tb_ring_tx(ctl->tx, &pkg->frame);
if (res) /* ring is stopped */
tb_ctl_pkg_free(pkg);
return res;
}
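/*
 * Wire format sketch for the function above (a rough illustration, not a
 * definitive spec): for a @len byte payload the frame carries len + 4
 * bytes, the payload byte-swapped to big endian followed by one dword of
 * inverted CRC32C, with both sof and eof set to the packet type.
 * Assuming struct cfg_read_pkg is just the 8-byte route header plus the
 * 4-byte address, a read request would therefore go out as a 16-byte
 * frame:
 *
 *   [ route (8) | addr (4) | crc32c (4) ]   sof = eof = TB_CFG_PKG_READ
 */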
/*
* tb_ctl_handle_event() - acknowledge a plug event, invoke ctl->callback
*/
static bool tb_ctl_handle_event(struct tb_ctl *ctl, enum tb_cfg_pkg_type type,
struct ctl_pkg *pkg, size_t size)
{
return ctl->callback(ctl->callback_data, type, pkg->buffer, size);
}
static void tb_ctl_rx_submit(struct ctl_pkg *pkg)
{
tb_ring_rx(pkg->ctl->rx, &pkg->frame); /*
* We ignore failures during stop.
* All rx packets are referenced
* from ctl->rx_packets, so we do
* not lose them.
*/
}
static int tb_async_error(const struct ctl_pkg *pkg)
{
const struct cfg_error_pkg *error = pkg->buffer;
if (pkg->frame.eof != TB_CFG_PKG_ERROR)
return false;
switch (error->error) {
case TB_CFG_ERROR_LINK_ERROR:
case TB_CFG_ERROR_HEC_ERROR_DETECTED:
case TB_CFG_ERROR_FLOW_CONTROL_ERROR:
case TB_CFG_ERROR_DP_BW:
case TB_CFG_ERROR_ROP_CMPLT:
case TB_CFG_ERROR_POP_CMPLT:
case TB_CFG_ERROR_PCIE_WAKE:
case TB_CFG_ERROR_DP_CON_CHANGE:
case TB_CFG_ERROR_DPTX_DISCOVERY:
case TB_CFG_ERROR_LINK_RECOVERY:
case TB_CFG_ERROR_ASYM_LINK:
return true;
default:
return false;
}
}
static void tb_ctl_rx_callback(struct tb_ring *ring, struct ring_frame *frame,
bool canceled)
{
struct ctl_pkg *pkg = container_of(frame, typeof(*pkg), frame);
struct tb_cfg_request *req;
__be32 crc32;
if (canceled)
return; /*
* ring is stopped, packet is referenced from
* ctl->rx_packets.
*/
if (frame->size < 4 || frame->size % 4 != 0) {
tb_ctl_err(pkg->ctl, "RX: invalid size %#x, dropping packet\n",
frame->size);
goto rx;
}
frame->size -= 4; /* remove checksum */
crc32 = tb_crc(pkg->buffer, frame->size);
be32_to_cpu_array(pkg->buffer, pkg->buffer, frame->size / 4);
switch (frame->eof) {
case TB_CFG_PKG_READ:
case TB_CFG_PKG_WRITE:
case TB_CFG_PKG_ERROR:
case TB_CFG_PKG_OVERRIDE:
case TB_CFG_PKG_RESET:
if (*(__be32 *)(pkg->buffer + frame->size) != crc32) {
tb_ctl_err(pkg->ctl,
"RX: checksum mismatch, dropping packet\n");
goto rx;
}
if (tb_async_error(pkg)) {
tb_ctl_handle_event(pkg->ctl, frame->eof,
pkg, frame->size);
goto rx;
}
break;
case TB_CFG_PKG_EVENT:
case TB_CFG_PKG_XDOMAIN_RESP:
case TB_CFG_PKG_XDOMAIN_REQ:
if (*(__be32 *)(pkg->buffer + frame->size) != crc32) {
tb_ctl_err(pkg->ctl,
"RX: checksum mismatch, dropping packet\n");
goto rx;
}
fallthrough;
case TB_CFG_PKG_ICM_EVENT:
if (tb_ctl_handle_event(pkg->ctl, frame->eof, pkg, frame->size))
goto rx;
break;
default:
break;
}
/*
* The received packet will be processed only if there is an
* active request and the packet is what is expected. This
* prevents packets, such as replies arriving after the timeout
* has triggered, from messing with the active requests.
*/
req = tb_cfg_request_find(pkg->ctl, pkg);
if (req) {
if (req->copy(req, pkg))
schedule_work(&req->work);
tb_cfg_request_put(req);
}
rx:
tb_ctl_rx_submit(pkg);
}
static void tb_cfg_request_work(struct work_struct *work)
{
struct tb_cfg_request *req = container_of(work, typeof(*req), work);
if (!test_bit(TB_CFG_REQUEST_CANCELED, &req->flags))
req->callback(req->callback_data);
tb_cfg_request_dequeue(req);
tb_cfg_request_put(req);
}
/**
* tb_cfg_request() - Start control request not waiting for it to complete
* @ctl: Control channel to use
* @req: Request to start
* @callback: Callback called when the request is completed
* @callback_data: Data to be passed to @callback
*
* This queues @req on the given control channel without waiting for it
* to complete. When the request completes @callback is called.
*/
int tb_cfg_request(struct tb_ctl *ctl, struct tb_cfg_request *req,
void (*callback)(void *), void *callback_data)
{
int ret;
req->flags = 0;
req->callback = callback;
req->callback_data = callback_data;
INIT_WORK(&req->work, tb_cfg_request_work);
INIT_LIST_HEAD(&req->list);
tb_cfg_request_get(req);
ret = tb_cfg_request_enqueue(ctl, req);
if (ret)
goto err_put;
ret = tb_ctl_tx(ctl, req->request, req->request_size,
req->request_type);
if (ret)
goto err_dequeue;
if (!req->response)
schedule_work(&req->work);
return 0;
err_dequeue:
tb_cfg_request_dequeue(req);
err_put:
tb_cfg_request_put(req);
return ret;
}
/**
* tb_cfg_request_cancel() - Cancel a control request
* @req: Request to cancel
* @err: Error to assign to the request
*
* This function can be used to cancel an ongoing request. It will wait
* until the request is not active anymore.
*/
void tb_cfg_request_cancel(struct tb_cfg_request *req, int err)
{
set_bit(TB_CFG_REQUEST_CANCELED, &req->flags);
schedule_work(&req->work);
wait_event(tb_cfg_request_cancel_queue, !tb_cfg_request_is_active(req));
req->result.err = err;
}
static void tb_cfg_request_complete(void *data)
{
complete(data);
}
/**
* tb_cfg_request_sync() - Start control request and wait until it completes
* @ctl: Control channel to use
* @req: Request to start
* @timeout_msec: Timeout in ms how long to wait for @req to complete
*
* Starts a control request and waits until it completes. If the timeout
* triggers, the request is canceled before the function returns. Note the
* caller needs to make sure only one message for a given switch is active
* at a time.
*/
struct tb_cfg_result tb_cfg_request_sync(struct tb_ctl *ctl,
struct tb_cfg_request *req,
int timeout_msec)
{
unsigned long timeout = msecs_to_jiffies(timeout_msec);
struct tb_cfg_result res = { 0 };
DECLARE_COMPLETION_ONSTACK(done);
int ret;
ret = tb_cfg_request(ctl, req, tb_cfg_request_complete, &done);
if (ret) {
res.err = ret;
return res;
}
if (!wait_for_completion_timeout(&done, timeout))
tb_cfg_request_cancel(req, -ETIMEDOUT);
flush_work(&req->work);
return req->result;
}
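/*
 * Minimal usage sketch for the synchronous API above (tb_cfg_reset()
 * below is the in-tree pattern this mirrors; the fields shown are the
 * ones that caller fills in, nothing new is assumed):
 *
 *   req = tb_cfg_request_alloc();
 *   req->match = tb_cfg_match;
 *   req->copy = tb_cfg_copy;
 *   req->request = &request;        request/response buffers, sizes and
 *   req->request_size = ...;        packet types are provided by the
 *   req->request_type = ...;        caller
 *   req->response = &reply;
 *   req->response_size = ...;
 *   req->response_type = ...;
 *   res = tb_cfg_request_sync(ctl, req, ctl->timeout_msec);
 *   tb_cfg_request_put(req);
 */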
/* public interface, alloc/start/stop/free */
/**
* tb_ctl_alloc() - allocate a control channel
* @nhi: Pointer to NHI
* @timeout_msec: Default timeout used with non-raw control messages
* @cb: Callback called for plug events
* @cb_data: Data passed to @cb
*
* cb will be invoked once for every hot plug event.
*
* Return: Returns a pointer on success or NULL on failure.
*/
struct tb_ctl *tb_ctl_alloc(struct tb_nhi *nhi, int timeout_msec, event_cb cb,
void *cb_data)
{
int i;
struct tb_ctl *ctl = kzalloc(sizeof(*ctl), GFP_KERNEL);
if (!ctl)
return NULL;
ctl->nhi = nhi;
ctl->timeout_msec = timeout_msec;
ctl->callback = cb;
ctl->callback_data = cb_data;
mutex_init(&ctl->request_queue_lock);
INIT_LIST_HEAD(&ctl->request_queue);
ctl->frame_pool = dma_pool_create("thunderbolt_ctl", &nhi->pdev->dev,
TB_FRAME_SIZE, 4, 0);
if (!ctl->frame_pool)
goto err;
ctl->tx = tb_ring_alloc_tx(nhi, 0, 10, RING_FLAG_NO_SUSPEND);
if (!ctl->tx)
goto err;
ctl->rx = tb_ring_alloc_rx(nhi, 0, 10, RING_FLAG_NO_SUSPEND, 0, 0xffff,
0xffff, NULL, NULL);
if (!ctl->rx)
goto err;
for (i = 0; i < TB_CTL_RX_PKG_COUNT; i++) {
ctl->rx_packets[i] = tb_ctl_pkg_alloc(ctl);
if (!ctl->rx_packets[i])
goto err;
ctl->rx_packets[i]->frame.callback = tb_ctl_rx_callback;
}
tb_ctl_dbg(ctl, "control channel created\n");
return ctl;
err:
tb_ctl_free(ctl);
return NULL;
}
/**
* tb_ctl_free() - free a control channel
* @ctl: Control channel to free
*
* Must be called after tb_ctl_stop.
*
* Must NOT be called from ctl->callback.
*/
void tb_ctl_free(struct tb_ctl *ctl)
{
int i;
if (!ctl)
return;
if (ctl->rx)
tb_ring_free(ctl->rx);
if (ctl->tx)
tb_ring_free(ctl->tx);
/* free RX packets */
for (i = 0; i < TB_CTL_RX_PKG_COUNT; i++)
tb_ctl_pkg_free(ctl->rx_packets[i]);
dma_pool_destroy(ctl->frame_pool);
kfree(ctl);
}
/**
* tb_ctl_start() - start/resume the control channel
* @ctl: Control channel to start
*/
void tb_ctl_start(struct tb_ctl *ctl)
{
int i;
tb_ctl_dbg(ctl, "control channel starting...\n");
tb_ring_start(ctl->tx); /* is used to ack hotplug packets, start first */
tb_ring_start(ctl->rx);
for (i = 0; i < TB_CTL_RX_PKG_COUNT; i++)
tb_ctl_rx_submit(ctl->rx_packets[i]);
ctl->running = true;
}
/**
* tb_ctl_stop() - pause the control channel
* @ctl: Control channel to stop
*
* All invocations of ctl->callback will have finished after this method
* returns.
*
* Must NOT be called from ctl->callback.
*/
void tb_ctl_stop(struct tb_ctl *ctl)
{
mutex_lock(&ctl->request_queue_lock);
ctl->running = false;
mutex_unlock(&ctl->request_queue_lock);
tb_ring_stop(ctl->rx);
tb_ring_stop(ctl->tx);
if (!list_empty(&ctl->request_queue))
tb_ctl_WARN(ctl, "dangling request in request_queue\n");
INIT_LIST_HEAD(&ctl->request_queue);
tb_ctl_dbg(ctl, "control channel stopped\n");
}
/* public interface, commands */
/**
* tb_cfg_ack_notification() - Ack notification
* @ctl: Control channel to use
* @route: Router that originated the event
* @error: Pointer to the notification package
*
* Call this in response to a non-plug notification to ack it. Returns
* %0 on success or an error code on failure.
*/
int tb_cfg_ack_notification(struct tb_ctl *ctl, u64 route,
const struct cfg_error_pkg *error)
{
struct cfg_ack_pkg pkg = {
.header = tb_cfg_make_header(route),
};
const char *name;
switch (error->error) {
case TB_CFG_ERROR_LINK_ERROR:
name = "link error";
break;
case TB_CFG_ERROR_HEC_ERROR_DETECTED:
name = "HEC error";
break;
case TB_CFG_ERROR_FLOW_CONTROL_ERROR:
name = "flow control error";
break;
case TB_CFG_ERROR_DP_BW:
name = "DP_BW";
break;
case TB_CFG_ERROR_ROP_CMPLT:
name = "router operation completion";
break;
case TB_CFG_ERROR_POP_CMPLT:
name = "port operation completion";
break;
case TB_CFG_ERROR_PCIE_WAKE:
name = "PCIe wake";
break;
case TB_CFG_ERROR_DP_CON_CHANGE:
name = "DP connector change";
break;
case TB_CFG_ERROR_DPTX_DISCOVERY:
name = "DPTX discovery";
break;
case TB_CFG_ERROR_LINK_RECOVERY:
name = "link recovery";
break;
case TB_CFG_ERROR_ASYM_LINK:
name = "asymmetric link";
break;
default:
name = "unknown";
break;
}
tb_ctl_dbg(ctl, "acking %s (%#x) notification on %llx\n", name,
error->error, route);
return tb_ctl_tx(ctl, &pkg, sizeof(pkg), TB_CFG_PKG_NOTIFY_ACK);
}
/**
* tb_cfg_ack_plug() - Ack hot plug/unplug event
* @ctl: Control channel to use
* @route: Router that originated the event
* @port: Port where the hot plug/unplug happened
* @unplug: Ack hot plug or unplug
*
* Call this in response to a hot plug/unplug event to ack it.
* Returns %0 on success or an error code on failure.
*/
int tb_cfg_ack_plug(struct tb_ctl *ctl, u64 route, u32 port, bool unplug)
{
struct cfg_error_pkg pkg = {
.header = tb_cfg_make_header(route),
.port = port,
.error = TB_CFG_ERROR_ACK_PLUG_EVENT,
.pg = unplug ? TB_CFG_ERROR_PG_HOT_UNPLUG
: TB_CFG_ERROR_PG_HOT_PLUG,
};
tb_ctl_dbg(ctl, "acking hot %splug event on %llx:%u\n",
unplug ? "un" : "", route, port);
return tb_ctl_tx(ctl, &pkg, sizeof(pkg), TB_CFG_PKG_ERROR);
}
static bool tb_cfg_match(const struct tb_cfg_request *req,
const struct ctl_pkg *pkg)
{
u64 route = tb_cfg_get_route(pkg->buffer) & ~BIT_ULL(63);
if (pkg->frame.eof == TB_CFG_PKG_ERROR)
return true;
if (pkg->frame.eof != req->response_type)
return false;
if (route != tb_cfg_get_route(req->request))
return false;
if (pkg->frame.size != req->response_size)
return false;
if (pkg->frame.eof == TB_CFG_PKG_READ ||
pkg->frame.eof == TB_CFG_PKG_WRITE) {
const struct cfg_read_pkg *req_hdr = req->request;
const struct cfg_read_pkg *res_hdr = pkg->buffer;
if (req_hdr->addr.seq != res_hdr->addr.seq)
return false;
}
return true;
}
static bool tb_cfg_copy(struct tb_cfg_request *req, const struct ctl_pkg *pkg)
{
struct tb_cfg_result res;
/* Now make sure it is in expected format */
res = parse_header(pkg, req->response_size, req->response_type,
tb_cfg_get_route(req->request));
if (!res.err)
memcpy(req->response, pkg->buffer, req->response_size);
req->result = res;
/* Always complete when first response is received */
return true;
}
/**
* tb_cfg_reset() - send a reset packet and wait for a response
* @ctl: Control channel pointer
* @route: Router string for the router to send reset
*
* If the switch at route is incorrectly configured then we will not receive a
* reply (even though the switch will reset). The caller should check for
* -ETIMEDOUT and attempt to reconfigure the switch.
*/
struct tb_cfg_result tb_cfg_reset(struct tb_ctl *ctl, u64 route)
{
struct cfg_reset_pkg request = { .header = tb_cfg_make_header(route) };
struct tb_cfg_result res = { 0 };
struct tb_cfg_header reply;
struct tb_cfg_request *req;
req = tb_cfg_request_alloc();
if (!req) {
res.err = -ENOMEM;
return res;
}
req->match = tb_cfg_match;
req->copy = tb_cfg_copy;
req->request = &request;
req->request_size = sizeof(request);
req->request_type = TB_CFG_PKG_RESET;
req->response = &reply;
req->response_size = sizeof(reply);
req->response_type = TB_CFG_PKG_RESET;
res = tb_cfg_request_sync(ctl, req, ctl->timeout_msec);
tb_cfg_request_put(req);
return res;
}
/**
* tb_cfg_read_raw() - read from config space into buffer
* @ctl: Pointer to the control channel
* @buffer: Buffer where the data is read
* @route: Route string of the router
* @port: Port number when reading from %TB_CFG_PORT, %0 otherwise
* @space: Config space selector
* @offset: Dword offset of the register to start reading
* @length: Number of dwords to read
* @timeout_msec: Timeout in ms how long to wait for the response
*
* Reads from router config space without translating the possible error.
*/
struct tb_cfg_result tb_cfg_read_raw(struct tb_ctl *ctl, void *buffer,
u64 route, u32 port, enum tb_cfg_space space,
u32 offset, u32 length, int timeout_msec)
{
struct tb_cfg_result res = { 0 };
struct cfg_read_pkg request = {
.header = tb_cfg_make_header(route),
.addr = {
.port = port,
.space = space,
.offset = offset,
.length = length,
},
};
struct cfg_write_pkg reply;
int retries = 0;
while (retries < TB_CTL_RETRIES) {
struct tb_cfg_request *req;
req = tb_cfg_request_alloc();
if (!req) {
res.err = -ENOMEM;
return res;
}
request.addr.seq = retries++;
req->match = tb_cfg_match;
req->copy = tb_cfg_copy;
req->request = &request;
req->request_size = sizeof(request);
req->request_type = TB_CFG_PKG_READ;
req->response = &reply;
req->response_size = 12 + 4 * length;
req->response_type = TB_CFG_PKG_READ;
res = tb_cfg_request_sync(ctl, req, timeout_msec);
tb_cfg_request_put(req);
if (res.err != -ETIMEDOUT)
break;
/* Wait a bit (arbitrary time) before we send a retry */
usleep_range(10, 100);
}
if (res.err)
return res;
res.response_port = reply.addr.port;
res.err = check_config_address(reply.addr, space, offset, length);
if (!res.err)
memcpy(buffer, &reply.data, 4 * length);
return res;
}
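/*
 * Note on the response_size used above: 12 + 4 * length appears to
 * account for the 8-byte route header plus the 4-byte address of the
 * read reply, followed by @length dwords of data that are copied into
 * @buffer once check_config_address() accepts the reply.
 */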
/**
* tb_cfg_write_raw() - write from buffer into config space
* @ctl: Pointer to the control channel
* @buffer: Data to write
* @route: Route string of the router
* @port: Port number when writing to %TB_CFG_PORT, %0 otherwise
* @space: Config space selector
* @offset: Dword offset of the register to start writing
* @length: Number of dwords to write
* @timeout_msec: Timeout in ms how long to wait for the response
*
* Writes to router config space without translating the possible error.
*/
struct tb_cfg_result tb_cfg_write_raw(struct tb_ctl *ctl, const void *buffer,
u64 route, u32 port, enum tb_cfg_space space,
u32 offset, u32 length, int timeout_msec)
{
struct tb_cfg_result res = { 0 };
struct cfg_write_pkg request = {
.header = tb_cfg_make_header(route),
.addr = {
.port = port,
.space = space,
.offset = offset,
.length = length,
},
};
struct cfg_read_pkg reply;
int retries = 0;
memcpy(&request.data, buffer, length * 4);
while (retries < TB_CTL_RETRIES) {
struct tb_cfg_request *req;
req = tb_cfg_request_alloc();
if (!req) {
res.err = -ENOMEM;
return res;
}
request.addr.seq = retries++;
req->match = tb_cfg_match;
req->copy = tb_cfg_copy;
req->request = &request;
req->request_size = 12 + 4 * length;
req->request_type = TB_CFG_PKG_WRITE;
req->response = &reply;
req->response_size = sizeof(reply);
req->response_type = TB_CFG_PKG_WRITE;
res = tb_cfg_request_sync(ctl, req, timeout_msec);
tb_cfg_request_put(req);
if (res.err != -ETIMEDOUT)
break;
/* Wait a bit (arbitrary time) before we send a retry */
usleep_range(10, 100);
}
if (res.err)
return res;
res.response_port = reply.addr.port;
res.err = check_config_address(reply.addr, space, offset, length);
return res;
}
static int tb_cfg_get_error(struct tb_ctl *ctl, enum tb_cfg_space space,
const struct tb_cfg_result *res)
{
/*
* For unimplemented ports access to port config space may return
* TB_CFG_ERROR_INVALID_CONFIG_SPACE (alternatively their type is
* set to TB_TYPE_INACTIVE). In the former case return -ENODEV so
* that the caller can mark the port as disabled.
*/
if (space == TB_CFG_PORT &&
res->tb_error == TB_CFG_ERROR_INVALID_CONFIG_SPACE)
return -ENODEV;
tb_cfg_print_error(ctl, res);
if (res->tb_error == TB_CFG_ERROR_LOCK)
return -EACCES;
if (res->tb_error == TB_CFG_ERROR_PORT_NOT_CONNECTED)
return -ENOTCONN;
return -EIO;
}
int tb_cfg_read(struct tb_ctl *ctl, void *buffer, u64 route, u32 port,
enum tb_cfg_space space, u32 offset, u32 length)
{
struct tb_cfg_result res = tb_cfg_read_raw(ctl, buffer, route, port,
space, offset, length, ctl->timeout_msec);
switch (res.err) {
case 0:
/* Success */
break;
case 1:
/* Thunderbolt error, tb_error holds the actual number */
return tb_cfg_get_error(ctl, space, &res);
case -ETIMEDOUT:
tb_ctl_warn(ctl, "%llx: timeout reading config space %u from %#x\n",
route, space, offset);
break;
default:
WARN(1, "tb_cfg_read: %d\n", res.err);
break;
}
return res.err;
}
int tb_cfg_write(struct tb_ctl *ctl, const void *buffer, u64 route, u32 port,
enum tb_cfg_space space, u32 offset, u32 length)
{
struct tb_cfg_result res = tb_cfg_write_raw(ctl, buffer, route, port,
space, offset, length, ctl->timeout_msec);
switch (res.err) {
case 0:
/* Success */
break;
case 1:
/* Thunderbolt error, tb_error holds the actual number */
return tb_cfg_get_error(ctl, space, &res);
case -ETIMEDOUT:
tb_ctl_warn(ctl, "%llx: timeout writing config space %u to %#x\n",
route, space, offset);
break;
default:
WARN(1, "tb_cfg_write: %d\n", res.err);
break;
}
return res.err;
}
/**
* tb_cfg_get_upstream_port() - get upstream port number of switch at route
* @ctl: Pointer to the control channel
* @route: Route string of the router
*
* Reads the first dword from the switch's TB_CFG_SWITCH config area and
* returns the port number from which the reply originated.
*
* Return: Returns the upstream port number on success or an error code on
* failure.
*/
int tb_cfg_get_upstream_port(struct tb_ctl *ctl, u64 route)
{
u32 dummy;
struct tb_cfg_result res = tb_cfg_read_raw(ctl, &dummy, route, 0,
TB_CFG_SWITCH, 0, 1,
ctl->timeout_msec);
if (res.err == 1)
return -EIO;
if (res.err)
return res.err;
return res.response_port;
}
| linux-master | drivers/thunderbolt/ctl.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Thunderbolt driver - Tunneling support
*
* Copyright (c) 2014 Andreas Noever <[email protected]>
* Copyright (C) 2019, Intel Corporation
*/
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/ktime.h>
#include <linux/string_helpers.h>
#include "tunnel.h"
#include "tb.h"
/* PCIe adapters always use HopID 8 for both directions */
#define TB_PCI_HOPID 8
#define TB_PCI_PATH_DOWN 0
#define TB_PCI_PATH_UP 1
/* USB3 adapters always use HopID 8 for both directions */
#define TB_USB3_HOPID 8
#define TB_USB3_PATH_DOWN 0
#define TB_USB3_PATH_UP 1
/* DP adapters use HopID 8 for AUX and 9 for Video */
#define TB_DP_AUX_TX_HOPID 8
#define TB_DP_AUX_RX_HOPID 8
#define TB_DP_VIDEO_HOPID 9
#define TB_DP_VIDEO_PATH_OUT 0
#define TB_DP_AUX_PATH_OUT 1
#define TB_DP_AUX_PATH_IN 2
/* Minimum number of credits needed for PCIe path */
#define TB_MIN_PCIE_CREDITS 6U
/*
* Number of credits we try to allocate for each DMA path if not limited
* by the host router baMaxHI.
*/
#define TB_DMA_CREDITS 14
/* Minimum number of credits for DMA path */
#define TB_MIN_DMA_CREDITS 1
static unsigned int dma_credits = TB_DMA_CREDITS;
module_param(dma_credits, uint, 0444);
MODULE_PARM_DESC(dma_credits, "specify custom credits for DMA tunnels (default: "
__MODULE_STRING(TB_DMA_CREDITS) ")");
static bool bw_alloc_mode = true;
module_param(bw_alloc_mode, bool, 0444);
MODULE_PARM_DESC(bw_alloc_mode,
"enable bandwidth allocation mode if supported (default: true)");
static const char * const tb_tunnel_names[] = { "PCI", "DP", "DMA", "USB3" };
#define __TB_TUNNEL_PRINT(level, tunnel, fmt, arg...) \
do { \
struct tb_tunnel *__tunnel = (tunnel); \
level(__tunnel->tb, "%llx:%u <-> %llx:%u (%s): " fmt, \
tb_route(__tunnel->src_port->sw), \
__tunnel->src_port->port, \
tb_route(__tunnel->dst_port->sw), \
__tunnel->dst_port->port, \
tb_tunnel_names[__tunnel->type], \
## arg); \
} while (0)
#define tb_tunnel_WARN(tunnel, fmt, arg...) \
__TB_TUNNEL_PRINT(tb_WARN, tunnel, fmt, ##arg)
#define tb_tunnel_warn(tunnel, fmt, arg...) \
__TB_TUNNEL_PRINT(tb_warn, tunnel, fmt, ##arg)
#define tb_tunnel_info(tunnel, fmt, arg...) \
__TB_TUNNEL_PRINT(tb_info, tunnel, fmt, ##arg)
#define tb_tunnel_dbg(tunnel, fmt, arg...) \
__TB_TUNNEL_PRINT(tb_dbg, tunnel, fmt, ##arg)
static inline unsigned int tb_usable_credits(const struct tb_port *port)
{
return port->total_credits - port->ctl_credits;
}
/**
* tb_available_credits() - Available credits for PCIe and DMA
* @port: Lane adapter to check
* @max_dp_streams: If non-%NULL stores maximum number of simultaneous DP
* streams possible through this lane adapter
*/
static unsigned int tb_available_credits(const struct tb_port *port,
size_t *max_dp_streams)
{
const struct tb_switch *sw = port->sw;
int credits, usb3, pcie, spare;
size_t ndp;
usb3 = tb_acpi_may_tunnel_usb3() ? sw->max_usb3_credits : 0;
pcie = tb_acpi_may_tunnel_pcie() ? sw->max_pcie_credits : 0;
if (tb_acpi_is_xdomain_allowed()) {
spare = min_not_zero(sw->max_dma_credits, dma_credits);
/* Add some credits for potential second DMA tunnel */
spare += TB_MIN_DMA_CREDITS;
} else {
spare = 0;
}
credits = tb_usable_credits(port);
if (tb_acpi_may_tunnel_dp()) {
/*
* Maximum number of DP streams possible through the
* lane adapter.
*/
if (sw->min_dp_aux_credits + sw->min_dp_main_credits)
ndp = (credits - (usb3 + pcie + spare)) /
(sw->min_dp_aux_credits + sw->min_dp_main_credits);
else
ndp = 0;
} else {
ndp = 0;
}
credits -= ndp * (sw->min_dp_aux_credits + sw->min_dp_main_credits);
credits -= usb3;
if (max_dp_streams)
*max_dp_streams = ndp;
return credits > 0 ? credits : 0;
}
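/*
 * Rough summary of the computation above: the value returned is
 * approximately
 *
 *   tb_usable_credits(port) - usb3 - ndp * (min_dp_aux + min_dp_main)
 *
 * where ndp is sized so that the USB3, PCIe and spare DMA credits still
 * fit. The PCIe and DMA spare amounts are not subtracted here directly;
 * presumably the per-tunnel init functions clamp their own allocations
 * against this value instead.
 */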
static struct tb_tunnel *tb_tunnel_alloc(struct tb *tb, size_t npaths,
enum tb_tunnel_type type)
{
struct tb_tunnel *tunnel;
tunnel = kzalloc(sizeof(*tunnel), GFP_KERNEL);
if (!tunnel)
return NULL;
tunnel->paths = kcalloc(npaths, sizeof(tunnel->paths[0]), GFP_KERNEL);
if (!tunnel->paths) {
tb_tunnel_free(tunnel);
return NULL;
}
INIT_LIST_HEAD(&tunnel->list);
tunnel->tb = tb;
tunnel->npaths = npaths;
tunnel->type = type;
return tunnel;
}
static int tb_pci_set_ext_encapsulation(struct tb_tunnel *tunnel, bool enable)
{
int ret;
/* Only supported if both routers are at least USB4 v2 */
if (usb4_switch_version(tunnel->src_port->sw) < 2 ||
usb4_switch_version(tunnel->dst_port->sw) < 2)
return 0;
ret = usb4_pci_port_set_ext_encapsulation(tunnel->src_port, enable);
if (ret)
return ret;
ret = usb4_pci_port_set_ext_encapsulation(tunnel->dst_port, enable);
if (ret)
return ret;
tb_tunnel_dbg(tunnel, "extended encapsulation %s\n",
str_enabled_disabled(enable));
return 0;
}
static int tb_pci_activate(struct tb_tunnel *tunnel, bool activate)
{
int res;
if (activate) {
res = tb_pci_set_ext_encapsulation(tunnel, activate);
if (res)
return res;
}
res = tb_pci_port_enable(tunnel->src_port, activate);
if (res)
return res;
if (tb_port_is_pcie_up(tunnel->dst_port)) {
res = tb_pci_port_enable(tunnel->dst_port, activate);
if (res)
return res;
}
return activate ? 0 : tb_pci_set_ext_encapsulation(tunnel, activate);
}
static int tb_pci_init_credits(struct tb_path_hop *hop)
{
struct tb_port *port = hop->in_port;
struct tb_switch *sw = port->sw;
unsigned int credits;
if (tb_port_use_credit_allocation(port)) {
unsigned int available;
available = tb_available_credits(port, NULL);
credits = min(sw->max_pcie_credits, available);
if (credits < TB_MIN_PCIE_CREDITS)
return -ENOSPC;
credits = max(TB_MIN_PCIE_CREDITS, credits);
} else {
if (tb_port_is_null(port))
credits = port->bonded ? 32 : 16;
else
credits = 7;
}
hop->initial_credits = credits;
return 0;
}
static int tb_pci_init_path(struct tb_path *path)
{
struct tb_path_hop *hop;
path->egress_fc_enable = TB_PATH_SOURCE | TB_PATH_INTERNAL;
path->egress_shared_buffer = TB_PATH_NONE;
path->ingress_fc_enable = TB_PATH_ALL;
path->ingress_shared_buffer = TB_PATH_NONE;
path->priority = 3;
path->weight = 1;
path->drop_packages = 0;
tb_path_for_each_hop(path, hop) {
int ret;
ret = tb_pci_init_credits(hop);
if (ret)
return ret;
}
return 0;
}
/**
* tb_tunnel_discover_pci() - Discover existing PCIe tunnels
* @tb: Pointer to the domain structure
* @down: PCIe downstream adapter
* @alloc_hopid: Allocate HopIDs from visited ports
*
* If @down adapter is active, follows the tunnel to the PCIe upstream
* adapter and back. Returns the discovered tunnel or %NULL if there was
* no tunnel.
*/
struct tb_tunnel *tb_tunnel_discover_pci(struct tb *tb, struct tb_port *down,
bool alloc_hopid)
{
struct tb_tunnel *tunnel;
struct tb_path *path;
if (!tb_pci_port_is_enabled(down))
return NULL;
tunnel = tb_tunnel_alloc(tb, 2, TB_TUNNEL_PCI);
if (!tunnel)
return NULL;
tunnel->activate = tb_pci_activate;
tunnel->src_port = down;
/*
* Discover both paths even if they are not complete. We will
* clean them up by calling tb_tunnel_deactivate() below in that
* case.
*/
path = tb_path_discover(down, TB_PCI_HOPID, NULL, -1,
&tunnel->dst_port, "PCIe Up", alloc_hopid);
if (!path) {
/* Just disable the downstream port */
tb_pci_port_enable(down, false);
goto err_free;
}
tunnel->paths[TB_PCI_PATH_UP] = path;
if (tb_pci_init_path(tunnel->paths[TB_PCI_PATH_UP]))
goto err_free;
path = tb_path_discover(tunnel->dst_port, -1, down, TB_PCI_HOPID, NULL,
"PCIe Down", alloc_hopid);
if (!path)
goto err_deactivate;
tunnel->paths[TB_PCI_PATH_DOWN] = path;
if (tb_pci_init_path(tunnel->paths[TB_PCI_PATH_DOWN]))
goto err_deactivate;
/* Validate that the tunnel is complete */
if (!tb_port_is_pcie_up(tunnel->dst_port)) {
tb_port_warn(tunnel->dst_port,
"path does not end on a PCIe adapter, cleaning up\n");
goto err_deactivate;
}
if (down != tunnel->src_port) {
tb_tunnel_warn(tunnel, "path is not complete, cleaning up\n");
goto err_deactivate;
}
if (!tb_pci_port_is_enabled(tunnel->dst_port)) {
tb_tunnel_warn(tunnel,
"tunnel is not fully activated, cleaning up\n");
goto err_deactivate;
}
tb_tunnel_dbg(tunnel, "discovered\n");
return tunnel;
err_deactivate:
tb_tunnel_deactivate(tunnel);
err_free:
tb_tunnel_free(tunnel);
return NULL;
}
/**
* tb_tunnel_alloc_pci() - allocate a pci tunnel
* @tb: Pointer to the domain structure
* @up: PCIe upstream adapter port
* @down: PCIe downstream adapter port
*
* Allocate a PCI tunnel. The ports must be of type TB_TYPE_PCIE_UP and
* TB_TYPE_PCIE_DOWN.
*
* Return: Returns a tb_tunnel on success or NULL on failure.
*/
struct tb_tunnel *tb_tunnel_alloc_pci(struct tb *tb, struct tb_port *up,
struct tb_port *down)
{
struct tb_tunnel *tunnel;
struct tb_path *path;
tunnel = tb_tunnel_alloc(tb, 2, TB_TUNNEL_PCI);
if (!tunnel)
return NULL;
tunnel->activate = tb_pci_activate;
tunnel->src_port = down;
tunnel->dst_port = up;
path = tb_path_alloc(tb, down, TB_PCI_HOPID, up, TB_PCI_HOPID, 0,
"PCIe Down");
if (!path)
goto err_free;
tunnel->paths[TB_PCI_PATH_DOWN] = path;
if (tb_pci_init_path(path))
goto err_free;
path = tb_path_alloc(tb, up, TB_PCI_HOPID, down, TB_PCI_HOPID, 0,
"PCIe Up");
if (!path)
goto err_free;
tunnel->paths[TB_PCI_PATH_UP] = path;
if (tb_pci_init_path(path))
goto err_free;
return tunnel;
err_free:
tb_tunnel_free(tunnel);
return NULL;
}
static bool tb_dp_is_usb4(const struct tb_switch *sw)
{
/* Titan Ridge DP adapters need the same treatment as USB4 */
return tb_switch_is_usb4(sw) || tb_switch_is_titan_ridge(sw);
}
static int tb_dp_cm_handshake(struct tb_port *in, struct tb_port *out,
int timeout_msec)
{
ktime_t timeout = ktime_add_ms(ktime_get(), timeout_msec);
u32 val;
int ret;
/* Both ends need to support this */
if (!tb_dp_is_usb4(in->sw) || !tb_dp_is_usb4(out->sw))
return 0;
ret = tb_port_read(out, &val, TB_CFG_PORT,
out->cap_adap + DP_STATUS_CTRL, 1);
if (ret)
return ret;
val |= DP_STATUS_CTRL_UF | DP_STATUS_CTRL_CMHS;
ret = tb_port_write(out, &val, TB_CFG_PORT,
out->cap_adap + DP_STATUS_CTRL, 1);
if (ret)
return ret;
do {
ret = tb_port_read(out, &val, TB_CFG_PORT,
out->cap_adap + DP_STATUS_CTRL, 1);
if (ret)
return ret;
if (!(val & DP_STATUS_CTRL_CMHS))
return 0;
usleep_range(100, 150);
} while (ktime_before(ktime_get(), timeout));
return -ETIMEDOUT;
}
/*
* Returns maximum possible rate from capability supporting only DP 2.0
* and below. Used when DP BW allocation mode is not enabled.
*/
static inline u32 tb_dp_cap_get_rate(u32 val)
{
u32 rate = (val & DP_COMMON_CAP_RATE_MASK) >> DP_COMMON_CAP_RATE_SHIFT;
switch (rate) {
case DP_COMMON_CAP_RATE_RBR:
return 1620;
case DP_COMMON_CAP_RATE_HBR:
return 2700;
case DP_COMMON_CAP_RATE_HBR2:
return 5400;
case DP_COMMON_CAP_RATE_HBR3:
return 8100;
default:
return 0;
}
}
/*
* Returns maximum possible rate from capability supporting DP 2.1
* UHBR20, 13.5 and 10 rates as well. Use only when DP BW allocation
* mode is enabled.
*/
static inline u32 tb_dp_cap_get_rate_ext(u32 val)
{
if (val & DP_COMMON_CAP_UHBR20)
return 20000;
else if (val & DP_COMMON_CAP_UHBR13_5)
return 13500;
else if (val & DP_COMMON_CAP_UHBR10)
return 10000;
return tb_dp_cap_get_rate(val);
}
static inline bool tb_dp_is_uhbr_rate(unsigned int rate)
{
return rate >= 10000;
}
static inline u32 tb_dp_cap_set_rate(u32 val, u32 rate)
{
val &= ~DP_COMMON_CAP_RATE_MASK;
switch (rate) {
default:
WARN(1, "invalid rate %u passed, defaulting to 1620 MB/s\n", rate);
fallthrough;
case 1620:
val |= DP_COMMON_CAP_RATE_RBR << DP_COMMON_CAP_RATE_SHIFT;
break;
case 2700:
val |= DP_COMMON_CAP_RATE_HBR << DP_COMMON_CAP_RATE_SHIFT;
break;
case 5400:
val |= DP_COMMON_CAP_RATE_HBR2 << DP_COMMON_CAP_RATE_SHIFT;
break;
case 8100:
val |= DP_COMMON_CAP_RATE_HBR3 << DP_COMMON_CAP_RATE_SHIFT;
break;
}
return val;
}
static inline u32 tb_dp_cap_get_lanes(u32 val)
{
u32 lanes = (val & DP_COMMON_CAP_LANES_MASK) >> DP_COMMON_CAP_LANES_SHIFT;
switch (lanes) {
case DP_COMMON_CAP_1_LANE:
return 1;
case DP_COMMON_CAP_2_LANES:
return 2;
case DP_COMMON_CAP_4_LANES:
return 4;
default:
return 0;
}
}
static inline u32 tb_dp_cap_set_lanes(u32 val, u32 lanes)
{
val &= ~DP_COMMON_CAP_LANES_MASK;
switch (lanes) {
default:
WARN(1, "invalid number of lanes %u passed, defaulting to 1\n",
lanes);
fallthrough;
case 1:
val |= DP_COMMON_CAP_1_LANE << DP_COMMON_CAP_LANES_SHIFT;
break;
case 2:
val |= DP_COMMON_CAP_2_LANES << DP_COMMON_CAP_LANES_SHIFT;
break;
case 4:
val |= DP_COMMON_CAP_4_LANES << DP_COMMON_CAP_LANES_SHIFT;
break;
}
return val;
}
static unsigned int tb_dp_bandwidth(unsigned int rate, unsigned int lanes)
{
	/* Tunneling removes the DP 8b/10b or 128b/132b encoding overhead */
if (tb_dp_is_uhbr_rate(rate))
return rate * lanes * 128 / 132;
return rate * lanes * 8 / 10;
}
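/*
 * A worked example for tb_dp_bandwidth() above, with illustrative
 * numbers: an HBR2 x4 stream runs at 5400 Mb/s per lane, so the
 * tunneled bandwidth after removing the 8b/10b overhead is
 * 5400 * 4 * 8 / 10 = 17280 Mb/s. A UHBR10 x4 stream uses 128b/132b
 * encoding instead: 10000 * 4 * 128 / 132 = 38787 Mb/s (integer math).
 */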
static int tb_dp_reduce_bandwidth(int max_bw, u32 in_rate, u32 in_lanes,
u32 out_rate, u32 out_lanes, u32 *new_rate,
u32 *new_lanes)
{
static const u32 dp_bw[][2] = {
/* Mb/s, lanes */
{ 8100, 4 }, /* 25920 Mb/s */
{ 5400, 4 }, /* 17280 Mb/s */
{ 8100, 2 }, /* 12960 Mb/s */
{ 2700, 4 }, /* 8640 Mb/s */
{ 5400, 2 }, /* 8640 Mb/s */
{ 8100, 1 }, /* 6480 Mb/s */
{ 1620, 4 }, /* 5184 Mb/s */
{ 5400, 1 }, /* 4320 Mb/s */
{ 2700, 2 }, /* 4320 Mb/s */
{ 1620, 2 }, /* 2592 Mb/s */
{ 2700, 1 }, /* 2160 Mb/s */
{ 1620, 1 }, /* 1296 Mb/s */
};
unsigned int i;
/*
* Find a combination that can fit into max_bw and does not
* exceed the maximum rate and lanes supported by the DP OUT and
* DP IN adapters.
*/
for (i = 0; i < ARRAY_SIZE(dp_bw); i++) {
if (dp_bw[i][0] > out_rate || dp_bw[i][1] > out_lanes)
continue;
if (dp_bw[i][0] > in_rate || dp_bw[i][1] > in_lanes)
continue;
if (tb_dp_bandwidth(dp_bw[i][0], dp_bw[i][1]) <= max_bw) {
*new_rate = dp_bw[i][0];
*new_lanes = dp_bw[i][1];
return 0;
}
}
return -ENOSR;
}
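/*
 * Example of the reduction above, assuming both adapters advertise
 * HBR3 x4 and the tunnel is limited to 12000 Mb/s: {8100,4}, {5400,4}
 * and {8100,2} all exceed the limit, so the first fitting entry is
 * {2700,4}, which consumes 2700 * 4 * 8 / 10 = 8640 Mb/s.
 */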
static int tb_dp_xchg_caps(struct tb_tunnel *tunnel)
{
u32 out_dp_cap, out_rate, out_lanes, in_dp_cap, in_rate, in_lanes, bw;
struct tb_port *out = tunnel->dst_port;
struct tb_port *in = tunnel->src_port;
int ret, max_bw;
/*
* Copy DP_LOCAL_CAP register to DP_REMOTE_CAP register for
* newer generation hardware.
*/
if (in->sw->generation < 2 || out->sw->generation < 2)
return 0;
/*
* Perform connection manager handshake between IN and OUT ports
* before capabilities exchange can take place.
*/
ret = tb_dp_cm_handshake(in, out, 3000);
if (ret)
return ret;
/* Read both DP_LOCAL_CAP registers */
ret = tb_port_read(in, &in_dp_cap, TB_CFG_PORT,
in->cap_adap + DP_LOCAL_CAP, 1);
if (ret)
return ret;
ret = tb_port_read(out, &out_dp_cap, TB_CFG_PORT,
out->cap_adap + DP_LOCAL_CAP, 1);
if (ret)
return ret;
/* Write IN local caps to OUT remote caps */
ret = tb_port_write(out, &in_dp_cap, TB_CFG_PORT,
out->cap_adap + DP_REMOTE_CAP, 1);
if (ret)
return ret;
in_rate = tb_dp_cap_get_rate(in_dp_cap);
in_lanes = tb_dp_cap_get_lanes(in_dp_cap);
tb_port_dbg(in, "maximum supported bandwidth %u Mb/s x%u = %u Mb/s\n",
in_rate, in_lanes, tb_dp_bandwidth(in_rate, in_lanes));
/*
* If the tunnel bandwidth is limited (max_bw is set) then see
* if we need to reduce bandwidth to fit there.
*/
out_rate = tb_dp_cap_get_rate(out_dp_cap);
out_lanes = tb_dp_cap_get_lanes(out_dp_cap);
bw = tb_dp_bandwidth(out_rate, out_lanes);
tb_port_dbg(out, "maximum supported bandwidth %u Mb/s x%u = %u Mb/s\n",
out_rate, out_lanes, bw);
if (in->sw->config.depth < out->sw->config.depth)
max_bw = tunnel->max_down;
else
max_bw = tunnel->max_up;
if (max_bw && bw > max_bw) {
u32 new_rate, new_lanes, new_bw;
ret = tb_dp_reduce_bandwidth(max_bw, in_rate, in_lanes,
out_rate, out_lanes, &new_rate,
&new_lanes);
if (ret) {
tb_port_info(out, "not enough bandwidth for DP tunnel\n");
return ret;
}
new_bw = tb_dp_bandwidth(new_rate, new_lanes);
tb_port_dbg(out, "bandwidth reduced to %u Mb/s x%u = %u Mb/s\n",
new_rate, new_lanes, new_bw);
/*
* Set new rate and number of lanes before writing it to
* the IN port remote caps.
*/
out_dp_cap = tb_dp_cap_set_rate(out_dp_cap, new_rate);
out_dp_cap = tb_dp_cap_set_lanes(out_dp_cap, new_lanes);
}
/*
* Titan Ridge does not disable AUX timers when it gets
* SET_CONFIG with SET_LTTPR_MODE set. This causes problems with
* DP tunneling.
*/
if (tb_route(out->sw) && tb_switch_is_titan_ridge(out->sw)) {
out_dp_cap |= DP_COMMON_CAP_LTTPR_NS;
tb_port_dbg(out, "disabling LTTPR\n");
}
return tb_port_write(in, &out_dp_cap, TB_CFG_PORT,
in->cap_adap + DP_REMOTE_CAP, 1);
}
static int tb_dp_bandwidth_alloc_mode_enable(struct tb_tunnel *tunnel)
{
int ret, estimated_bw, granularity, tmp;
struct tb_port *out = tunnel->dst_port;
struct tb_port *in = tunnel->src_port;
u32 out_dp_cap, out_rate, out_lanes;
u32 in_dp_cap, in_rate, in_lanes;
u32 rate, lanes;
if (!bw_alloc_mode)
return 0;
ret = usb4_dp_port_set_cm_bandwidth_mode_supported(in, true);
if (ret)
return ret;
ret = usb4_dp_port_set_group_id(in, in->group->index);
if (ret)
return ret;
/*
* Get the non-reduced rate and lanes based on the lowest
* capability of both adapters.
*/
ret = tb_port_read(in, &in_dp_cap, TB_CFG_PORT,
in->cap_adap + DP_LOCAL_CAP, 1);
if (ret)
return ret;
ret = tb_port_read(out, &out_dp_cap, TB_CFG_PORT,
out->cap_adap + DP_LOCAL_CAP, 1);
if (ret)
return ret;
in_rate = tb_dp_cap_get_rate(in_dp_cap);
in_lanes = tb_dp_cap_get_lanes(in_dp_cap);
out_rate = tb_dp_cap_get_rate(out_dp_cap);
out_lanes = tb_dp_cap_get_lanes(out_dp_cap);
rate = min(in_rate, out_rate);
lanes = min(in_lanes, out_lanes);
tmp = tb_dp_bandwidth(rate, lanes);
tb_port_dbg(in, "non-reduced bandwidth %u Mb/s x%u = %u Mb/s\n", rate,
lanes, tmp);
ret = usb4_dp_port_set_nrd(in, rate, lanes);
if (ret)
return ret;
/*
	 * Pick a granularity that supports the maximum possible
	 * bandwidth. For that we use the UHBR rates too.
*/
in_rate = tb_dp_cap_get_rate_ext(in_dp_cap);
out_rate = tb_dp_cap_get_rate_ext(out_dp_cap);
rate = min(in_rate, out_rate);
tmp = tb_dp_bandwidth(rate, lanes);
tb_port_dbg(in,
"maximum bandwidth through allocation mode %u Mb/s x%u = %u Mb/s\n",
rate, lanes, tmp);
for (granularity = 250; tmp / granularity > 255 && granularity <= 1000;
granularity *= 2)
;
tb_port_dbg(in, "granularity %d Mb/s\n", granularity);
/*
* Returns -EINVAL if granularity above is outside of the
* accepted ranges.
*/
ret = usb4_dp_port_set_granularity(in, granularity);
if (ret)
return ret;
/*
* Bandwidth estimation is pretty much what we have in
* max_up/down fields. For discovery we just read what the
* estimation was set to.
*/
if (in->sw->config.depth < out->sw->config.depth)
estimated_bw = tunnel->max_down;
else
estimated_bw = tunnel->max_up;
tb_port_dbg(in, "estimated bandwidth %d Mb/s\n", estimated_bw);
ret = usb4_dp_port_set_estimated_bandwidth(in, estimated_bw);
if (ret)
return ret;
	/* Initial allocation should be 0 according to the spec */
ret = usb4_dp_port_allocate_bandwidth(in, 0);
if (ret)
return ret;
tb_port_dbg(in, "bandwidth allocation mode enabled\n");
return 0;
}
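/*
 * Granularity selection example with assumed capabilities: if both
 * adapters support UHBR20 and the non-reduced link is x4, the maximum
 * bandwidth is 20000 * 4 * 128 / 132 = 77575 Mb/s. With 250 Mb/s
 * granularity that would need 310 units (> 255), so the loop above
 * doubles the granularity once and settles on 500 Mb/s (155 units).
 */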
static int tb_dp_init(struct tb_tunnel *tunnel)
{
struct tb_port *in = tunnel->src_port;
struct tb_switch *sw = in->sw;
struct tb *tb = in->sw->tb;
int ret;
ret = tb_dp_xchg_caps(tunnel);
if (ret)
return ret;
if (!tb_switch_is_usb4(sw))
return 0;
if (!usb4_dp_port_bandwidth_mode_supported(in))
return 0;
tb_port_dbg(in, "bandwidth allocation mode supported\n");
ret = usb4_dp_port_set_cm_id(in, tb->index);
if (ret)
return ret;
return tb_dp_bandwidth_alloc_mode_enable(tunnel);
}
static void tb_dp_deinit(struct tb_tunnel *tunnel)
{
struct tb_port *in = tunnel->src_port;
if (!usb4_dp_port_bandwidth_mode_supported(in))
return;
if (usb4_dp_port_bandwidth_mode_enabled(in)) {
usb4_dp_port_set_cm_bandwidth_mode_supported(in, false);
tb_port_dbg(in, "bandwidth allocation mode disabled\n");
}
}
static int tb_dp_activate(struct tb_tunnel *tunnel, bool active)
{
int ret;
if (active) {
struct tb_path **paths;
int last;
paths = tunnel->paths;
last = paths[TB_DP_VIDEO_PATH_OUT]->path_length - 1;
tb_dp_port_set_hops(tunnel->src_port,
paths[TB_DP_VIDEO_PATH_OUT]->hops[0].in_hop_index,
paths[TB_DP_AUX_PATH_OUT]->hops[0].in_hop_index,
paths[TB_DP_AUX_PATH_IN]->hops[last].next_hop_index);
tb_dp_port_set_hops(tunnel->dst_port,
paths[TB_DP_VIDEO_PATH_OUT]->hops[last].next_hop_index,
paths[TB_DP_AUX_PATH_IN]->hops[0].in_hop_index,
paths[TB_DP_AUX_PATH_OUT]->hops[last].next_hop_index);
} else {
tb_dp_port_hpd_clear(tunnel->src_port);
tb_dp_port_set_hops(tunnel->src_port, 0, 0, 0);
if (tb_port_is_dpout(tunnel->dst_port))
tb_dp_port_set_hops(tunnel->dst_port, 0, 0, 0);
}
ret = tb_dp_port_enable(tunnel->src_port, active);
if (ret)
return ret;
if (tb_port_is_dpout(tunnel->dst_port))
return tb_dp_port_enable(tunnel->dst_port, active);
return 0;
}
/* max_bw is rounded up to next granularity */
static int tb_dp_bandwidth_mode_maximum_bandwidth(struct tb_tunnel *tunnel,
int *max_bw)
{
struct tb_port *in = tunnel->src_port;
int ret, rate, lanes, nrd_bw;
u32 cap;
/*
	 * DP IN adapter DP_LOCAL_CAP gets updated to the lowest AUX
	 * read parameter values, so we can use it to determine the
	 * maximum possible bandwidth over this link.
*
* See USB4 v2 spec 1.0 10.4.4.5.
*/
ret = tb_port_read(in, &cap, TB_CFG_PORT,
in->cap_adap + DP_LOCAL_CAP, 1);
if (ret)
return ret;
rate = tb_dp_cap_get_rate_ext(cap);
if (tb_dp_is_uhbr_rate(rate)) {
/*
* When UHBR is used there is no reduction in lanes so
* we can use this directly.
*/
lanes = tb_dp_cap_get_lanes(cap);
} else {
/*
		 * If UHBR is not supported, use the non-reduced rate
		 * and lanes instead.
*/
ret = usb4_dp_port_nrd(in, &rate, &lanes);
if (ret)
return ret;
}
nrd_bw = tb_dp_bandwidth(rate, lanes);
if (max_bw) {
ret = usb4_dp_port_granularity(in);
if (ret < 0)
return ret;
*max_bw = roundup(nrd_bw, ret);
}
return nrd_bw;
}
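/*
 * Example of the rounding above: a non-reduced HBR2 x4 link gives
 * nrd_bw = 17280 Mb/s. If the granularity programmed earlier was
 * 250 Mb/s, *max_bw is rounded up to 17500 Mb/s.
 */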
static int tb_dp_bandwidth_mode_consumed_bandwidth(struct tb_tunnel *tunnel,
int *consumed_up,
int *consumed_down)
{
struct tb_port *out = tunnel->dst_port;
struct tb_port *in = tunnel->src_port;
int ret, allocated_bw, max_bw;
if (!usb4_dp_port_bandwidth_mode_enabled(in))
return -EOPNOTSUPP;
if (!tunnel->bw_mode)
return -EOPNOTSUPP;
/* Read what was allocated previously if any */
ret = usb4_dp_port_allocated_bandwidth(in);
if (ret < 0)
return ret;
allocated_bw = ret;
ret = tb_dp_bandwidth_mode_maximum_bandwidth(tunnel, &max_bw);
if (ret < 0)
return ret;
if (allocated_bw == max_bw)
allocated_bw = ret;
tb_port_dbg(in, "consumed bandwidth through allocation mode %d Mb/s\n",
allocated_bw);
if (in->sw->config.depth < out->sw->config.depth) {
*consumed_up = 0;
*consumed_down = allocated_bw;
} else {
*consumed_up = allocated_bw;
*consumed_down = 0;
}
return 0;
}
static int tb_dp_allocated_bandwidth(struct tb_tunnel *tunnel, int *allocated_up,
int *allocated_down)
{
struct tb_port *out = tunnel->dst_port;
struct tb_port *in = tunnel->src_port;
/*
* If we have already set the allocated bandwidth then use that.
* Otherwise we read it from the DPRX.
*/
if (usb4_dp_port_bandwidth_mode_enabled(in) && tunnel->bw_mode) {
int ret, allocated_bw, max_bw;
ret = usb4_dp_port_allocated_bandwidth(in);
if (ret < 0)
return ret;
allocated_bw = ret;
ret = tb_dp_bandwidth_mode_maximum_bandwidth(tunnel, &max_bw);
if (ret < 0)
return ret;
if (allocated_bw == max_bw)
allocated_bw = ret;
if (in->sw->config.depth < out->sw->config.depth) {
*allocated_up = 0;
*allocated_down = allocated_bw;
} else {
*allocated_up = allocated_bw;
*allocated_down = 0;
}
return 0;
}
return tunnel->consumed_bandwidth(tunnel, allocated_up,
allocated_down);
}
static int tb_dp_alloc_bandwidth(struct tb_tunnel *tunnel, int *alloc_up,
int *alloc_down)
{
struct tb_port *out = tunnel->dst_port;
struct tb_port *in = tunnel->src_port;
int max_bw, ret, tmp;
if (!usb4_dp_port_bandwidth_mode_enabled(in))
return -EOPNOTSUPP;
ret = tb_dp_bandwidth_mode_maximum_bandwidth(tunnel, &max_bw);
if (ret < 0)
return ret;
if (in->sw->config.depth < out->sw->config.depth) {
tmp = min(*alloc_down, max_bw);
ret = usb4_dp_port_allocate_bandwidth(in, tmp);
if (ret)
return ret;
*alloc_down = tmp;
*alloc_up = 0;
} else {
tmp = min(*alloc_up, max_bw);
ret = usb4_dp_port_allocate_bandwidth(in, tmp);
if (ret)
return ret;
*alloc_down = 0;
*alloc_up = tmp;
}
/* Now we can use BW mode registers to figure out the bandwidth */
/* TODO: need to handle discovery too */
tunnel->bw_mode = true;
tb_port_dbg(in, "allocated bandwidth through allocation mode %d Mb/s\n",
tmp);
return 0;
}
static int tb_dp_read_dprx(struct tb_tunnel *tunnel, u32 *rate, u32 *lanes,
int timeout_msec)
{
ktime_t timeout = ktime_add_ms(ktime_get(), timeout_msec);
struct tb_port *in = tunnel->src_port;
/*
	 * Wait for DPRX done. Normally it should already be set for an
	 * active tunnel.
*/
do {
u32 val;
int ret;
ret = tb_port_read(in, &val, TB_CFG_PORT,
in->cap_adap + DP_COMMON_CAP, 1);
if (ret)
return ret;
if (val & DP_COMMON_CAP_DPRX_DONE) {
*rate = tb_dp_cap_get_rate(val);
*lanes = tb_dp_cap_get_lanes(val);
tb_port_dbg(in, "consumed bandwidth through DPRX %d Mb/s\n",
tb_dp_bandwidth(*rate, *lanes));
return 0;
}
usleep_range(100, 150);
} while (ktime_before(ktime_get(), timeout));
return -ETIMEDOUT;
}
/* Read cap from tunnel DP IN */
static int tb_dp_read_cap(struct tb_tunnel *tunnel, unsigned int cap, u32 *rate,
u32 *lanes)
{
struct tb_port *in = tunnel->src_port;
u32 val;
int ret;
switch (cap) {
case DP_LOCAL_CAP:
case DP_REMOTE_CAP:
break;
default:
tb_tunnel_WARN(tunnel, "invalid capability index %#x\n", cap);
return -EINVAL;
}
/*
* Read from the copied remote cap so that we take into account
* if capabilities were reduced during exchange.
*/
ret = tb_port_read(in, &val, TB_CFG_PORT, in->cap_adap + cap, 1);
if (ret)
return ret;
*rate = tb_dp_cap_get_rate(val);
*lanes = tb_dp_cap_get_lanes(val);
tb_port_dbg(in, "bandwidth from %#x capability %d Mb/s\n", cap,
tb_dp_bandwidth(*rate, *lanes));
return 0;
}
static int tb_dp_maximum_bandwidth(struct tb_tunnel *tunnel, int *max_up,
int *max_down)
{
struct tb_port *in = tunnel->src_port;
int ret;
if (!usb4_dp_port_bandwidth_mode_enabled(in))
return -EOPNOTSUPP;
ret = tb_dp_bandwidth_mode_maximum_bandwidth(tunnel, NULL);
if (ret < 0)
return ret;
if (in->sw->config.depth < tunnel->dst_port->sw->config.depth) {
*max_up = 0;
*max_down = ret;
} else {
*max_up = ret;
*max_down = 0;
}
return 0;
}
static int tb_dp_consumed_bandwidth(struct tb_tunnel *tunnel, int *consumed_up,
int *consumed_down)
{
struct tb_port *in = tunnel->src_port;
const struct tb_switch *sw = in->sw;
u32 rate = 0, lanes = 0;
int ret;
if (tb_dp_is_usb4(sw)) {
/*
* On USB4 routers check if the bandwidth allocation
* mode is enabled first and then read the bandwidth
* through those registers.
*/
ret = tb_dp_bandwidth_mode_consumed_bandwidth(tunnel, consumed_up,
consumed_down);
if (ret < 0) {
if (ret != -EOPNOTSUPP)
return ret;
} else if (!ret) {
return 0;
}
/*
* Then see if the DPRX negotiation is ready and if yes
* return that bandwidth (it may be smaller than the
* reduced one). Otherwise return the remote (possibly
* reduced) caps.
*/
ret = tb_dp_read_dprx(tunnel, &rate, &lanes, 150);
if (ret) {
if (ret == -ETIMEDOUT)
ret = tb_dp_read_cap(tunnel, DP_REMOTE_CAP,
&rate, &lanes);
if (ret)
return ret;
}
} else if (sw->generation >= 2) {
ret = tb_dp_read_cap(tunnel, DP_REMOTE_CAP, &rate, &lanes);
if (ret)
return ret;
} else {
/* No bandwidth management for legacy devices */
*consumed_up = 0;
*consumed_down = 0;
return 0;
}
if (in->sw->config.depth < tunnel->dst_port->sw->config.depth) {
*consumed_up = 0;
*consumed_down = tb_dp_bandwidth(rate, lanes);
} else {
*consumed_up = tb_dp_bandwidth(rate, lanes);
*consumed_down = 0;
}
return 0;
}
static void tb_dp_init_aux_credits(struct tb_path_hop *hop)
{
struct tb_port *port = hop->in_port;
struct tb_switch *sw = port->sw;
if (tb_port_use_credit_allocation(port))
hop->initial_credits = sw->min_dp_aux_credits;
else
hop->initial_credits = 1;
}
static void tb_dp_init_aux_path(struct tb_path *path)
{
struct tb_path_hop *hop;
path->egress_fc_enable = TB_PATH_SOURCE | TB_PATH_INTERNAL;
path->egress_shared_buffer = TB_PATH_NONE;
path->ingress_fc_enable = TB_PATH_ALL;
path->ingress_shared_buffer = TB_PATH_NONE;
path->priority = 2;
path->weight = 1;
tb_path_for_each_hop(path, hop)
tb_dp_init_aux_credits(hop);
}
static int tb_dp_init_video_credits(struct tb_path_hop *hop)
{
struct tb_port *port = hop->in_port;
struct tb_switch *sw = port->sw;
if (tb_port_use_credit_allocation(port)) {
unsigned int nfc_credits;
size_t max_dp_streams;
tb_available_credits(port, &max_dp_streams);
/*
* Read the number of currently allocated NFC credits
* from the lane adapter. Since we only use them for DP
* tunneling we can use that to figure out how many DP
* tunnels already go through the lane adapter.
*/
nfc_credits = port->config.nfc_credits &
ADP_CS_4_NFC_BUFFERS_MASK;
if (nfc_credits / sw->min_dp_main_credits > max_dp_streams)
return -ENOSPC;
hop->nfc_credits = sw->min_dp_main_credits;
} else {
hop->nfc_credits = min(port->total_credits - 2, 12U);
}
return 0;
}
static int tb_dp_init_video_path(struct tb_path *path)
{
struct tb_path_hop *hop;
path->egress_fc_enable = TB_PATH_NONE;
path->egress_shared_buffer = TB_PATH_NONE;
path->ingress_fc_enable = TB_PATH_NONE;
path->ingress_shared_buffer = TB_PATH_NONE;
path->priority = 1;
path->weight = 1;
tb_path_for_each_hop(path, hop) {
int ret;
ret = tb_dp_init_video_credits(hop);
if (ret)
return ret;
}
return 0;
}
static void tb_dp_dump(struct tb_tunnel *tunnel)
{
struct tb_port *in, *out;
u32 dp_cap, rate, lanes;
in = tunnel->src_port;
out = tunnel->dst_port;
if (tb_port_read(in, &dp_cap, TB_CFG_PORT,
in->cap_adap + DP_LOCAL_CAP, 1))
return;
rate = tb_dp_cap_get_rate(dp_cap);
lanes = tb_dp_cap_get_lanes(dp_cap);
tb_port_dbg(in, "maximum supported bandwidth %u Mb/s x%u = %u Mb/s\n",
rate, lanes, tb_dp_bandwidth(rate, lanes));
out = tunnel->dst_port;
if (tb_port_read(out, &dp_cap, TB_CFG_PORT,
out->cap_adap + DP_LOCAL_CAP, 1))
return;
rate = tb_dp_cap_get_rate(dp_cap);
lanes = tb_dp_cap_get_lanes(dp_cap);
tb_port_dbg(out, "maximum supported bandwidth %u Mb/s x%u = %u Mb/s\n",
rate, lanes, tb_dp_bandwidth(rate, lanes));
if (tb_port_read(in, &dp_cap, TB_CFG_PORT,
in->cap_adap + DP_REMOTE_CAP, 1))
return;
rate = tb_dp_cap_get_rate(dp_cap);
lanes = tb_dp_cap_get_lanes(dp_cap);
tb_port_dbg(in, "reduced bandwidth %u Mb/s x%u = %u Mb/s\n",
rate, lanes, tb_dp_bandwidth(rate, lanes));
}
/**
* tb_tunnel_discover_dp() - Discover existing Display Port tunnels
* @tb: Pointer to the domain structure
* @in: DP in adapter
* @alloc_hopid: Allocate HopIDs from visited ports
*
 * If @in adapter is active, follows the tunnel to the DP out adapter
 * and back.
 *
 * Return: The discovered tunnel or %NULL if there was no tunnel.
*/
struct tb_tunnel *tb_tunnel_discover_dp(struct tb *tb, struct tb_port *in,
bool alloc_hopid)
{
struct tb_tunnel *tunnel;
struct tb_port *port;
struct tb_path *path;
if (!tb_dp_port_is_enabled(in))
return NULL;
tunnel = tb_tunnel_alloc(tb, 3, TB_TUNNEL_DP);
if (!tunnel)
return NULL;
tunnel->init = tb_dp_init;
tunnel->deinit = tb_dp_deinit;
tunnel->activate = tb_dp_activate;
tunnel->maximum_bandwidth = tb_dp_maximum_bandwidth;
tunnel->allocated_bandwidth = tb_dp_allocated_bandwidth;
tunnel->alloc_bandwidth = tb_dp_alloc_bandwidth;
tunnel->consumed_bandwidth = tb_dp_consumed_bandwidth;
tunnel->src_port = in;
path = tb_path_discover(in, TB_DP_VIDEO_HOPID, NULL, -1,
&tunnel->dst_port, "Video", alloc_hopid);
if (!path) {
/* Just disable the DP IN port */
tb_dp_port_enable(in, false);
goto err_free;
}
tunnel->paths[TB_DP_VIDEO_PATH_OUT] = path;
if (tb_dp_init_video_path(tunnel->paths[TB_DP_VIDEO_PATH_OUT]))
goto err_free;
path = tb_path_discover(in, TB_DP_AUX_TX_HOPID, NULL, -1, NULL, "AUX TX",
alloc_hopid);
if (!path)
goto err_deactivate;
tunnel->paths[TB_DP_AUX_PATH_OUT] = path;
tb_dp_init_aux_path(tunnel->paths[TB_DP_AUX_PATH_OUT]);
path = tb_path_discover(tunnel->dst_port, -1, in, TB_DP_AUX_RX_HOPID,
&port, "AUX RX", alloc_hopid);
if (!path)
goto err_deactivate;
tunnel->paths[TB_DP_AUX_PATH_IN] = path;
tb_dp_init_aux_path(tunnel->paths[TB_DP_AUX_PATH_IN]);
/* Validate that the tunnel is complete */
if (!tb_port_is_dpout(tunnel->dst_port)) {
tb_port_warn(in, "path does not end on a DP adapter, cleaning up\n");
goto err_deactivate;
}
if (!tb_dp_port_is_enabled(tunnel->dst_port))
goto err_deactivate;
if (!tb_dp_port_hpd_is_active(tunnel->dst_port))
goto err_deactivate;
if (port != tunnel->src_port) {
tb_tunnel_warn(tunnel, "path is not complete, cleaning up\n");
goto err_deactivate;
}
tb_dp_dump(tunnel);
tb_tunnel_dbg(tunnel, "discovered\n");
return tunnel;
err_deactivate:
tb_tunnel_deactivate(tunnel);
err_free:
tb_tunnel_free(tunnel);
return NULL;
}
/**
* tb_tunnel_alloc_dp() - allocate a Display Port tunnel
* @tb: Pointer to the domain structure
* @in: DP in adapter port
* @out: DP out adapter port
* @link_nr: Preferred lane adapter when the link is not bonded
* @max_up: Maximum available upstream bandwidth for the DP tunnel (%0
* if not limited)
* @max_down: Maximum available downstream bandwidth for the DP tunnel
* (%0 if not limited)
*
* Allocates a tunnel between @in and @out that is capable of tunneling
* Display Port traffic.
*
 * Return: A tb_tunnel on success or %NULL on failure.
*/
struct tb_tunnel *tb_tunnel_alloc_dp(struct tb *tb, struct tb_port *in,
struct tb_port *out, int link_nr,
int max_up, int max_down)
{
struct tb_tunnel *tunnel;
struct tb_path **paths;
struct tb_path *path;
if (WARN_ON(!in->cap_adap || !out->cap_adap))
return NULL;
tunnel = tb_tunnel_alloc(tb, 3, TB_TUNNEL_DP);
if (!tunnel)
return NULL;
tunnel->init = tb_dp_init;
tunnel->deinit = tb_dp_deinit;
tunnel->activate = tb_dp_activate;
tunnel->maximum_bandwidth = tb_dp_maximum_bandwidth;
tunnel->allocated_bandwidth = tb_dp_allocated_bandwidth;
tunnel->alloc_bandwidth = tb_dp_alloc_bandwidth;
tunnel->consumed_bandwidth = tb_dp_consumed_bandwidth;
tunnel->src_port = in;
tunnel->dst_port = out;
tunnel->max_up = max_up;
tunnel->max_down = max_down;
paths = tunnel->paths;
path = tb_path_alloc(tb, in, TB_DP_VIDEO_HOPID, out, TB_DP_VIDEO_HOPID,
link_nr, "Video");
if (!path)
goto err_free;
tb_dp_init_video_path(path);
paths[TB_DP_VIDEO_PATH_OUT] = path;
path = tb_path_alloc(tb, in, TB_DP_AUX_TX_HOPID, out,
TB_DP_AUX_TX_HOPID, link_nr, "AUX TX");
if (!path)
goto err_free;
tb_dp_init_aux_path(path);
paths[TB_DP_AUX_PATH_OUT] = path;
path = tb_path_alloc(tb, out, TB_DP_AUX_RX_HOPID, in,
TB_DP_AUX_RX_HOPID, link_nr, "AUX RX");
if (!path)
goto err_free;
tb_dp_init_aux_path(path);
paths[TB_DP_AUX_PATH_IN] = path;
return tunnel;
err_free:
tb_tunnel_free(tunnel);
return NULL;
}
static unsigned int tb_dma_available_credits(const struct tb_port *port)
{
const struct tb_switch *sw = port->sw;
int credits;
credits = tb_available_credits(port, NULL);
if (tb_acpi_may_tunnel_pcie())
credits -= sw->max_pcie_credits;
credits -= port->dma_credits;
return credits > 0 ? credits : 0;
}
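/*
 * Illustrative budget for tb_dma_available_credits() above (the
 * numbers are hypothetical and depend on the router): if the port has
 * 64 buffers available, PCIe tunneling may later claim up to
 * sw->max_pcie_credits (say 32) and 14 buffers are already reserved
 * for an existing DMA path, then 64 - 32 - 14 = 18 credits remain for
 * a new DMA tunnel.
 */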
static int tb_dma_reserve_credits(struct tb_path_hop *hop, unsigned int credits)
{
struct tb_port *port = hop->in_port;
if (tb_port_use_credit_allocation(port)) {
unsigned int available = tb_dma_available_credits(port);
/*
* Need to have at least TB_MIN_DMA_CREDITS, otherwise
		 * the DMA path cannot be established.
*/
if (available < TB_MIN_DMA_CREDITS)
return -ENOSPC;
while (credits > available)
credits--;
tb_port_dbg(port, "reserving %u credits for DMA path\n",
credits);
port->dma_credits += credits;
} else {
if (tb_port_is_null(port))
credits = port->bonded ? 14 : 6;
else
credits = min(port->total_credits, credits);
}
hop->initial_credits = credits;
return 0;
}
/* Path from lane adapter to NHI */
static int tb_dma_init_rx_path(struct tb_path *path, unsigned int credits)
{
struct tb_path_hop *hop;
unsigned int i, tmp;
path->egress_fc_enable = TB_PATH_SOURCE | TB_PATH_INTERNAL;
path->ingress_fc_enable = TB_PATH_ALL;
path->egress_shared_buffer = TB_PATH_NONE;
path->ingress_shared_buffer = TB_PATH_NONE;
path->priority = 5;
path->weight = 1;
path->clear_fc = true;
/*
* First lane adapter is the one connected to the remote host.
	 * We don't tunnel other traffic over this link so we can use all
* the credits (except the ones reserved for control traffic).
*/
hop = &path->hops[0];
tmp = min(tb_usable_credits(hop->in_port), credits);
hop->initial_credits = tmp;
hop->in_port->dma_credits += tmp;
for (i = 1; i < path->path_length; i++) {
int ret;
ret = tb_dma_reserve_credits(&path->hops[i], credits);
if (ret)
return ret;
}
return 0;
}
/* Path from NHI to lane adapter */
static int tb_dma_init_tx_path(struct tb_path *path, unsigned int credits)
{
struct tb_path_hop *hop;
path->egress_fc_enable = TB_PATH_ALL;
path->ingress_fc_enable = TB_PATH_ALL;
path->egress_shared_buffer = TB_PATH_NONE;
path->ingress_shared_buffer = TB_PATH_NONE;
path->priority = 5;
path->weight = 1;
path->clear_fc = true;
tb_path_for_each_hop(path, hop) {
int ret;
ret = tb_dma_reserve_credits(hop, credits);
if (ret)
return ret;
}
return 0;
}
static void tb_dma_release_credits(struct tb_path_hop *hop)
{
struct tb_port *port = hop->in_port;
if (tb_port_use_credit_allocation(port)) {
port->dma_credits -= hop->initial_credits;
tb_port_dbg(port, "released %u DMA path credits\n",
hop->initial_credits);
}
}
static void tb_dma_deinit_path(struct tb_path *path)
{
struct tb_path_hop *hop;
tb_path_for_each_hop(path, hop)
tb_dma_release_credits(hop);
}
static void tb_dma_deinit(struct tb_tunnel *tunnel)
{
int i;
for (i = 0; i < tunnel->npaths; i++) {
if (!tunnel->paths[i])
continue;
tb_dma_deinit_path(tunnel->paths[i]);
}
}
/**
* tb_tunnel_alloc_dma() - allocate a DMA tunnel
* @tb: Pointer to the domain structure
* @nhi: Host controller port
* @dst: Destination null port which the other domain is connected to
* @transmit_path: HopID used for transmitting packets
* @transmit_ring: NHI ring number used to send packets towards the
* other domain. Set to %-1 if TX path is not needed.
* @receive_path: HopID used for receiving packets
* @receive_ring: NHI ring number used to receive packets from the
* other domain. Set to %-1 if RX path is not needed.
*
 * Return: A tb_tunnel on success or %NULL on failure.
*/
struct tb_tunnel *tb_tunnel_alloc_dma(struct tb *tb, struct tb_port *nhi,
struct tb_port *dst, int transmit_path,
int transmit_ring, int receive_path,
int receive_ring)
{
struct tb_tunnel *tunnel;
size_t npaths = 0, i = 0;
struct tb_path *path;
int credits;
/* Ring 0 is reserved for control channel */
if (WARN_ON(!receive_ring || !transmit_ring))
return NULL;
if (receive_ring > 0)
npaths++;
if (transmit_ring > 0)
npaths++;
if (WARN_ON(!npaths))
return NULL;
tunnel = tb_tunnel_alloc(tb, npaths, TB_TUNNEL_DMA);
if (!tunnel)
return NULL;
tunnel->src_port = nhi;
tunnel->dst_port = dst;
tunnel->deinit = tb_dma_deinit;
credits = min_not_zero(dma_credits, nhi->sw->max_dma_credits);
if (receive_ring > 0) {
path = tb_path_alloc(tb, dst, receive_path, nhi, receive_ring, 0,
"DMA RX");
if (!path)
goto err_free;
tunnel->paths[i++] = path;
if (tb_dma_init_rx_path(path, credits)) {
tb_tunnel_dbg(tunnel, "not enough buffers for RX path\n");
goto err_free;
}
}
if (transmit_ring > 0) {
path = tb_path_alloc(tb, nhi, transmit_ring, dst, transmit_path, 0,
"DMA TX");
if (!path)
goto err_free;
tunnel->paths[i++] = path;
if (tb_dma_init_tx_path(path, credits)) {
tb_tunnel_dbg(tunnel, "not enough buffers for TX path\n");
goto err_free;
}
}
return tunnel;
err_free:
tb_tunnel_free(tunnel);
return NULL;
}
/**
* tb_tunnel_match_dma() - Match DMA tunnel
* @tunnel: Tunnel to match
* @transmit_path: HopID used for transmitting packets. Pass %-1 to ignore.
* @transmit_ring: NHI ring number used to send packets towards the
* other domain. Pass %-1 to ignore.
* @receive_path: HopID used for receiving packets. Pass %-1 to ignore.
* @receive_ring: NHI ring number used to receive packets from the
* other domain. Pass %-1 to ignore.
*
 * This function can be used to match a specific DMA tunnel, if there
 * are multiple DMA tunnels going through the same XDomain connection.
 * Returns %true if there is a match and %false otherwise.
*/
bool tb_tunnel_match_dma(const struct tb_tunnel *tunnel, int transmit_path,
int transmit_ring, int receive_path, int receive_ring)
{
const struct tb_path *tx_path = NULL, *rx_path = NULL;
int i;
if (!receive_ring || !transmit_ring)
return false;
for (i = 0; i < tunnel->npaths; i++) {
const struct tb_path *path = tunnel->paths[i];
if (!path)
continue;
if (tb_port_is_nhi(path->hops[0].in_port))
tx_path = path;
else if (tb_port_is_nhi(path->hops[path->path_length - 1].out_port))
rx_path = path;
}
if (transmit_ring > 0 || transmit_path > 0) {
if (!tx_path)
return false;
if (transmit_ring > 0 &&
(tx_path->hops[0].in_hop_index != transmit_ring))
return false;
if (transmit_path > 0 &&
(tx_path->hops[tx_path->path_length - 1].next_hop_index != transmit_path))
return false;
}
if (receive_ring > 0 || receive_path > 0) {
if (!rx_path)
return false;
if (receive_path > 0 &&
(rx_path->hops[0].in_hop_index != receive_path))
return false;
if (receive_ring > 0 &&
(rx_path->hops[rx_path->path_length - 1].next_hop_index != receive_ring))
return false;
}
return true;
}
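/*
 * Usage sketch for tb_tunnel_match_dma() above (HopID 8 is just an
 * illustrative value): to find the tunnel that transmits towards the
 * other domain through HopID 8, regardless of the rings or the RX
 * path, a caller can use tb_tunnel_match_dma(tunnel, 8, -1, -1, -1).
 * Passing %0 for either ring never matches because ring 0 is reserved
 * for the control channel.
 */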
static int tb_usb3_max_link_rate(struct tb_port *up, struct tb_port *down)
{
int ret, up_max_rate, down_max_rate;
ret = usb4_usb3_port_max_link_rate(up);
if (ret < 0)
return ret;
up_max_rate = ret;
ret = usb4_usb3_port_max_link_rate(down);
if (ret < 0)
return ret;
down_max_rate = ret;
return min(up_max_rate, down_max_rate);
}
static int tb_usb3_init(struct tb_tunnel *tunnel)
{
tb_tunnel_dbg(tunnel, "allocating initial bandwidth %d/%d Mb/s\n",
tunnel->allocated_up, tunnel->allocated_down);
return usb4_usb3_port_allocate_bandwidth(tunnel->src_port,
&tunnel->allocated_up,
&tunnel->allocated_down);
}
static int tb_usb3_activate(struct tb_tunnel *tunnel, bool activate)
{
int res;
res = tb_usb3_port_enable(tunnel->src_port, activate);
if (res)
return res;
if (tb_port_is_usb3_up(tunnel->dst_port))
return tb_usb3_port_enable(tunnel->dst_port, activate);
return 0;
}
static int tb_usb3_consumed_bandwidth(struct tb_tunnel *tunnel,
int *consumed_up, int *consumed_down)
{
int pcie_enabled = tb_acpi_may_tunnel_pcie();
/*
* PCIe tunneling, if enabled, affects the USB3 bandwidth so
	 * take that into account here.
*/
*consumed_up = tunnel->allocated_up * (3 + pcie_enabled) / 3;
*consumed_down = tunnel->allocated_down * (3 + pcie_enabled) / 3;
return 0;
}
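/*
 * Example of the scaling above: with 900 Mb/s allocated in each
 * direction and PCIe tunneling enabled, the reported consumption is
 * 900 * (3 + 1) / 3 = 1200 Mb/s per direction; with PCIe tunneling
 * disabled it stays at 900 Mb/s.
 */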
static int tb_usb3_release_unused_bandwidth(struct tb_tunnel *tunnel)
{
int ret;
ret = usb4_usb3_port_release_bandwidth(tunnel->src_port,
&tunnel->allocated_up,
&tunnel->allocated_down);
if (ret)
return ret;
tb_tunnel_dbg(tunnel, "decreased bandwidth allocation to %d/%d Mb/s\n",
tunnel->allocated_up, tunnel->allocated_down);
return 0;
}
static void tb_usb3_reclaim_available_bandwidth(struct tb_tunnel *tunnel,
int *available_up,
int *available_down)
{
int ret, max_rate, allocate_up, allocate_down;
ret = usb4_usb3_port_actual_link_rate(tunnel->src_port);
if (ret < 0) {
tb_tunnel_warn(tunnel, "failed to read actual link rate\n");
return;
} else if (!ret) {
		/* Use the maximum link rate if the link valid bit is not set */
ret = tb_usb3_max_link_rate(tunnel->dst_port, tunnel->src_port);
if (ret < 0) {
tb_tunnel_warn(tunnel, "failed to read maximum link rate\n");
return;
}
}
/*
* 90% of the max rate can be allocated for isochronous
* transfers.
*/
max_rate = ret * 90 / 100;
/* No need to reclaim if already at maximum */
if (tunnel->allocated_up >= max_rate &&
tunnel->allocated_down >= max_rate)
return;
/* Don't go lower than what is already allocated */
allocate_up = min(max_rate, *available_up);
if (allocate_up < tunnel->allocated_up)
allocate_up = tunnel->allocated_up;
allocate_down = min(max_rate, *available_down);
if (allocate_down < tunnel->allocated_down)
allocate_down = tunnel->allocated_down;
/* If no changes no need to do more */
if (allocate_up == tunnel->allocated_up &&
allocate_down == tunnel->allocated_down)
return;
ret = usb4_usb3_port_allocate_bandwidth(tunnel->src_port, &allocate_up,
&allocate_down);
if (ret) {
tb_tunnel_info(tunnel, "failed to allocate bandwidth\n");
return;
}
tunnel->allocated_up = allocate_up;
*available_up -= tunnel->allocated_up;
tunnel->allocated_down = allocate_down;
*available_down -= tunnel->allocated_down;
tb_tunnel_dbg(tunnel, "increased bandwidth allocation to %d/%d Mb/s\n",
tunnel->allocated_up, tunnel->allocated_down);
}
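/*
 * Reclaim example with assumed numbers: on a 10000 Mb/s USB3 link at
 * most 90% (9000 Mb/s) can be allocated for isochronous traffic. If
 * *available_up is 7000 Mb/s and the tunnel currently has 4000 Mb/s
 * allocated upstream, the new upstream allocation becomes
 * min(9000, 7000) = 7000 Mb/s and *available_up drops by that amount
 * to 0. The allocation never shrinks here; anything below the current
 * 4000 Mb/s would be bumped back up to it.
 */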
static void tb_usb3_init_credits(struct tb_path_hop *hop)
{
struct tb_port *port = hop->in_port;
struct tb_switch *sw = port->sw;
unsigned int credits;
if (tb_port_use_credit_allocation(port)) {
credits = sw->max_usb3_credits;
} else {
if (tb_port_is_null(port))
credits = port->bonded ? 32 : 16;
else
credits = 7;
}
hop->initial_credits = credits;
}
static void tb_usb3_init_path(struct tb_path *path)
{
struct tb_path_hop *hop;
path->egress_fc_enable = TB_PATH_SOURCE | TB_PATH_INTERNAL;
path->egress_shared_buffer = TB_PATH_NONE;
path->ingress_fc_enable = TB_PATH_ALL;
path->ingress_shared_buffer = TB_PATH_NONE;
path->priority = 3;
path->weight = 3;
path->drop_packages = 0;
tb_path_for_each_hop(path, hop)
tb_usb3_init_credits(hop);
}
/**
* tb_tunnel_discover_usb3() - Discover existing USB3 tunnels
* @tb: Pointer to the domain structure
* @down: USB3 downstream adapter
* @alloc_hopid: Allocate HopIDs from visited ports
*
* If @down adapter is active, follows the tunnel to the USB3 upstream
* adapter and back. Returns the discovered tunnel or %NULL if there was
* no tunnel.
*/
struct tb_tunnel *tb_tunnel_discover_usb3(struct tb *tb, struct tb_port *down,
bool alloc_hopid)
{
struct tb_tunnel *tunnel;
struct tb_path *path;
if (!tb_usb3_port_is_enabled(down))
return NULL;
tunnel = tb_tunnel_alloc(tb, 2, TB_TUNNEL_USB3);
if (!tunnel)
return NULL;
tunnel->activate = tb_usb3_activate;
tunnel->src_port = down;
/*
* Discover both paths even if they are not complete. We will
* clean them up by calling tb_tunnel_deactivate() below in that
* case.
*/
path = tb_path_discover(down, TB_USB3_HOPID, NULL, -1,
&tunnel->dst_port, "USB3 Down", alloc_hopid);
if (!path) {
/* Just disable the downstream port */
tb_usb3_port_enable(down, false);
goto err_free;
}
tunnel->paths[TB_USB3_PATH_DOWN] = path;
tb_usb3_init_path(tunnel->paths[TB_USB3_PATH_DOWN]);
path = tb_path_discover(tunnel->dst_port, -1, down, TB_USB3_HOPID, NULL,
"USB3 Up", alloc_hopid);
if (!path)
goto err_deactivate;
tunnel->paths[TB_USB3_PATH_UP] = path;
tb_usb3_init_path(tunnel->paths[TB_USB3_PATH_UP]);
/* Validate that the tunnel is complete */
if (!tb_port_is_usb3_up(tunnel->dst_port)) {
tb_port_warn(tunnel->dst_port,
"path does not end on an USB3 adapter, cleaning up\n");
goto err_deactivate;
}
if (down != tunnel->src_port) {
tb_tunnel_warn(tunnel, "path is not complete, cleaning up\n");
goto err_deactivate;
}
if (!tb_usb3_port_is_enabled(tunnel->dst_port)) {
tb_tunnel_warn(tunnel,
"tunnel is not fully activated, cleaning up\n");
goto err_deactivate;
}
if (!tb_route(down->sw)) {
int ret;
/*
* Read the initial bandwidth allocation for the first
* hop tunnel.
*/
ret = usb4_usb3_port_allocated_bandwidth(down,
&tunnel->allocated_up, &tunnel->allocated_down);
if (ret)
goto err_deactivate;
tb_tunnel_dbg(tunnel, "currently allocated bandwidth %d/%d Mb/s\n",
tunnel->allocated_up, tunnel->allocated_down);
tunnel->init = tb_usb3_init;
tunnel->consumed_bandwidth = tb_usb3_consumed_bandwidth;
tunnel->release_unused_bandwidth =
tb_usb3_release_unused_bandwidth;
tunnel->reclaim_available_bandwidth =
tb_usb3_reclaim_available_bandwidth;
}
tb_tunnel_dbg(tunnel, "discovered\n");
return tunnel;
err_deactivate:
tb_tunnel_deactivate(tunnel);
err_free:
tb_tunnel_free(tunnel);
return NULL;
}
/**
* tb_tunnel_alloc_usb3() - allocate a USB3 tunnel
* @tb: Pointer to the domain structure
* @up: USB3 upstream adapter port
* @down: USB3 downstream adapter port
* @max_up: Maximum available upstream bandwidth for the USB3 tunnel (%0
* if not limited).
* @max_down: Maximum available downstream bandwidth for the USB3 tunnel
* (%0 if not limited).
*
 * Allocate a USB3 tunnel. The ports must be of type %TB_TYPE_USB3_UP
 * and %TB_TYPE_USB3_DOWN.
 *
 * Return: A tb_tunnel on success or %NULL on failure.
*/
struct tb_tunnel *tb_tunnel_alloc_usb3(struct tb *tb, struct tb_port *up,
struct tb_port *down, int max_up,
int max_down)
{
struct tb_tunnel *tunnel;
struct tb_path *path;
int max_rate = 0;
/*
* Check that we have enough bandwidth available for the new
* USB3 tunnel.
*/
if (max_up > 0 || max_down > 0) {
max_rate = tb_usb3_max_link_rate(down, up);
if (max_rate < 0)
return NULL;
/* Only 90% can be allocated for USB3 isochronous transfers */
max_rate = max_rate * 90 / 100;
tb_port_dbg(up, "required bandwidth for USB3 tunnel %d Mb/s\n",
max_rate);
if (max_rate > max_up || max_rate > max_down) {
tb_port_warn(up, "not enough bandwidth for USB3 tunnel\n");
return NULL;
}
}
tunnel = tb_tunnel_alloc(tb, 2, TB_TUNNEL_USB3);
if (!tunnel)
return NULL;
tunnel->activate = tb_usb3_activate;
tunnel->src_port = down;
tunnel->dst_port = up;
tunnel->max_up = max_up;
tunnel->max_down = max_down;
path = tb_path_alloc(tb, down, TB_USB3_HOPID, up, TB_USB3_HOPID, 0,
"USB3 Down");
if (!path) {
tb_tunnel_free(tunnel);
return NULL;
}
tb_usb3_init_path(path);
tunnel->paths[TB_USB3_PATH_DOWN] = path;
path = tb_path_alloc(tb, up, TB_USB3_HOPID, down, TB_USB3_HOPID, 0,
"USB3 Up");
if (!path) {
tb_tunnel_free(tunnel);
return NULL;
}
tb_usb3_init_path(path);
tunnel->paths[TB_USB3_PATH_UP] = path;
if (!tb_route(down->sw)) {
tunnel->allocated_up = max_rate;
tunnel->allocated_down = max_rate;
tunnel->init = tb_usb3_init;
tunnel->consumed_bandwidth = tb_usb3_consumed_bandwidth;
tunnel->release_unused_bandwidth =
tb_usb3_release_unused_bandwidth;
tunnel->reclaim_available_bandwidth =
tb_usb3_reclaim_available_bandwidth;
}
return tunnel;
}
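/*
 * Example of the bandwidth check in tb_tunnel_alloc_usb3() above with
 * an assumed link speed: for a 20 Gb/s (20000 Mb/s) USB3 link the
 * tunnel needs 20000 * 90 / 100 = 18000 Mb/s. When a limit is given in
 * either direction, both @max_up and @max_down must be at least
 * 18000 Mb/s or the tunnel is not created; the check is skipped
 * entirely only when both limits are %0 (unlimited).
 */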
/**
* tb_tunnel_free() - free a tunnel
* @tunnel: Tunnel to be freed
*
* Frees a tunnel. The tunnel does not need to be deactivated.
*/
void tb_tunnel_free(struct tb_tunnel *tunnel)
{
int i;
if (!tunnel)
return;
if (tunnel->deinit)
tunnel->deinit(tunnel);
for (i = 0; i < tunnel->npaths; i++) {
if (tunnel->paths[i])
tb_path_free(tunnel->paths[i]);
}
kfree(tunnel->paths);
kfree(tunnel);
}
/**
 * tb_tunnel_is_invalid() - Check whether an activated path is still valid
* @tunnel: Tunnel to check
*/
bool tb_tunnel_is_invalid(struct tb_tunnel *tunnel)
{
int i;
for (i = 0; i < tunnel->npaths; i++) {
WARN_ON(!tunnel->paths[i]->activated);
if (tb_path_is_invalid(tunnel->paths[i]))
return true;
}
return false;
}
/**
* tb_tunnel_restart() - activate a tunnel after a hardware reset
* @tunnel: Tunnel to restart
*
 * Return: %0 on success and negative errno in case of failure
*/
int tb_tunnel_restart(struct tb_tunnel *tunnel)
{
int res, i;
tb_tunnel_dbg(tunnel, "activating\n");
/*
* Make sure all paths are properly disabled before enabling
* them again.
*/
for (i = 0; i < tunnel->npaths; i++) {
if (tunnel->paths[i]->activated) {
tb_path_deactivate(tunnel->paths[i]);
tunnel->paths[i]->activated = false;
}
}
if (tunnel->init) {
res = tunnel->init(tunnel);
if (res)
return res;
}
for (i = 0; i < tunnel->npaths; i++) {
res = tb_path_activate(tunnel->paths[i]);
if (res)
goto err;
}
if (tunnel->activate) {
res = tunnel->activate(tunnel, true);
if (res)
goto err;
}
return 0;
err:
tb_tunnel_warn(tunnel, "activation failed\n");
tb_tunnel_deactivate(tunnel);
return res;
}
/**
* tb_tunnel_activate() - activate a tunnel
* @tunnel: Tunnel to activate
*
 * Return: %0 on success or an error code on failure.
*/
int tb_tunnel_activate(struct tb_tunnel *tunnel)
{
int i;
for (i = 0; i < tunnel->npaths; i++) {
if (tunnel->paths[i]->activated) {
tb_tunnel_WARN(tunnel,
"trying to activate an already activated tunnel\n");
return -EINVAL;
}
}
return tb_tunnel_restart(tunnel);
}
/**
* tb_tunnel_deactivate() - deactivate a tunnel
* @tunnel: Tunnel to deactivate
*/
void tb_tunnel_deactivate(struct tb_tunnel *tunnel)
{
int i;
tb_tunnel_dbg(tunnel, "deactivating\n");
if (tunnel->activate)
tunnel->activate(tunnel, false);
for (i = 0; i < tunnel->npaths; i++) {
if (tunnel->paths[i] && tunnel->paths[i]->activated)
tb_path_deactivate(tunnel->paths[i]);
}
}
/**
* tb_tunnel_port_on_path() - Does the tunnel go through port
* @tunnel: Tunnel to check
* @port: Port to check
*
* Returns true if @tunnel goes through @port (direction does not matter),
* false otherwise.
*/
bool tb_tunnel_port_on_path(const struct tb_tunnel *tunnel,
const struct tb_port *port)
{
int i;
for (i = 0; i < tunnel->npaths; i++) {
if (!tunnel->paths[i])
continue;
if (tb_path_port_on_path(tunnel->paths[i], port))
return true;
}
return false;
}
static bool tb_tunnel_is_active(const struct tb_tunnel *tunnel)
{
int i;
for (i = 0; i < tunnel->npaths; i++) {
if (!tunnel->paths[i])
return false;
if (!tunnel->paths[i]->activated)
return false;
}
return true;
}
/**
* tb_tunnel_maximum_bandwidth() - Return maximum possible bandwidth
* @tunnel: Tunnel to check
* @max_up: Maximum upstream bandwidth in Mb/s
* @max_down: Maximum downstream bandwidth in Mb/s
*
 * Returns the maximum possible bandwidth this tunnel can consume if
 * not limited by other bandwidth clients. If the tunnel does not
 * support this, returns %-EOPNOTSUPP.
*/
int tb_tunnel_maximum_bandwidth(struct tb_tunnel *tunnel, int *max_up,
int *max_down)
{
if (!tb_tunnel_is_active(tunnel))
return -EINVAL;
if (tunnel->maximum_bandwidth)
return tunnel->maximum_bandwidth(tunnel, max_up, max_down);
return -EOPNOTSUPP;
}
/**
* tb_tunnel_allocated_bandwidth() - Return bandwidth allocated for the tunnel
* @tunnel: Tunnel to check
* @allocated_up: Currently allocated upstream bandwidth in Mb/s is stored here
* @allocated_down: Currently allocated downstream bandwidth in Mb/s is
* stored here
*
* Returns the bandwidth allocated for the tunnel. This may be higher
* than what the tunnel actually consumes.
*/
int tb_tunnel_allocated_bandwidth(struct tb_tunnel *tunnel, int *allocated_up,
int *allocated_down)
{
if (!tb_tunnel_is_active(tunnel))
return -EINVAL;
if (tunnel->allocated_bandwidth)
return tunnel->allocated_bandwidth(tunnel, allocated_up,
allocated_down);
return -EOPNOTSUPP;
}
/**
* tb_tunnel_alloc_bandwidth() - Change tunnel bandwidth allocation
* @tunnel: Tunnel whose bandwidth allocation to change
* @alloc_up: New upstream bandwidth in Mb/s
* @alloc_down: New downstream bandwidth in Mb/s
*
 * Tries to change the tunnel bandwidth allocation. If it succeeds,
 * returns %0 and updates @alloc_up and @alloc_down to what was
 * actually allocated (which may not be the same as what was passed
 * in). Returns negative errno in case of failure.
*/
int tb_tunnel_alloc_bandwidth(struct tb_tunnel *tunnel, int *alloc_up,
int *alloc_down)
{
if (!tb_tunnel_is_active(tunnel))
return -EINVAL;
if (tunnel->alloc_bandwidth)
return tunnel->alloc_bandwidth(tunnel, alloc_up, alloc_down);
return -EOPNOTSUPP;
}
/**
* tb_tunnel_consumed_bandwidth() - Return bandwidth consumed by the tunnel
* @tunnel: Tunnel to check
* @consumed_up: Consumed bandwidth in Mb/s from @dst_port to @src_port.
* Can be %NULL.
* @consumed_down: Consumed bandwidth in Mb/s from @src_port to @dst_port.
* Can be %NULL.
*
* Stores the amount of isochronous bandwidth @tunnel consumes in
* @consumed_up and @consumed_down. In case of success returns %0,
* negative errno otherwise.
*/
int tb_tunnel_consumed_bandwidth(struct tb_tunnel *tunnel, int *consumed_up,
int *consumed_down)
{
int up_bw = 0, down_bw = 0;
if (!tb_tunnel_is_active(tunnel))
goto out;
if (tunnel->consumed_bandwidth) {
int ret;
ret = tunnel->consumed_bandwidth(tunnel, &up_bw, &down_bw);
if (ret)
return ret;
tb_tunnel_dbg(tunnel, "consumed bandwidth %d/%d Mb/s\n", up_bw,
down_bw);
}
out:
if (consumed_up)
*consumed_up = up_bw;
if (consumed_down)
*consumed_down = down_bw;
return 0;
}
/**
* tb_tunnel_release_unused_bandwidth() - Release unused bandwidth
* @tunnel: Tunnel whose unused bandwidth to release
*
 * If the tunnel supports dynamic bandwidth management (USB3 tunnels at
 * the moment), this function makes it release all the unused bandwidth.
*
* Returns %0 in case of success and negative errno otherwise.
*/
int tb_tunnel_release_unused_bandwidth(struct tb_tunnel *tunnel)
{
if (!tb_tunnel_is_active(tunnel))
return 0;
if (tunnel->release_unused_bandwidth) {
int ret;
ret = tunnel->release_unused_bandwidth(tunnel);
if (ret)
return ret;
}
return 0;
}
/**
* tb_tunnel_reclaim_available_bandwidth() - Reclaim available bandwidth
* @tunnel: Tunnel reclaiming available bandwidth
* @available_up: Available upstream bandwidth (in Mb/s)
* @available_down: Available downstream bandwidth (in Mb/s)
*
* Reclaims bandwidth from @available_up and @available_down and updates
 * the variables accordingly (e.g. decreases both according to what was
* reclaimed by the tunnel). If nothing was reclaimed the values are
* kept as is.
*/
void tb_tunnel_reclaim_available_bandwidth(struct tb_tunnel *tunnel,
int *available_up,
int *available_down)
{
if (!tb_tunnel_is_active(tunnel))
return;
if (tunnel->reclaim_available_bandwidth)
tunnel->reclaim_available_bandwidth(tunnel, available_up,
available_down);
}
| linux-master | drivers/thunderbolt/tunnel.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Thunderbolt DMA configuration based mailbox support
*
* Copyright (C) 2017, Intel Corporation
* Authors: Michael Jamet <[email protected]>
* Mika Westerberg <[email protected]>
*/
#include <linux/delay.h>
#include <linux/slab.h>
#include "dma_port.h"
#include "tb_regs.h"
#define DMA_PORT_CAP 0x3e
#define MAIL_DATA 1
#define MAIL_DATA_DWORDS 16
#define MAIL_IN 17
#define MAIL_IN_CMD_SHIFT 28
#define MAIL_IN_CMD_MASK GENMASK(31, 28)
#define MAIL_IN_CMD_FLASH_WRITE 0x0
#define MAIL_IN_CMD_FLASH_UPDATE_AUTH 0x1
#define MAIL_IN_CMD_FLASH_READ 0x2
#define MAIL_IN_CMD_POWER_CYCLE 0x4
#define MAIL_IN_DWORDS_SHIFT 24
#define MAIL_IN_DWORDS_MASK GENMASK(27, 24)
#define MAIL_IN_ADDRESS_SHIFT 2
#define MAIL_IN_ADDRESS_MASK GENMASK(23, 2)
#define MAIL_IN_CSS BIT(1)
#define MAIL_IN_OP_REQUEST BIT(0)
#define MAIL_OUT 18
#define MAIL_OUT_STATUS_RESPONSE BIT(29)
#define MAIL_OUT_STATUS_CMD_SHIFT 4
#define MAIL_OUT_STATUS_CMD_MASK GENMASK(7, 4)
#define MAIL_OUT_STATUS_MASK GENMASK(3, 0)
#define MAIL_OUT_STATUS_COMPLETED 0
#define MAIL_OUT_STATUS_ERR_AUTH 1
#define MAIL_OUT_STATUS_ERR_ACCESS 2
#define DMA_PORT_TIMEOUT 5000 /* ms */
#define DMA_PORT_RETRIES 3
/**
* struct tb_dma_port - DMA control port
* @sw: Switch the DMA port belongs to
* @port: Switch port number where DMA capability is found
* @base: Start offset of the mailbox registers
* @buf: Temporary buffer to store a single block
*/
struct tb_dma_port {
struct tb_switch *sw;
u8 port;
u32 base;
u8 *buf;
};
/*
* When the switch is in safe mode it supports very little functionality
* so we don't validate that much here.
*/
static bool dma_port_match(const struct tb_cfg_request *req,
const struct ctl_pkg *pkg)
{
u64 route = tb_cfg_get_route(pkg->buffer) & ~BIT_ULL(63);
if (pkg->frame.eof == TB_CFG_PKG_ERROR)
return true;
if (pkg->frame.eof != req->response_type)
return false;
if (route != tb_cfg_get_route(req->request))
return false;
if (pkg->frame.size != req->response_size)
return false;
return true;
}
static bool dma_port_copy(struct tb_cfg_request *req, const struct ctl_pkg *pkg)
{
memcpy(req->response, pkg->buffer, req->response_size);
return true;
}
static int dma_port_read(struct tb_ctl *ctl, void *buffer, u64 route,
u32 port, u32 offset, u32 length, int timeout_msec)
{
struct cfg_read_pkg request = {
.header = tb_cfg_make_header(route),
.addr = {
.seq = 1,
.port = port,
.space = TB_CFG_PORT,
.offset = offset,
.length = length,
},
};
struct tb_cfg_request *req;
struct cfg_write_pkg reply;
struct tb_cfg_result res;
req = tb_cfg_request_alloc();
if (!req)
return -ENOMEM;
req->match = dma_port_match;
req->copy = dma_port_copy;
req->request = &request;
req->request_size = sizeof(request);
req->request_type = TB_CFG_PKG_READ;
req->response = &reply;
req->response_size = 12 + 4 * length;
req->response_type = TB_CFG_PKG_READ;
res = tb_cfg_request_sync(ctl, req, timeout_msec);
tb_cfg_request_put(req);
if (res.err)
return res.err;
memcpy(buffer, &reply.data, 4 * length);
return 0;
}
static int dma_port_write(struct tb_ctl *ctl, const void *buffer, u64 route,
u32 port, u32 offset, u32 length, int timeout_msec)
{
struct cfg_write_pkg request = {
.header = tb_cfg_make_header(route),
.addr = {
.seq = 1,
.port = port,
.space = TB_CFG_PORT,
.offset = offset,
.length = length,
},
};
struct tb_cfg_request *req;
struct cfg_read_pkg reply;
struct tb_cfg_result res;
memcpy(&request.data, buffer, length * 4);
req = tb_cfg_request_alloc();
if (!req)
return -ENOMEM;
req->match = dma_port_match;
req->copy = dma_port_copy;
req->request = &request;
req->request_size = 12 + 4 * length;
req->request_type = TB_CFG_PKG_WRITE;
req->response = &reply;
req->response_size = sizeof(reply);
req->response_type = TB_CFG_PKG_WRITE;
res = tb_cfg_request_sync(ctl, req, timeout_msec);
tb_cfg_request_put(req);
return res.err;
}
static int dma_find_port(struct tb_switch *sw)
{
static const int ports[] = { 3, 5, 7 };
int i;
/*
* The DMA (NHI) port is either 3, 5 or 7 depending on the
* controller. Try all of them.
*/
for (i = 0; i < ARRAY_SIZE(ports); i++) {
u32 type;
int ret;
ret = dma_port_read(sw->tb->ctl, &type, tb_route(sw), ports[i],
2, 1, DMA_PORT_TIMEOUT);
if (!ret && (type & 0xffffff) == TB_TYPE_NHI)
return ports[i];
}
return -ENODEV;
}
/**
* dma_port_alloc() - Finds DMA control port from a switch pointed by route
* @sw: Switch from where find the DMA port
*
 * Checks if the switch NHI port supports the DMA configuration based
 * mailbox capability and if it does, allocates and initializes the
 * DMA port structure. Returns %NULL if the capability was not found.
 *
 * The DMA control port is functional even when the switch is in safe
 * mode.
*/
struct tb_dma_port *dma_port_alloc(struct tb_switch *sw)
{
struct tb_dma_port *dma;
int port;
port = dma_find_port(sw);
if (port < 0)
return NULL;
dma = kzalloc(sizeof(*dma), GFP_KERNEL);
if (!dma)
return NULL;
dma->buf = kmalloc_array(MAIL_DATA_DWORDS, sizeof(u32), GFP_KERNEL);
if (!dma->buf) {
kfree(dma);
return NULL;
}
dma->sw = sw;
dma->port = port;
dma->base = DMA_PORT_CAP;
return dma;
}
/**
* dma_port_free() - Release DMA control port structure
* @dma: DMA control port
*/
void dma_port_free(struct tb_dma_port *dma)
{
if (dma) {
kfree(dma->buf);
kfree(dma);
}
}
static int dma_port_wait_for_completion(struct tb_dma_port *dma,
unsigned int timeout)
{
unsigned long end = jiffies + msecs_to_jiffies(timeout);
struct tb_switch *sw = dma->sw;
do {
int ret;
u32 in;
ret = dma_port_read(sw->tb->ctl, &in, tb_route(sw), dma->port,
dma->base + MAIL_IN, 1, 50);
if (ret) {
if (ret != -ETIMEDOUT)
return ret;
} else if (!(in & MAIL_IN_OP_REQUEST)) {
return 0;
}
usleep_range(50, 100);
} while (time_before(jiffies, end));
return -ETIMEDOUT;
}
static int status_to_errno(u32 status)
{
switch (status & MAIL_OUT_STATUS_MASK) {
case MAIL_OUT_STATUS_COMPLETED:
return 0;
case MAIL_OUT_STATUS_ERR_AUTH:
return -EINVAL;
case MAIL_OUT_STATUS_ERR_ACCESS:
return -EACCES;
}
return -EIO;
}
static int dma_port_request(struct tb_dma_port *dma, u32 in,
unsigned int timeout)
{
struct tb_switch *sw = dma->sw;
u32 out;
int ret;
ret = dma_port_write(sw->tb->ctl, &in, tb_route(sw), dma->port,
dma->base + MAIL_IN, 1, DMA_PORT_TIMEOUT);
if (ret)
return ret;
ret = dma_port_wait_for_completion(dma, timeout);
if (ret)
return ret;
ret = dma_port_read(sw->tb->ctl, &out, tb_route(sw), dma->port,
dma->base + MAIL_OUT, 1, DMA_PORT_TIMEOUT);
if (ret)
return ret;
return status_to_errno(out);
}
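/*
 * The mailbox handshake in dma_port_request() above is: write the
 * command word to MAIL_IN with MAIL_IN_OP_REQUEST set, poll MAIL_IN
 * until the firmware clears the request bit (or the timeout expires),
 * then read MAIL_OUT and convert its status field to an errno.
 */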
static int dma_port_flash_read_block(void *data, unsigned int dwaddress,
void *buf, size_t dwords)
{
struct tb_dma_port *dma = data;
struct tb_switch *sw = dma->sw;
int ret;
u32 in;
in = MAIL_IN_CMD_FLASH_READ << MAIL_IN_CMD_SHIFT;
if (dwords < MAIL_DATA_DWORDS)
in |= (dwords << MAIL_IN_DWORDS_SHIFT) & MAIL_IN_DWORDS_MASK;
in |= (dwaddress << MAIL_IN_ADDRESS_SHIFT) & MAIL_IN_ADDRESS_MASK;
in |= MAIL_IN_OP_REQUEST;
ret = dma_port_request(dma, in, DMA_PORT_TIMEOUT);
if (ret)
return ret;
return dma_port_read(sw->tb->ctl, buf, tb_route(sw), dma->port,
dma->base + MAIL_DATA, dwords, DMA_PORT_TIMEOUT);
}
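/*
 * Worked encoding example for the flash read above (the address is
 * illustrative): reading a full MAIL_DATA_DWORDS (16 dword) block from
 * dword address 0x100 leaves the dwords field at zero and builds
 * in = (MAIL_IN_CMD_FLASH_READ << 28) | (0x100 << 2) |
 * MAIL_IN_OP_REQUEST = 0x20000401.
 */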
static int dma_port_flash_write_block(void *data, unsigned int dwaddress,
const void *buf, size_t dwords)
{
struct tb_dma_port *dma = data;
struct tb_switch *sw = dma->sw;
int ret;
u32 in;
/* Write the block to MAIL_DATA registers */
ret = dma_port_write(sw->tb->ctl, buf, tb_route(sw), dma->port,
dma->base + MAIL_DATA, dwords, DMA_PORT_TIMEOUT);
if (ret)
return ret;
in = MAIL_IN_CMD_FLASH_WRITE << MAIL_IN_CMD_SHIFT;
/* CSS header write is always done to the same magic address */
if (dwaddress >= DMA_PORT_CSS_ADDRESS)
in |= MAIL_IN_CSS;
in |= ((dwords - 1) << MAIL_IN_DWORDS_SHIFT) & MAIL_IN_DWORDS_MASK;
in |= (dwaddress << MAIL_IN_ADDRESS_SHIFT) & MAIL_IN_ADDRESS_MASK;
in |= MAIL_IN_OP_REQUEST;
return dma_port_request(dma, in, DMA_PORT_TIMEOUT);
}
/**
* dma_port_flash_read() - Read from active flash region
* @dma: DMA control port
* @address: Address relative to the start of active region
* @buf: Buffer where the data is read
* @size: Size of the buffer
*/
int dma_port_flash_read(struct tb_dma_port *dma, unsigned int address,
void *buf, size_t size)
{
return tb_nvm_read_data(address, buf, size, DMA_PORT_RETRIES,
dma_port_flash_read_block, dma);
}
/**
* dma_port_flash_write() - Write to non-active flash region
* @dma: DMA control port
* @address: Address relative to the start of non-active region
* @buf: Data to write
* @size: Size of the buffer
*
 * Writes a block of data to the non-active flash region of the switch.
 * If the address is given as %DMA_PORT_CSS_ADDRESS the block is
 * written using the CSS command.
*/
int dma_port_flash_write(struct tb_dma_port *dma, unsigned int address,
const void *buf, size_t size)
{
if (address >= DMA_PORT_CSS_ADDRESS && size > DMA_PORT_CSS_MAX_SIZE)
return -E2BIG;
return tb_nvm_write_data(address, buf, size, DMA_PORT_RETRIES,
dma_port_flash_write_block, dma);
}
/**
* dma_port_flash_update_auth() - Starts flash authenticate cycle
* @dma: DMA control port
*
 * Starts the flash update authentication cycle. If the image in the
 * non-active area was valid, the switch starts the upgrade process
 * where the active and non-active areas get swapped in the end. The
 * caller should call dma_port_flash_update_auth_status() to get the
 * status of this command, because if the switch in question is the
 * root switch the Thunderbolt host controller gets reset as well.
*/
int dma_port_flash_update_auth(struct tb_dma_port *dma)
{
u32 in;
in = MAIL_IN_CMD_FLASH_UPDATE_AUTH << MAIL_IN_CMD_SHIFT;
in |= MAIL_IN_OP_REQUEST;
return dma_port_request(dma, in, 150);
}
/**
* dma_port_flash_update_auth_status() - Reads status of update auth command
* @dma: DMA control port
* @status: Status code of the operation
*
* The function checks if there is status available from the last update
* auth command. Returns %0 if there is no status and no further
* action is required. If there is status, %1 is returned instead and
* @status holds the failure code.
*
* Negative return means there was an error reading status from the
* switch.
*/
int dma_port_flash_update_auth_status(struct tb_dma_port *dma, u32 *status)
{
struct tb_switch *sw = dma->sw;
u32 out, cmd;
int ret;
ret = dma_port_read(sw->tb->ctl, &out, tb_route(sw), dma->port,
dma->base + MAIL_OUT, 1, DMA_PORT_TIMEOUT);
if (ret)
return ret;
/* Check if the status relates to flash update auth */
cmd = (out & MAIL_OUT_STATUS_CMD_MASK) >> MAIL_OUT_STATUS_CMD_SHIFT;
if (cmd == MAIL_IN_CMD_FLASH_UPDATE_AUTH) {
if (status)
*status = out & MAIL_OUT_STATUS_MASK;
/* Reset is needed in any case */
return 1;
}
return 0;
}
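/*
 * Typical upgrade flow using the helpers above (a sketch, the exact
 * sequencing is up to the caller): write the new image to the
 * non-active region with dma_port_flash_write(), trigger the
 * authentication with dma_port_flash_update_auth(), and after the
 * switch (and possibly the host controller) has been reset, call
 * dma_port_flash_update_auth_status() to find out whether the image
 * was accepted.
 */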
/**
* dma_port_power_cycle() - Power cycles the switch
* @dma: DMA control port
*
* Triggers power cycle to the switch.
*/
int dma_port_power_cycle(struct tb_dma_port *dma)
{
u32 in;
in = MAIL_IN_CMD_POWER_CYCLE << MAIL_IN_CMD_SHIFT;
in |= MAIL_IN_OP_REQUEST;
return dma_port_request(dma, in, 150);
}
| linux-master | drivers/thunderbolt/dma_port.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Thunderbolt/USB4 retimer support.
*
* Copyright (C) 2020, Intel Corporation
* Authors: Kranthi Kuntala <[email protected]>
* Mika Westerberg <[email protected]>
*/
#include <linux/delay.h>
#include <linux/pm_runtime.h>
#include <linux/sched/signal.h>
#include "sb_regs.h"
#include "tb.h"
#define TB_MAX_RETIMER_INDEX 6
/**
* tb_retimer_nvm_read() - Read contents of retimer NVM
* @rt: Retimer device
* @address: NVM address (in bytes) to start reading
* @buf: Data read from NVM is stored here
* @size: Number of bytes to read
*
* Reads retimer NVM and copies the contents to @buf. Returns %0 if the
* read was successful and negative errno in case of failure.
*/
int tb_retimer_nvm_read(struct tb_retimer *rt, unsigned int address, void *buf,
size_t size)
{
return usb4_port_retimer_nvm_read(rt->port, rt->index, address, buf, size);
}
static int nvm_read(void *priv, unsigned int offset, void *val, size_t bytes)
{
struct tb_nvm *nvm = priv;
struct tb_retimer *rt = tb_to_retimer(nvm->dev);
int ret;
pm_runtime_get_sync(&rt->dev);
if (!mutex_trylock(&rt->tb->lock)) {
ret = restart_syscall();
goto out;
}
ret = tb_retimer_nvm_read(rt, offset, val, bytes);
mutex_unlock(&rt->tb->lock);
out:
pm_runtime_mark_last_busy(&rt->dev);
pm_runtime_put_autosuspend(&rt->dev);
return ret;
}
static int nvm_write(void *priv, unsigned int offset, void *val, size_t bytes)
{
struct tb_nvm *nvm = priv;
struct tb_retimer *rt = tb_to_retimer(nvm->dev);
int ret = 0;
if (!mutex_trylock(&rt->tb->lock))
return restart_syscall();
ret = tb_nvm_write_buf(nvm, offset, val, bytes);
mutex_unlock(&rt->tb->lock);
return ret;
}
static int tb_retimer_nvm_add(struct tb_retimer *rt)
{
struct tb_nvm *nvm;
int ret;
nvm = tb_nvm_alloc(&rt->dev);
if (IS_ERR(nvm)) {
ret = PTR_ERR(nvm) == -EOPNOTSUPP ? 0 : PTR_ERR(nvm);
goto err_nvm;
}
ret = tb_nvm_read_version(nvm);
if (ret)
goto err_nvm;
ret = tb_nvm_add_active(nvm, nvm_read);
if (ret)
goto err_nvm;
ret = tb_nvm_add_non_active(nvm, nvm_write);
if (ret)
goto err_nvm;
rt->nvm = nvm;
return 0;
err_nvm:
dev_dbg(&rt->dev, "NVM upgrade disabled\n");
if (!IS_ERR(nvm))
tb_nvm_free(nvm);
return ret;
}
static int tb_retimer_nvm_validate_and_write(struct tb_retimer *rt)
{
unsigned int image_size;
const u8 *buf;
int ret;
ret = tb_nvm_validate(rt->nvm);
if (ret)
return ret;
buf = rt->nvm->buf_data_start;
image_size = rt->nvm->buf_data_size;
ret = usb4_port_retimer_nvm_write(rt->port, rt->index, 0, buf,
image_size);
if (ret)
return ret;
rt->nvm->flushed = true;
return 0;
}
static int tb_retimer_nvm_authenticate(struct tb_retimer *rt, bool auth_only)
{
u32 status;
int ret;
if (auth_only) {
ret = usb4_port_retimer_nvm_set_offset(rt->port, rt->index, 0);
if (ret)
return ret;
}
ret = usb4_port_retimer_nvm_authenticate(rt->port, rt->index);
if (ret)
return ret;
usleep_range(100, 150);
/*
 * Check the status now if we can still access the retimer. It
 * is expected that the access below fails.
*/
ret = usb4_port_retimer_nvm_authenticate_status(rt->port, rt->index,
&status);
if (!ret) {
rt->auth_status = status;
return status ? -EINVAL : 0;
}
return 0;
}
static ssize_t device_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct tb_retimer *rt = tb_to_retimer(dev);
return sysfs_emit(buf, "%#x\n", rt->device);
}
static DEVICE_ATTR_RO(device);
static ssize_t nvm_authenticate_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct tb_retimer *rt = tb_to_retimer(dev);
int ret;
if (!mutex_trylock(&rt->tb->lock))
return restart_syscall();
if (!rt->nvm)
ret = -EAGAIN;
else if (rt->no_nvm_upgrade)
ret = -EOPNOTSUPP;
else
ret = sysfs_emit(buf, "%#x\n", rt->auth_status);
mutex_unlock(&rt->tb->lock);
return ret;
}
static void tb_retimer_nvm_authenticate_status(struct tb_port *port, u32 *status)
{
int i;
tb_port_dbg(port, "reading NVM authentication status of retimers\n");
/*
* Before doing anything else, read the authentication status.
* If the retimer has it set, store it for the new retimer
* device instance.
*/
for (i = 1; i <= TB_MAX_RETIMER_INDEX; i++)
usb4_port_retimer_nvm_authenticate_status(port, i, &status[i]);
}
static void tb_retimer_set_inbound_sbtx(struct tb_port *port)
{
int i;
/*
 * When the USB4 port is online, sideband communications are
 * already up.
*/
if (!usb4_port_device_is_offline(port->usb4))
return;
tb_port_dbg(port, "enabling sideband transactions\n");
for (i = 1; i <= TB_MAX_RETIMER_INDEX; i++)
usb4_port_retimer_set_inbound_sbtx(port, i);
}
static void tb_retimer_unset_inbound_sbtx(struct tb_port *port)
{
int i;
/*
 * When the USB4 port is offline we need to keep the sideband
* communications up to make it possible to communicate with
* the connected retimers.
*/
if (usb4_port_device_is_offline(port->usb4))
return;
tb_port_dbg(port, "disabling sideband transactions\n");
for (i = TB_MAX_RETIMER_INDEX; i >= 1; i--)
usb4_port_retimer_unset_inbound_sbtx(port, i);
}
static ssize_t nvm_authenticate_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t count)
{
struct tb_retimer *rt = tb_to_retimer(dev);
int val, ret;
pm_runtime_get_sync(&rt->dev);
if (!mutex_trylock(&rt->tb->lock)) {
ret = restart_syscall();
goto exit_rpm;
}
if (!rt->nvm) {
ret = -EAGAIN;
goto exit_unlock;
}
ret = kstrtoint(buf, 10, &val);
if (ret)
goto exit_unlock;
/* Always clear status */
rt->auth_status = 0;
if (val) {
/*
 * When NVM authentication starts, the retimer is not
 * accessible, so calling tb_retimer_unset_inbound_sbtx()
 * would fail and therefore we do not call it. The exceptions
 * are when validation fails or when we only write the new
 * NVM image without authenticating it.
*/
tb_retimer_set_inbound_sbtx(rt->port);
if (val == AUTHENTICATE_ONLY) {
ret = tb_retimer_nvm_authenticate(rt, true);
} else {
if (!rt->nvm->flushed) {
if (!rt->nvm->buf) {
ret = -EINVAL;
goto exit_unlock;
}
ret = tb_retimer_nvm_validate_and_write(rt);
if (ret || val == WRITE_ONLY)
goto exit_unlock;
}
if (val == WRITE_AND_AUTHENTICATE)
ret = tb_retimer_nvm_authenticate(rt, false);
}
}
exit_unlock:
if (ret || val == WRITE_ONLY)
tb_retimer_unset_inbound_sbtx(rt->port);
mutex_unlock(&rt->tb->lock);
exit_rpm:
pm_runtime_mark_last_busy(&rt->dev);
pm_runtime_put_autosuspend(&rt->dev);
if (ret)
return ret;
return count;
}
static DEVICE_ATTR_RW(nvm_authenticate);
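/*
 * Note on the values accepted by the store handler above: WRITE_ONLY
 * only flushes the buffered image to the retimer NVM, AUTHENTICATE_ONLY
 * authenticates an image that was written earlier, and
 * WRITE_AND_AUTHENTICATE does both. The constants come from the
 * driver's shared headers and mirror the router NVM upgrade flow.
 */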
static ssize_t nvm_version_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct tb_retimer *rt = tb_to_retimer(dev);
int ret;
if (!mutex_trylock(&rt->tb->lock))
return restart_syscall();
if (!rt->nvm)
ret = -EAGAIN;
else
ret = sysfs_emit(buf, "%x.%x\n", rt->nvm->major, rt->nvm->minor);
mutex_unlock(&rt->tb->lock);
return ret;
}
static DEVICE_ATTR_RO(nvm_version);
static ssize_t vendor_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct tb_retimer *rt = tb_to_retimer(dev);
return sysfs_emit(buf, "%#x\n", rt->vendor);
}
static DEVICE_ATTR_RO(vendor);
static struct attribute *retimer_attrs[] = {
&dev_attr_device.attr,
&dev_attr_nvm_authenticate.attr,
&dev_attr_nvm_version.attr,
&dev_attr_vendor.attr,
NULL
};
static const struct attribute_group retimer_group = {
.attrs = retimer_attrs,
};
static const struct attribute_group *retimer_groups[] = {
&retimer_group,
NULL
};
static void tb_retimer_release(struct device *dev)
{
struct tb_retimer *rt = tb_to_retimer(dev);
kfree(rt);
}
struct device_type tb_retimer_type = {
.name = "thunderbolt_retimer",
.groups = retimer_groups,
.release = tb_retimer_release,
};
static int tb_retimer_add(struct tb_port *port, u8 index, u32 auth_status)
{
struct tb_retimer *rt;
u32 vendor, device;
int ret;
ret = usb4_port_retimer_read(port, index, USB4_SB_VENDOR_ID, &vendor,
sizeof(vendor));
if (ret) {
if (ret != -ENODEV)
tb_port_warn(port, "failed read retimer VendorId: %d\n", ret);
return ret;
}
ret = usb4_port_retimer_read(port, index, USB4_SB_PRODUCT_ID, &device,
sizeof(device));
if (ret) {
if (ret != -ENODEV)
tb_port_warn(port, "failed read retimer ProductId: %d\n", ret);
return ret;
}
/*
* Check that it supports NVM operations. If not then don't add
* the device at all.
*/
ret = usb4_port_retimer_nvm_sector_size(port, index);
if (ret < 0)
return ret;
rt = kzalloc(sizeof(*rt), GFP_KERNEL);
if (!rt)
return -ENOMEM;
rt->index = index;
rt->vendor = vendor;
rt->device = device;
rt->auth_status = auth_status;
rt->port = port;
rt->tb = port->sw->tb;
rt->dev.parent = &port->usb4->dev;
rt->dev.bus = &tb_bus_type;
rt->dev.type = &tb_retimer_type;
dev_set_name(&rt->dev, "%s:%u.%u", dev_name(&port->sw->dev),
port->port, index);
ret = device_register(&rt->dev);
if (ret) {
dev_err(&rt->dev, "failed to register retimer: %d\n", ret);
put_device(&rt->dev);
return ret;
}
ret = tb_retimer_nvm_add(rt);
if (ret) {
dev_err(&rt->dev, "failed to add NVM devices: %d\n", ret);
device_unregister(&rt->dev);
return ret;
}
dev_info(&rt->dev, "new retimer found, vendor=%#x device=%#x\n",
rt->vendor, rt->device);
pm_runtime_no_callbacks(&rt->dev);
pm_runtime_set_active(&rt->dev);
pm_runtime_enable(&rt->dev);
pm_runtime_set_autosuspend_delay(&rt->dev, TB_AUTOSUSPEND_DELAY);
pm_runtime_mark_last_busy(&rt->dev);
pm_runtime_use_autosuspend(&rt->dev);
return 0;
}
static void tb_retimer_remove(struct tb_retimer *rt)
{
dev_info(&rt->dev, "retimer disconnected\n");
tb_nvm_free(rt->nvm);
device_unregister(&rt->dev);
}
struct tb_retimer_lookup {
const struct tb_port *port;
u8 index;
};
static int retimer_match(struct device *dev, void *data)
{
const struct tb_retimer_lookup *lookup = data;
struct tb_retimer *rt = tb_to_retimer(dev);
return rt && rt->port == lookup->port && rt->index == lookup->index;
}
static struct tb_retimer *tb_port_find_retimer(struct tb_port *port, u8 index)
{
struct tb_retimer_lookup lookup = { .port = port, .index = index };
struct device *dev;
dev = device_find_child(&port->usb4->dev, &lookup, retimer_match);
if (dev)
return tb_to_retimer(dev);
return NULL;
}
/**
* tb_retimer_scan() - Scan for on-board retimers under port
* @port: USB4 port to scan
* @add: If true also registers found retimers
*
* Brings the sideband into a state where retimers can be accessed.
 * Then tries to enumerate on-board retimers connected to @port. Found
* retimers are registered as children of @port if @add is set. Does
* not scan for cable retimers for now.
*/
int tb_retimer_scan(struct tb_port *port, bool add)
{
u32 status[TB_MAX_RETIMER_INDEX + 1] = {};
int ret, i, last_idx = 0;
/*
* Send broadcast RT to make sure retimer indices facing this
* port are set.
*/
ret = usb4_port_enumerate_retimers(port);
if (ret)
return ret;
/*
* Immediately after sending enumerate retimers read the
* authentication status of each retimer.
*/
tb_retimer_nvm_authenticate_status(port, status);
/*
 * Enable the sideband channel for each retimer. We can do this
 * regardless of whether there is a device connected or not.
*/
tb_retimer_set_inbound_sbtx(port);
for (i = 1; i <= TB_MAX_RETIMER_INDEX; i++) {
/*
* Last retimer is true only for the last on-board
* retimer (the one connected directly to the Type-C
* port).
*/
ret = usb4_port_retimer_is_last(port, i);
if (ret > 0)
last_idx = i;
else if (ret < 0)
break;
}
tb_retimer_unset_inbound_sbtx(port);
if (!last_idx)
return 0;
/* Add on-board retimers if they do not exist already */
ret = 0;
for (i = 1; i <= last_idx; i++) {
struct tb_retimer *rt;
rt = tb_port_find_retimer(port, i);
if (rt) {
put_device(&rt->dev);
} else if (add) {
ret = tb_retimer_add(port, i, status[i]);
if (ret && ret != -EOPNOTSUPP)
break;
}
}
return ret;
}
static int remove_retimer(struct device *dev, void *data)
{
struct tb_retimer *rt = tb_to_retimer(dev);
struct tb_port *port = data;
if (rt && rt->port == port)
tb_retimer_remove(rt);
return 0;
}
/**
* tb_retimer_remove_all() - Remove all retimers under port
* @port: USB4 port whose retimers to remove
*
* This removes all previously added retimers under @port.
*/
void tb_retimer_remove_all(struct tb_port *port)
{
struct usb4_port *usb4;
usb4 = port->usb4;
if (usb4)
device_for_each_child_reverse(&usb4->dev, port,
remove_retimer);
}
| linux-master | drivers/thunderbolt/retimer.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Thunderbolt driver - NHI driver
*
* The NHI (native host interface) is the pci device that allows us to send and
* receive frames from the thunderbolt bus.
*
* Copyright (c) 2014 Andreas Noever <[email protected]>
* Copyright (C) 2018, Intel Corporation
*/
#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/iommu.h>
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/property.h>
#include <linux/string_helpers.h>
#include "nhi.h"
#include "nhi_regs.h"
#include "tb.h"
#define RING_TYPE(ring) ((ring)->is_tx ? "TX ring" : "RX ring")
#define RING_FIRST_USABLE_HOPID 1
/*
 * Used with QUIRK_E2E to specify an unused HopID to which the Rx
 * credits are transferred.
*/
#define RING_E2E_RESERVED_HOPID RING_FIRST_USABLE_HOPID
/*
 * Minimum number of vectors when we use MSI-X. Two are for the control
 * channel Rx/Tx and the remaining four are for cross-domain DMA paths.
*/
#define MSIX_MIN_VECS 6
#define MSIX_MAX_VECS 16
#define NHI_MAILBOX_TIMEOUT 500 /* ms */
/* Host interface quirks */
#define QUIRK_AUTO_CLEAR_INT BIT(0)
#define QUIRK_E2E BIT(1)
static bool host_reset = true;
module_param(host_reset, bool, 0444);
MODULE_PARM_DESC(host_reset, "reset USBv2 host router (default: true)");
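/*
 * Each ring has one interrupt status/mask bit: TX ring N uses bit N and
 * RX ring N uses bit hop_count + N. ring_interrupt_index() computes
 * that bit number.
 */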
static int ring_interrupt_index(const struct tb_ring *ring)
{
int bit = ring->hop;
if (!ring->is_tx)
bit += ring->nhi->hop_count;
return bit;
}
static void nhi_mask_interrupt(struct tb_nhi *nhi, int mask, int ring)
{
if (nhi->quirks & QUIRK_AUTO_CLEAR_INT) {
u32 val;
val = ioread32(nhi->iobase + REG_RING_INTERRUPT_BASE + ring);
iowrite32(val & ~mask, nhi->iobase + REG_RING_INTERRUPT_BASE + ring);
} else {
iowrite32(mask, nhi->iobase + REG_RING_INTERRUPT_MASK_CLEAR_BASE + ring);
}
}
static void nhi_clear_interrupt(struct tb_nhi *nhi, int ring)
{
if (nhi->quirks & QUIRK_AUTO_CLEAR_INT)
ioread32(nhi->iobase + REG_RING_NOTIFY_BASE + ring);
else
iowrite32(~0, nhi->iobase + REG_RING_INT_CLEAR + ring);
}
/*
* ring_interrupt_active() - activate/deactivate interrupts for a single ring
*
* ring->nhi->lock must be held.
*/
static void ring_interrupt_active(struct tb_ring *ring, bool active)
{
int index = ring_interrupt_index(ring) / 32 * 4;
int reg = REG_RING_INTERRUPT_BASE + index;
int interrupt_bit = ring_interrupt_index(ring) & 31;
int mask = 1 << interrupt_bit;
u32 old, new;
if (ring->irq > 0) {
u32 step, shift, ivr, misc;
void __iomem *ivr_base;
int auto_clear_bit;
int index;
if (ring->is_tx)
index = ring->hop;
else
index = ring->hop + ring->nhi->hop_count;
/*
* Intel routers support a bit that isn't part of
* the USB4 spec to ask the hardware to clear
* interrupt status bits automatically since
* we already know which interrupt was triggered.
*
* Other routers explicitly disable auto-clear
* to prevent conditions that may occur where two
* MSIX interrupts are simultaneously active and
* reading the register clears both of them.
*/
misc = ioread32(ring->nhi->iobase + REG_DMA_MISC);
if (ring->nhi->quirks & QUIRK_AUTO_CLEAR_INT)
auto_clear_bit = REG_DMA_MISC_INT_AUTO_CLEAR;
else
auto_clear_bit = REG_DMA_MISC_DISABLE_AUTO_CLEAR;
if (!(misc & auto_clear_bit))
iowrite32(misc | auto_clear_bit,
ring->nhi->iobase + REG_DMA_MISC);
ivr_base = ring->nhi->iobase + REG_INT_VEC_ALLOC_BASE;
step = index / REG_INT_VEC_ALLOC_REGS * REG_INT_VEC_ALLOC_BITS;
shift = index % REG_INT_VEC_ALLOC_REGS * REG_INT_VEC_ALLOC_BITS;
ivr = ioread32(ivr_base + step);
ivr &= ~(REG_INT_VEC_ALLOC_MASK << shift);
if (active)
ivr |= ring->vector << shift;
iowrite32(ivr, ivr_base + step);
}
old = ioread32(ring->nhi->iobase + reg);
if (active)
new = old | mask;
else
new = old & ~mask;
dev_dbg(&ring->nhi->pdev->dev,
"%s interrupt at register %#x bit %d (%#x -> %#x)\n",
active ? "enabling" : "disabling", reg, interrupt_bit, old, new);
if (new == old)
dev_WARN(&ring->nhi->pdev->dev,
"interrupt for %s %d is already %s\n",
RING_TYPE(ring), ring->hop,
active ? "enabled" : "disabled");
if (active)
iowrite32(new, ring->nhi->iobase + reg);
else
nhi_mask_interrupt(ring->nhi, mask, index);
}
/*
* nhi_disable_interrupts() - disable interrupts for all rings
*
* Use only during init and shutdown.
*/
static void nhi_disable_interrupts(struct tb_nhi *nhi)
{
int i = 0;
/* disable interrupts */
for (i = 0; i < RING_INTERRUPT_REG_COUNT(nhi); i++)
nhi_mask_interrupt(nhi, ~0, 4 * i);
/* clear interrupt status bits */
for (i = 0; i < RING_NOTIFY_REG_COUNT(nhi); i++)
nhi_clear_interrupt(nhi, 4 * i);
}
/* ring helper methods */
static void __iomem *ring_desc_base(struct tb_ring *ring)
{
void __iomem *io = ring->nhi->iobase;
io += ring->is_tx ? REG_TX_RING_BASE : REG_RX_RING_BASE;
io += ring->hop * 16;
return io;
}
static void __iomem *ring_options_base(struct tb_ring *ring)
{
void __iomem *io = ring->nhi->iobase;
io += ring->is_tx ? REG_TX_OPTIONS_BASE : REG_RX_OPTIONS_BASE;
io += ring->hop * 32;
return io;
}
static void ring_iowrite_cons(struct tb_ring *ring, u16 cons)
{
/*
 * The other 16 bits in the register are read-only and writes to them
 * are ignored by the hardware, so we can save one ioread32() by
* filling the read-only bits with zeroes.
*/
iowrite32(cons, ring_desc_base(ring) + 8);
}
static void ring_iowrite_prod(struct tb_ring *ring, u16 prod)
{
/* See ring_iowrite_cons() above for explanation */
iowrite32(prod << 16, ring_desc_base(ring) + 8);
}
static void ring_iowrite32desc(struct tb_ring *ring, u32 value, u32 offset)
{
iowrite32(value, ring_desc_base(ring) + offset);
}
static void ring_iowrite64desc(struct tb_ring *ring, u64 value, u32 offset)
{
iowrite32(value, ring_desc_base(ring) + offset);
iowrite32(value >> 32, ring_desc_base(ring) + offset + 4);
}
static void ring_iowrite32options(struct tb_ring *ring, u32 value, u32 offset)
{
iowrite32(value, ring_options_base(ring) + offset);
}
static bool ring_full(struct tb_ring *ring)
{
return ((ring->head + 1) % ring->size) == ring->tail;
}
static bool ring_empty(struct tb_ring *ring)
{
return ring->head == ring->tail;
}
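/*
 * Note that with this head/tail scheme one descriptor slot is always
 * kept unused: the ring counts as full when head + 1 equals tail
 * (modulo size), so the usable capacity is size - 1 frames.
 */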
/*
* ring_write_descriptors() - post frames from ring->queue to the controller
*
* ring->lock is held.
*/
static void ring_write_descriptors(struct tb_ring *ring)
{
struct ring_frame *frame, *n;
struct ring_desc *descriptor;
list_for_each_entry_safe(frame, n, &ring->queue, list) {
if (ring_full(ring))
break;
list_move_tail(&frame->list, &ring->in_flight);
descriptor = &ring->descriptors[ring->head];
descriptor->phys = frame->buffer_phy;
descriptor->time = 0;
descriptor->flags = RING_DESC_POSTED | RING_DESC_INTERRUPT;
if (ring->is_tx) {
descriptor->length = frame->size;
descriptor->eof = frame->eof;
descriptor->sof = frame->sof;
}
ring->head = (ring->head + 1) % ring->size;
if (ring->is_tx)
ring_iowrite_prod(ring, ring->head);
else
ring_iowrite_cons(ring, ring->head);
}
}
/*
* ring_work() - progress completed frames
*
* If the ring is shutting down then all frames are marked as canceled and
* their callbacks are invoked.
*
 * Otherwise we collect all completed frames from the ring buffer, write new
 * frames to the ring buffer and invoke the callbacks for the completed frames.
*/
static void ring_work(struct work_struct *work)
{
struct tb_ring *ring = container_of(work, typeof(*ring), work);
struct ring_frame *frame;
bool canceled = false;
unsigned long flags;
LIST_HEAD(done);
spin_lock_irqsave(&ring->lock, flags);
if (!ring->running) {
/* Move all frames to done and mark them as canceled. */
list_splice_tail_init(&ring->in_flight, &done);
list_splice_tail_init(&ring->queue, &done);
canceled = true;
goto invoke_callback;
}
while (!ring_empty(ring)) {
if (!(ring->descriptors[ring->tail].flags
& RING_DESC_COMPLETED))
break;
frame = list_first_entry(&ring->in_flight, typeof(*frame),
list);
list_move_tail(&frame->list, &done);
if (!ring->is_tx) {
frame->size = ring->descriptors[ring->tail].length;
frame->eof = ring->descriptors[ring->tail].eof;
frame->sof = ring->descriptors[ring->tail].sof;
frame->flags = ring->descriptors[ring->tail].flags;
}
ring->tail = (ring->tail + 1) % ring->size;
}
ring_write_descriptors(ring);
invoke_callback:
/* allow callbacks to schedule new work */
spin_unlock_irqrestore(&ring->lock, flags);
while (!list_empty(&done)) {
frame = list_first_entry(&done, typeof(*frame), list);
/*
* The callback may reenqueue or delete frame.
* Do not hold on to it.
*/
list_del_init(&frame->list);
if (frame->callback)
frame->callback(ring, frame, canceled);
}
}
int __tb_ring_enqueue(struct tb_ring *ring, struct ring_frame *frame)
{
unsigned long flags;
int ret = 0;
spin_lock_irqsave(&ring->lock, flags);
if (ring->running) {
list_add_tail(&frame->list, &ring->queue);
ring_write_descriptors(ring);
} else {
ret = -ESHUTDOWN;
}
spin_unlock_irqrestore(&ring->lock, flags);
return ret;
}
EXPORT_SYMBOL_GPL(__tb_ring_enqueue);
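/*
 * Illustrative sketch, not part of the driver: queueing a TX frame
 * through __tb_ring_enqueue(). The "example_" names are hypothetical;
 * service drivers normally use the tb_ring_tx()/tb_ring_rx() helpers
 * from <linux/thunderbolt.h>, which end up here.
 */
static void example_tx_callback(struct tb_ring *ring, struct ring_frame *frame,
				bool canceled)
{
	/* Unmap/free the buffer here; @canceled is true if the ring was stopped */
	kfree(frame);
}

static int __maybe_unused example_tx(struct tb_ring *ring, dma_addr_t dma,
				     u32 len)
{
	struct ring_frame *frame;

	frame = kzalloc(sizeof(*frame), GFP_KERNEL);
	if (!frame)
		return -ENOMEM;

	frame->buffer_phy = dma;		/* buffer already DMA mapped by the caller */
	frame->size = len;			/* frame length in bytes (12-bit field) */
	frame->eof = frame->sof = 0;		/* PDF values are protocol specific */
	frame->callback = example_tx_callback;

	/* Returns -ESHUTDOWN if the ring is not running */
	return __tb_ring_enqueue(ring, frame);
}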
/**
* tb_ring_poll() - Poll one completed frame from the ring
* @ring: Ring to poll
*
* This function can be called when @start_poll callback of the @ring
* has been called. It will read one completed frame from the ring and
* return it to the caller. Returns %NULL if there is no more completed
* frames.
*/
struct ring_frame *tb_ring_poll(struct tb_ring *ring)
{
struct ring_frame *frame = NULL;
unsigned long flags;
spin_lock_irqsave(&ring->lock, flags);
if (!ring->running)
goto unlock;
if (ring_empty(ring))
goto unlock;
if (ring->descriptors[ring->tail].flags & RING_DESC_COMPLETED) {
frame = list_first_entry(&ring->in_flight, typeof(*frame),
list);
list_del_init(&frame->list);
if (!ring->is_tx) {
frame->size = ring->descriptors[ring->tail].length;
frame->eof = ring->descriptors[ring->tail].eof;
frame->sof = ring->descriptors[ring->tail].sof;
frame->flags = ring->descriptors[ring->tail].flags;
}
ring->tail = (ring->tail + 1) % ring->size;
}
unlock:
spin_unlock_irqrestore(&ring->lock, flags);
return frame;
}
EXPORT_SYMBOL_GPL(tb_ring_poll);
static void __ring_interrupt_mask(struct tb_ring *ring, bool mask)
{
int idx = ring_interrupt_index(ring);
int reg = REG_RING_INTERRUPT_BASE + idx / 32 * 4;
int bit = idx % 32;
u32 val;
val = ioread32(ring->nhi->iobase + reg);
if (mask)
val &= ~BIT(bit);
else
val |= BIT(bit);
iowrite32(val, ring->nhi->iobase + reg);
}
/* Both @nhi->lock and @ring->lock should be held */
static void __ring_interrupt(struct tb_ring *ring)
{
if (!ring->running)
return;
if (ring->start_poll) {
__ring_interrupt_mask(ring, true);
ring->start_poll(ring->poll_data);
} else {
schedule_work(&ring->work);
}
}
/**
* tb_ring_poll_complete() - Re-start interrupt for the ring
* @ring: Ring to re-start the interrupt
*
* This will re-start (unmask) the ring interrupt once the user is done
* with polling.
*/
void tb_ring_poll_complete(struct tb_ring *ring)
{
unsigned long flags;
spin_lock_irqsave(&ring->nhi->lock, flags);
spin_lock(&ring->lock);
if (ring->start_poll)
__ring_interrupt_mask(ring, false);
spin_unlock(&ring->lock);
spin_unlock_irqrestore(&ring->nhi->lock, flags);
}
EXPORT_SYMBOL_GPL(tb_ring_poll_complete);
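/*
 * Illustrative sketch, not part of the driver: the polling flow enabled
 * by passing @start_poll to tb_ring_alloc_rx(). When the ring interrupt
 * fires it is masked and @start_poll is called; the consumer then
 * drains completed frames with tb_ring_poll() (for example from a NAPI
 * poll routine) and re-enables the interrupt with
 * tb_ring_poll_complete() when done. The "example_" name is
 * hypothetical.
 */
static void __maybe_unused example_drain_ring(struct tb_ring *ring)
{
	struct ring_frame *frame;

	while ((frame = tb_ring_poll(ring))) {
		/* Hand the completed frame to the protocol layer here */
	}

	/* Nothing left to process; unmask the ring interrupt again */
	tb_ring_poll_complete(ring);
}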
static void ring_clear_msix(const struct tb_ring *ring)
{
int bit;
if (ring->nhi->quirks & QUIRK_AUTO_CLEAR_INT)
return;
bit = ring_interrupt_index(ring) & 31;
if (ring->is_tx)
iowrite32(BIT(bit), ring->nhi->iobase + REG_RING_INT_CLEAR);
else
iowrite32(BIT(bit), ring->nhi->iobase + REG_RING_INT_CLEAR +
4 * (ring->nhi->hop_count / 32));
}
static irqreturn_t ring_msix(int irq, void *data)
{
struct tb_ring *ring = data;
spin_lock(&ring->nhi->lock);
ring_clear_msix(ring);
spin_lock(&ring->lock);
__ring_interrupt(ring);
spin_unlock(&ring->lock);
spin_unlock(&ring->nhi->lock);
return IRQ_HANDLED;
}
static int ring_request_msix(struct tb_ring *ring, bool no_suspend)
{
struct tb_nhi *nhi = ring->nhi;
unsigned long irqflags;
int ret;
if (!nhi->pdev->msix_enabled)
return 0;
ret = ida_simple_get(&nhi->msix_ida, 0, MSIX_MAX_VECS, GFP_KERNEL);
if (ret < 0)
return ret;
ring->vector = ret;
ret = pci_irq_vector(ring->nhi->pdev, ring->vector);
if (ret < 0)
goto err_ida_remove;
ring->irq = ret;
irqflags = no_suspend ? IRQF_NO_SUSPEND : 0;
ret = request_irq(ring->irq, ring_msix, irqflags, "thunderbolt", ring);
if (ret)
goto err_ida_remove;
return 0;
err_ida_remove:
ida_simple_remove(&nhi->msix_ida, ring->vector);
return ret;
}
static void ring_release_msix(struct tb_ring *ring)
{
if (ring->irq <= 0)
return;
free_irq(ring->irq, ring);
ida_simple_remove(&ring->nhi->msix_ida, ring->vector);
ring->vector = 0;
ring->irq = 0;
}
static int nhi_alloc_hop(struct tb_nhi *nhi, struct tb_ring *ring)
{
unsigned int start_hop = RING_FIRST_USABLE_HOPID;
int ret = 0;
if (nhi->quirks & QUIRK_E2E) {
start_hop = RING_FIRST_USABLE_HOPID + 1;
if (ring->flags & RING_FLAG_E2E && !ring->is_tx) {
dev_dbg(&nhi->pdev->dev, "quirking E2E TX HopID %u -> %u\n",
ring->e2e_tx_hop, RING_E2E_RESERVED_HOPID);
ring->e2e_tx_hop = RING_E2E_RESERVED_HOPID;
}
}
spin_lock_irq(&nhi->lock);
if (ring->hop < 0) {
unsigned int i;
/*
* Automatically allocate HopID from the non-reserved
* range 1 .. hop_count - 1.
*/
for (i = start_hop; i < nhi->hop_count; i++) {
if (ring->is_tx) {
if (!nhi->tx_rings[i]) {
ring->hop = i;
break;
}
} else {
if (!nhi->rx_rings[i]) {
ring->hop = i;
break;
}
}
}
}
if (ring->hop > 0 && ring->hop < start_hop) {
dev_warn(&nhi->pdev->dev, "invalid hop: %d\n", ring->hop);
ret = -EINVAL;
goto err_unlock;
}
if (ring->hop < 0 || ring->hop >= nhi->hop_count) {
dev_warn(&nhi->pdev->dev, "invalid hop: %d\n", ring->hop);
ret = -EINVAL;
goto err_unlock;
}
if (ring->is_tx && nhi->tx_rings[ring->hop]) {
dev_warn(&nhi->pdev->dev, "TX hop %d already allocated\n",
ring->hop);
ret = -EBUSY;
goto err_unlock;
}
if (!ring->is_tx && nhi->rx_rings[ring->hop]) {
dev_warn(&nhi->pdev->dev, "RX hop %d already allocated\n",
ring->hop);
ret = -EBUSY;
goto err_unlock;
}
if (ring->is_tx)
nhi->tx_rings[ring->hop] = ring;
else
nhi->rx_rings[ring->hop] = ring;
err_unlock:
spin_unlock_irq(&nhi->lock);
return ret;
}
static struct tb_ring *tb_ring_alloc(struct tb_nhi *nhi, u32 hop, int size,
bool transmit, unsigned int flags,
int e2e_tx_hop, u16 sof_mask, u16 eof_mask,
void (*start_poll)(void *),
void *poll_data)
{
struct tb_ring *ring = NULL;
dev_dbg(&nhi->pdev->dev, "allocating %s ring %d of size %d\n",
transmit ? "TX" : "RX", hop, size);
ring = kzalloc(sizeof(*ring), GFP_KERNEL);
if (!ring)
return NULL;
spin_lock_init(&ring->lock);
INIT_LIST_HEAD(&ring->queue);
INIT_LIST_HEAD(&ring->in_flight);
INIT_WORK(&ring->work, ring_work);
ring->nhi = nhi;
ring->hop = hop;
ring->is_tx = transmit;
ring->size = size;
ring->flags = flags;
ring->e2e_tx_hop = e2e_tx_hop;
ring->sof_mask = sof_mask;
ring->eof_mask = eof_mask;
ring->head = 0;
ring->tail = 0;
ring->running = false;
ring->start_poll = start_poll;
ring->poll_data = poll_data;
ring->descriptors = dma_alloc_coherent(&ring->nhi->pdev->dev,
size * sizeof(*ring->descriptors),
&ring->descriptors_dma, GFP_KERNEL | __GFP_ZERO);
if (!ring->descriptors)
goto err_free_ring;
if (ring_request_msix(ring, flags & RING_FLAG_NO_SUSPEND))
goto err_free_descs;
if (nhi_alloc_hop(nhi, ring))
goto err_release_msix;
return ring;
err_release_msix:
ring_release_msix(ring);
err_free_descs:
dma_free_coherent(&ring->nhi->pdev->dev,
ring->size * sizeof(*ring->descriptors),
ring->descriptors, ring->descriptors_dma);
err_free_ring:
kfree(ring);
return NULL;
}
/**
* tb_ring_alloc_tx() - Allocate DMA ring for transmit
* @nhi: Pointer to the NHI the ring is to be allocated
* @hop: HopID (ring) to allocate
* @size: Number of entries in the ring
* @flags: Flags for the ring
*/
struct tb_ring *tb_ring_alloc_tx(struct tb_nhi *nhi, int hop, int size,
unsigned int flags)
{
return tb_ring_alloc(nhi, hop, size, true, flags, 0, 0, 0, NULL, NULL);
}
EXPORT_SYMBOL_GPL(tb_ring_alloc_tx);
/**
* tb_ring_alloc_rx() - Allocate DMA ring for receive
* @nhi: Pointer to the NHI the ring is to be allocated
* @hop: HopID (ring) to allocate. Pass %-1 for automatic allocation.
* @size: Number of entries in the ring
* @flags: Flags for the ring
* @e2e_tx_hop: Transmit HopID when E2E is enabled in @flags
* @sof_mask: Mask of PDF values that start a frame
* @eof_mask: Mask of PDF values that end a frame
* @start_poll: If not %NULL the ring will call this function when an
 *		interrupt is triggered and masked, instead of invoking the
 *		callback for each Rx frame.
* @poll_data: Optional data passed to @start_poll
*/
struct tb_ring *tb_ring_alloc_rx(struct tb_nhi *nhi, int hop, int size,
unsigned int flags, int e2e_tx_hop,
u16 sof_mask, u16 eof_mask,
void (*start_poll)(void *), void *poll_data)
{
return tb_ring_alloc(nhi, hop, size, false, flags, e2e_tx_hop, sof_mask, eof_mask,
start_poll, poll_data);
}
EXPORT_SYMBOL_GPL(tb_ring_alloc_rx);
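/*
 * Illustrative sketch, not part of the driver: allocating and starting
 * an RX ring roughly the way a service driver would. The HopID, ring
 * size and PDF masks are arbitrary example values and the "example_"
 * name is hypothetical. Teardown is the reverse: tb_ring_stop()
 * followed by tb_ring_free().
 */
static struct tb_ring *__maybe_unused example_open_rx_ring(struct tb_nhi *nhi)
{
	struct tb_ring *ring;

	/* -1 lets the NHI pick any free HopID; accept all PDF values */
	ring = tb_ring_alloc_rx(nhi, -1, 256, RING_FLAG_FRAME, 0,
				0xffff, 0xffff, NULL, NULL);
	if (!ring)
		return NULL;

	tb_ring_start(ring);
	return ring;
}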
/**
* tb_ring_start() - enable a ring
* @ring: Ring to start
*
* Must not be invoked in parallel with tb_ring_stop().
*/
void tb_ring_start(struct tb_ring *ring)
{
u16 frame_size;
u32 flags;
spin_lock_irq(&ring->nhi->lock);
spin_lock(&ring->lock);
if (ring->nhi->going_away)
goto err;
if (ring->running) {
dev_WARN(&ring->nhi->pdev->dev, "ring already started\n");
goto err;
}
dev_dbg(&ring->nhi->pdev->dev, "starting %s %d\n",
RING_TYPE(ring), ring->hop);
if (ring->flags & RING_FLAG_FRAME) {
/* Means 4096 */
frame_size = 0;
flags = RING_FLAG_ENABLE;
} else {
frame_size = TB_FRAME_SIZE;
flags = RING_FLAG_ENABLE | RING_FLAG_RAW;
}
ring_iowrite64desc(ring, ring->descriptors_dma, 0);
if (ring->is_tx) {
ring_iowrite32desc(ring, ring->size, 12);
ring_iowrite32options(ring, 0, 4); /* time related? */
ring_iowrite32options(ring, flags, 0);
} else {
u32 sof_eof_mask = ring->sof_mask << 16 | ring->eof_mask;
ring_iowrite32desc(ring, (frame_size << 16) | ring->size, 12);
ring_iowrite32options(ring, sof_eof_mask, 4);
ring_iowrite32options(ring, flags, 0);
}
/*
* Now that the ring valid bit is set we can configure E2E if
* enabled for the ring.
*/
if (ring->flags & RING_FLAG_E2E) {
if (!ring->is_tx) {
u32 hop;
hop = ring->e2e_tx_hop << REG_RX_OPTIONS_E2E_HOP_SHIFT;
hop &= REG_RX_OPTIONS_E2E_HOP_MASK;
flags |= hop;
dev_dbg(&ring->nhi->pdev->dev,
"enabling E2E for %s %d with TX HopID %d\n",
RING_TYPE(ring), ring->hop, ring->e2e_tx_hop);
} else {
dev_dbg(&ring->nhi->pdev->dev, "enabling E2E for %s %d\n",
RING_TYPE(ring), ring->hop);
}
flags |= RING_FLAG_E2E_FLOW_CONTROL;
ring_iowrite32options(ring, flags, 0);
}
ring_interrupt_active(ring, true);
ring->running = true;
err:
spin_unlock(&ring->lock);
spin_unlock_irq(&ring->nhi->lock);
}
EXPORT_SYMBOL_GPL(tb_ring_start);
/**
* tb_ring_stop() - shutdown a ring
* @ring: Ring to stop
*
* Must not be invoked from a callback.
*
* This method will disable the ring. Further calls to
 * tb_ring_tx/tb_ring_rx will return -ESHUTDOWN until the ring is
 * started again.
*
* All enqueued frames will be canceled and their callbacks will be executed
* with frame->canceled set to true (on the callback thread). This method
* returns only after all callback invocations have finished.
*/
void tb_ring_stop(struct tb_ring *ring)
{
spin_lock_irq(&ring->nhi->lock);
spin_lock(&ring->lock);
dev_dbg(&ring->nhi->pdev->dev, "stopping %s %d\n",
RING_TYPE(ring), ring->hop);
if (ring->nhi->going_away)
goto err;
if (!ring->running) {
dev_WARN(&ring->nhi->pdev->dev, "%s %d already stopped\n",
RING_TYPE(ring), ring->hop);
goto err;
}
ring_interrupt_active(ring, false);
ring_iowrite32options(ring, 0, 0);
ring_iowrite64desc(ring, 0, 0);
ring_iowrite32desc(ring, 0, 8);
ring_iowrite32desc(ring, 0, 12);
ring->head = 0;
ring->tail = 0;
ring->running = false;
err:
spin_unlock(&ring->lock);
spin_unlock_irq(&ring->nhi->lock);
/*
* schedule ring->work to invoke callbacks on all remaining frames.
*/
schedule_work(&ring->work);
flush_work(&ring->work);
}
EXPORT_SYMBOL_GPL(tb_ring_stop);
/*
* tb_ring_free() - free ring
*
* When this method returns all invocations of ring->callback will have
* finished.
*
* Ring must be stopped.
*
* Must NOT be called from ring_frame->callback!
*/
void tb_ring_free(struct tb_ring *ring)
{
spin_lock_irq(&ring->nhi->lock);
/*
* Dissociate the ring from the NHI. This also ensures that
* nhi_interrupt_work cannot reschedule ring->work.
*/
if (ring->is_tx)
ring->nhi->tx_rings[ring->hop] = NULL;
else
ring->nhi->rx_rings[ring->hop] = NULL;
if (ring->running) {
dev_WARN(&ring->nhi->pdev->dev, "%s %d still running\n",
RING_TYPE(ring), ring->hop);
}
spin_unlock_irq(&ring->nhi->lock);
ring_release_msix(ring);
dma_free_coherent(&ring->nhi->pdev->dev,
ring->size * sizeof(*ring->descriptors),
ring->descriptors, ring->descriptors_dma);
ring->descriptors = NULL;
ring->descriptors_dma = 0;
dev_dbg(&ring->nhi->pdev->dev, "freeing %s %d\n", RING_TYPE(ring),
ring->hop);
/*
* ring->work can no longer be scheduled (it is scheduled only
* by nhi_interrupt_work, ring_stop and ring_msix). Wait for it
* to finish before freeing the ring.
*/
flush_work(&ring->work);
kfree(ring);
}
EXPORT_SYMBOL_GPL(tb_ring_free);
/**
* nhi_mailbox_cmd() - Send a command through NHI mailbox
* @nhi: Pointer to the NHI structure
* @cmd: Command to send
 * @data: Data to be sent with the command
*
* Sends mailbox command to the firmware running on NHI. Returns %0 in
* case of success and negative errno in case of failure.
*/
int nhi_mailbox_cmd(struct tb_nhi *nhi, enum nhi_mailbox_cmd cmd, u32 data)
{
ktime_t timeout;
u32 val;
iowrite32(data, nhi->iobase + REG_INMAIL_DATA);
val = ioread32(nhi->iobase + REG_INMAIL_CMD);
val &= ~(REG_INMAIL_CMD_MASK | REG_INMAIL_ERROR);
val |= REG_INMAIL_OP_REQUEST | cmd;
iowrite32(val, nhi->iobase + REG_INMAIL_CMD);
timeout = ktime_add_ms(ktime_get(), NHI_MAILBOX_TIMEOUT);
do {
val = ioread32(nhi->iobase + REG_INMAIL_CMD);
if (!(val & REG_INMAIL_OP_REQUEST))
break;
usleep_range(10, 20);
} while (ktime_before(ktime_get(), timeout));
if (val & REG_INMAIL_OP_REQUEST)
return -ETIMEDOUT;
if (val & REG_INMAIL_ERROR)
return -EIO;
return 0;
}
/**
* nhi_mailbox_mode() - Return current firmware operation mode
* @nhi: Pointer to the NHI structure
*
* The function reads current firmware operation mode using NHI mailbox
* registers and returns it to the caller.
*/
enum nhi_fw_mode nhi_mailbox_mode(struct tb_nhi *nhi)
{
u32 val;
val = ioread32(nhi->iobase + REG_OUTMAIL_CMD);
val &= REG_OUTMAIL_CMD_OPMODE_MASK;
val >>= REG_OUTMAIL_CMD_OPMODE_SHIFT;
return (enum nhi_fw_mode)val;
}
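/*
 * Illustrative sketch, not part of the driver: how a firmware
 * connection manager user could combine the two mailbox helpers above.
 * The constants NHI_FW_CM_MODE and NHI_MAILBOX_DRV_UNLOADS are assumed
 * to be the ones declared in nhi.h; the "example_" name is
 * hypothetical.
 */
static void __maybe_unused example_mailbox_usage(struct tb_nhi *nhi)
{
	/* Only meaningful when the firmware connection manager is active */
	if (nhi_mailbox_mode(nhi) != NHI_FW_CM_MODE)
		return;

	/* Tell the firmware that the driver is going away */
	if (nhi_mailbox_cmd(nhi, NHI_MAILBOX_DRV_UNLOADS, 0))
		dev_warn(&nhi->pdev->dev, "mailbox command failed\n");
}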
static void nhi_interrupt_work(struct work_struct *work)
{
struct tb_nhi *nhi = container_of(work, typeof(*nhi), interrupt_work);
int value = 0; /* Suppress uninitialized usage warning. */
int bit;
int hop = -1;
int type = 0; /* current interrupt type 0: TX, 1: RX, 2: RX overflow */
struct tb_ring *ring;
spin_lock_irq(&nhi->lock);
/*
* Starting at REG_RING_NOTIFY_BASE there are three status bitfields
 * (TX, RX, RX overflow). We iterate over the bits and read new
* dwords as required. The registers are cleared on read.
*/
for (bit = 0; bit < 3 * nhi->hop_count; bit++) {
if (bit % 32 == 0)
value = ioread32(nhi->iobase
+ REG_RING_NOTIFY_BASE
+ 4 * (bit / 32));
if (++hop == nhi->hop_count) {
hop = 0;
type++;
}
if ((value & (1 << (bit % 32))) == 0)
continue;
if (type == 2) {
dev_warn(&nhi->pdev->dev,
"RX overflow for ring %d\n",
hop);
continue;
}
if (type == 0)
ring = nhi->tx_rings[hop];
else
ring = nhi->rx_rings[hop];
if (ring == NULL) {
dev_warn(&nhi->pdev->dev,
"got interrupt for inactive %s ring %d\n",
type ? "RX" : "TX",
hop);
continue;
}
spin_lock(&ring->lock);
__ring_interrupt(ring);
spin_unlock(&ring->lock);
}
spin_unlock_irq(&nhi->lock);
}
static irqreturn_t nhi_msi(int irq, void *data)
{
struct tb_nhi *nhi = data;
schedule_work(&nhi->interrupt_work);
return IRQ_HANDLED;
}
static int __nhi_suspend_noirq(struct device *dev, bool wakeup)
{
struct pci_dev *pdev = to_pci_dev(dev);
struct tb *tb = pci_get_drvdata(pdev);
struct tb_nhi *nhi = tb->nhi;
int ret;
ret = tb_domain_suspend_noirq(tb);
if (ret)
return ret;
if (nhi->ops && nhi->ops->suspend_noirq) {
ret = nhi->ops->suspend_noirq(tb->nhi, wakeup);
if (ret)
return ret;
}
return 0;
}
static int nhi_suspend_noirq(struct device *dev)
{
return __nhi_suspend_noirq(dev, device_may_wakeup(dev));
}
static int nhi_freeze_noirq(struct device *dev)
{
struct pci_dev *pdev = to_pci_dev(dev);
struct tb *tb = pci_get_drvdata(pdev);
return tb_domain_freeze_noirq(tb);
}
static int nhi_thaw_noirq(struct device *dev)
{
struct pci_dev *pdev = to_pci_dev(dev);
struct tb *tb = pci_get_drvdata(pdev);
return tb_domain_thaw_noirq(tb);
}
static bool nhi_wake_supported(struct pci_dev *pdev)
{
u8 val;
/*
* If power rails are sustainable for wakeup from S4 this
* property is set by the BIOS.
*/
if (device_property_read_u8(&pdev->dev, "WAKE_SUPPORTED", &val))
return !!val;
return true;
}
static int nhi_poweroff_noirq(struct device *dev)
{
struct pci_dev *pdev = to_pci_dev(dev);
bool wakeup;
wakeup = device_may_wakeup(dev) && nhi_wake_supported(pdev);
return __nhi_suspend_noirq(dev, wakeup);
}
static void nhi_enable_int_throttling(struct tb_nhi *nhi)
{
/* Throttling is specified in 256 ns increments; 128 us is 500 units */
u32 throttle = DIV_ROUND_UP(128 * NSEC_PER_USEC, 256);
unsigned int i;
/*
* Configure interrupt throttling for all vectors even if we
* only use few.
*/
for (i = 0; i < MSIX_MAX_VECS; i++) {
u32 reg = REG_INT_THROTTLING_RATE + i * 4;
iowrite32(throttle, nhi->iobase + reg);
}
}
static int nhi_resume_noirq(struct device *dev)
{
struct pci_dev *pdev = to_pci_dev(dev);
struct tb *tb = pci_get_drvdata(pdev);
struct tb_nhi *nhi = tb->nhi;
int ret;
/*
* Check that the device is still there. It may be that the user
 * unplugged the last device, which causes the host controller to go
* away on PCs.
*/
if (!pci_device_is_present(pdev)) {
nhi->going_away = true;
} else {
if (nhi->ops && nhi->ops->resume_noirq) {
ret = nhi->ops->resume_noirq(nhi);
if (ret)
return ret;
}
nhi_enable_int_throttling(tb->nhi);
}
return tb_domain_resume_noirq(tb);
}
static int nhi_suspend(struct device *dev)
{
struct pci_dev *pdev = to_pci_dev(dev);
struct tb *tb = pci_get_drvdata(pdev);
return tb_domain_suspend(tb);
}
static void nhi_complete(struct device *dev)
{
struct pci_dev *pdev = to_pci_dev(dev);
struct tb *tb = pci_get_drvdata(pdev);
/*
* If we were runtime suspended when system suspend started,
* schedule runtime resume now. It should bring the domain back
* to functional state.
*/
if (pm_runtime_suspended(&pdev->dev))
pm_runtime_resume(&pdev->dev);
else
tb_domain_complete(tb);
}
static int nhi_runtime_suspend(struct device *dev)
{
struct pci_dev *pdev = to_pci_dev(dev);
struct tb *tb = pci_get_drvdata(pdev);
struct tb_nhi *nhi = tb->nhi;
int ret;
ret = tb_domain_runtime_suspend(tb);
if (ret)
return ret;
if (nhi->ops && nhi->ops->runtime_suspend) {
ret = nhi->ops->runtime_suspend(tb->nhi);
if (ret)
return ret;
}
return 0;
}
static int nhi_runtime_resume(struct device *dev)
{
struct pci_dev *pdev = to_pci_dev(dev);
struct tb *tb = pci_get_drvdata(pdev);
struct tb_nhi *nhi = tb->nhi;
int ret;
if (nhi->ops && nhi->ops->runtime_resume) {
ret = nhi->ops->runtime_resume(nhi);
if (ret)
return ret;
}
nhi_enable_int_throttling(nhi);
return tb_domain_runtime_resume(tb);
}
static void nhi_shutdown(struct tb_nhi *nhi)
{
int i;
dev_dbg(&nhi->pdev->dev, "shutdown\n");
for (i = 0; i < nhi->hop_count; i++) {
if (nhi->tx_rings[i])
dev_WARN(&nhi->pdev->dev,
"TX ring %d is still active\n", i);
if (nhi->rx_rings[i])
dev_WARN(&nhi->pdev->dev,
"RX ring %d is still active\n", i);
}
nhi_disable_interrupts(nhi);
/*
* We have to release the irq before calling flush_work. Otherwise an
* already executing IRQ handler could call schedule_work again.
*/
if (!nhi->pdev->msix_enabled) {
devm_free_irq(&nhi->pdev->dev, nhi->pdev->irq, nhi);
flush_work(&nhi->interrupt_work);
}
ida_destroy(&nhi->msix_ida);
if (nhi->ops && nhi->ops->shutdown)
nhi->ops->shutdown(nhi);
}
static void nhi_check_quirks(struct tb_nhi *nhi)
{
if (nhi->pdev->vendor == PCI_VENDOR_ID_INTEL) {
/*
* Intel hardware supports auto clear of the interrupt
* status register right after interrupt is being
* issued.
*/
nhi->quirks |= QUIRK_AUTO_CLEAR_INT;
switch (nhi->pdev->device) {
case PCI_DEVICE_ID_INTEL_FALCON_RIDGE_2C_NHI:
case PCI_DEVICE_ID_INTEL_FALCON_RIDGE_4C_NHI:
/*
* Falcon Ridge controller needs the end-to-end
* flow control workaround to avoid losing Rx
* packets when RING_FLAG_E2E is set.
*/
nhi->quirks |= QUIRK_E2E;
break;
}
}
}
static int nhi_check_iommu_pdev(struct pci_dev *pdev, void *data)
{
if (!pdev->external_facing ||
!device_iommu_capable(&pdev->dev, IOMMU_CAP_PRE_BOOT_PROTECTION))
return 0;
*(bool *)data = true;
return 1; /* Stop walking */
}
static void nhi_check_iommu(struct tb_nhi *nhi)
{
struct pci_bus *bus = nhi->pdev->bus;
bool port_ok = false;
/*
* Ideally what we'd do here is grab every PCI device that
* represents a tunnelling adapter for this NHI and check their
* status directly, but unfortunately USB4 seems to make it
* obnoxiously difficult to reliably make any correlation.
*
* So for now we'll have to bodge it... Hoping that the system
* is at least sane enough that an adapter is in the same PCI
* segment as its NHI, if we can find *something* on that segment
* which meets the requirements for Kernel DMA Protection, we'll
* take that to imply that firmware is aware and has (hopefully)
* done the right thing in general. We need to know that the PCI
* layer has seen the ExternalFacingPort property which will then
* inform the IOMMU layer to enforce the complete "untrusted DMA"
* flow, but also that the IOMMU driver itself can be trusted not
* to have been subverted by a pre-boot DMA attack.
*/
while (bus->parent)
bus = bus->parent;
pci_walk_bus(bus, nhi_check_iommu_pdev, &port_ok);
nhi->iommu_dma_protection = port_ok;
dev_dbg(&nhi->pdev->dev, "IOMMU DMA protection is %s\n",
str_enabled_disabled(port_ok));
}
static void nhi_reset(struct tb_nhi *nhi)
{
ktime_t timeout;
u32 val;
val = ioread32(nhi->iobase + REG_CAPS);
/* Reset only v2 and later routers */
if (FIELD_GET(REG_CAPS_VERSION_MASK, val) < REG_CAPS_VERSION_2)
return;
if (!host_reset) {
dev_dbg(&nhi->pdev->dev, "skipping host router reset\n");
return;
}
iowrite32(REG_RESET_HRR, nhi->iobase + REG_RESET);
msleep(100);
timeout = ktime_add_ms(ktime_get(), 500);
do {
val = ioread32(nhi->iobase + REG_RESET);
if (!(val & REG_RESET_HRR)) {
dev_warn(&nhi->pdev->dev, "host router reset successful\n");
return;
}
usleep_range(10, 20);
} while (ktime_before(ktime_get(), timeout));
dev_warn(&nhi->pdev->dev, "timeout resetting host router\n");
}
static int nhi_init_msi(struct tb_nhi *nhi)
{
struct pci_dev *pdev = nhi->pdev;
struct device *dev = &pdev->dev;
int res, irq, nvec;
/* In case someone left them on. */
nhi_disable_interrupts(nhi);
nhi_enable_int_throttling(nhi);
ida_init(&nhi->msix_ida);
/*
* The NHI has 16 MSI-X vectors or a single MSI. We first try to
* get all MSI-X vectors and if we succeed, each ring will have
 * one MSI-X vector. If for some reason that does not work out, we
 * fall back to a single MSI.
*/
nvec = pci_alloc_irq_vectors(pdev, MSIX_MIN_VECS, MSIX_MAX_VECS,
PCI_IRQ_MSIX);
if (nvec < 0) {
nvec = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_MSI);
if (nvec < 0)
return nvec;
INIT_WORK(&nhi->interrupt_work, nhi_interrupt_work);
irq = pci_irq_vector(nhi->pdev, 0);
if (irq < 0)
return irq;
res = devm_request_irq(&pdev->dev, irq, nhi_msi,
IRQF_NO_SUSPEND, "thunderbolt", nhi);
if (res)
return dev_err_probe(dev, res, "request_irq failed, aborting\n");
}
return 0;
}
static bool nhi_imr_valid(struct pci_dev *pdev)
{
u8 val;
if (!device_property_read_u8(&pdev->dev, "IMR_VALID", &val))
return !!val;
return true;
}
static struct tb *nhi_select_cm(struct tb_nhi *nhi)
{
struct tb *tb;
/*
 * The USB4 case is simple: if we got control of any of the
 * capabilities, we use the software CM.
*/
if (tb_acpi_is_native())
return tb_probe(nhi);
/*
 * Either the firmware based CM is running (we did not get control
 * from the firmware) or this is a pre-USB4 PC, so try the firmware
 * CM first and then fall back to the software CM.
*/
tb = icm_probe(nhi);
if (!tb)
tb = tb_probe(nhi);
return tb;
}
static int nhi_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
struct device *dev = &pdev->dev;
struct tb_nhi *nhi;
struct tb *tb;
int res;
if (!nhi_imr_valid(pdev))
return dev_err_probe(dev, -ENODEV, "firmware image not valid, aborting\n");
res = pcim_enable_device(pdev);
if (res)
return dev_err_probe(dev, res, "cannot enable PCI device, aborting\n");
res = pcim_iomap_regions(pdev, 1 << 0, "thunderbolt");
if (res)
return dev_err_probe(dev, res, "cannot obtain PCI resources, aborting\n");
nhi = devm_kzalloc(&pdev->dev, sizeof(*nhi), GFP_KERNEL);
if (!nhi)
return -ENOMEM;
nhi->pdev = pdev;
nhi->ops = (const struct tb_nhi_ops *)id->driver_data;
/* cannot fail - table is allocated in pcim_iomap_regions */
nhi->iobase = pcim_iomap_table(pdev)[0];
nhi->hop_count = ioread32(nhi->iobase + REG_CAPS) & 0x3ff;
dev_dbg(dev, "total paths: %d\n", nhi->hop_count);
nhi->tx_rings = devm_kcalloc(&pdev->dev, nhi->hop_count,
sizeof(*nhi->tx_rings), GFP_KERNEL);
nhi->rx_rings = devm_kcalloc(&pdev->dev, nhi->hop_count,
sizeof(*nhi->rx_rings), GFP_KERNEL);
if (!nhi->tx_rings || !nhi->rx_rings)
return -ENOMEM;
nhi_check_quirks(nhi);
nhi_check_iommu(nhi);
nhi_reset(nhi);
res = nhi_init_msi(nhi);
if (res)
return dev_err_probe(dev, res, "cannot enable MSI, aborting\n");
spin_lock_init(&nhi->lock);
res = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
if (res)
return dev_err_probe(dev, res, "failed to set DMA mask\n");
pci_set_master(pdev);
if (nhi->ops && nhi->ops->init) {
res = nhi->ops->init(nhi);
if (res)
return res;
}
tb = nhi_select_cm(nhi);
if (!tb)
return dev_err_probe(dev, -ENODEV,
"failed to determine connection manager, aborting\n");
dev_dbg(dev, "NHI initialized, starting thunderbolt\n");
res = tb_domain_add(tb);
if (res) {
/*
* At this point the RX/TX rings might already have been
* activated. Do a proper shutdown.
*/
tb_domain_put(tb);
nhi_shutdown(nhi);
return res;
}
pci_set_drvdata(pdev, tb);
device_wakeup_enable(&pdev->dev);
pm_runtime_allow(&pdev->dev);
pm_runtime_set_autosuspend_delay(&pdev->dev, TB_AUTOSUSPEND_DELAY);
pm_runtime_use_autosuspend(&pdev->dev);
pm_runtime_put_autosuspend(&pdev->dev);
return 0;
}
static void nhi_remove(struct pci_dev *pdev)
{
struct tb *tb = pci_get_drvdata(pdev);
struct tb_nhi *nhi = tb->nhi;
pm_runtime_get_sync(&pdev->dev);
pm_runtime_dont_use_autosuspend(&pdev->dev);
pm_runtime_forbid(&pdev->dev);
tb_domain_remove(tb);
nhi_shutdown(nhi);
}
/*
* The tunneled pci bridges are siblings of us. Use resume_noirq to reenable
* the tunnels asap. A corresponding pci quirk blocks the downstream bridges
* resume_noirq until we are done.
*/
static const struct dev_pm_ops nhi_pm_ops = {
.suspend_noirq = nhi_suspend_noirq,
.resume_noirq = nhi_resume_noirq,
.freeze_noirq = nhi_freeze_noirq, /*
* we just disable hotplug, the
* pci-tunnels stay alive.
*/
.thaw_noirq = nhi_thaw_noirq,
.restore_noirq = nhi_resume_noirq,
.suspend = nhi_suspend,
.poweroff_noirq = nhi_poweroff_noirq,
.poweroff = nhi_suspend,
.complete = nhi_complete,
.runtime_suspend = nhi_runtime_suspend,
.runtime_resume = nhi_runtime_resume,
};
static struct pci_device_id nhi_ids[] = {
/*
 * We have to specify the class because the TB bridges use the same
 * device and vendor (sub)IDs on gen 1 and gen 2 controllers.
*/
{
.class = PCI_CLASS_SYSTEM_OTHER << 8, .class_mask = ~0,
.vendor = PCI_VENDOR_ID_INTEL,
.device = PCI_DEVICE_ID_INTEL_LIGHT_RIDGE,
.subvendor = 0x2222, .subdevice = 0x1111,
},
{
.class = PCI_CLASS_SYSTEM_OTHER << 8, .class_mask = ~0,
.vendor = PCI_VENDOR_ID_INTEL,
.device = PCI_DEVICE_ID_INTEL_CACTUS_RIDGE_4C,
.subvendor = 0x2222, .subdevice = 0x1111,
},
{
.class = PCI_CLASS_SYSTEM_OTHER << 8, .class_mask = ~0,
.vendor = PCI_VENDOR_ID_INTEL,
.device = PCI_DEVICE_ID_INTEL_FALCON_RIDGE_2C_NHI,
.subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID,
},
{
.class = PCI_CLASS_SYSTEM_OTHER << 8, .class_mask = ~0,
.vendor = PCI_VENDOR_ID_INTEL,
.device = PCI_DEVICE_ID_INTEL_FALCON_RIDGE_4C_NHI,
.subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID,
},
/* Thunderbolt 3 */
{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_2C_NHI) },
{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_4C_NHI) },
{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_USBONLY_NHI) },
{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_LP_NHI) },
{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_LP_USBONLY_NHI) },
{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_2C_NHI) },
{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_4C_NHI) },
{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_USBONLY_NHI) },
{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_TITAN_RIDGE_2C_NHI) },
{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_TITAN_RIDGE_4C_NHI) },
{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ICL_NHI0),
.driver_data = (kernel_ulong_t)&icl_nhi_ops },
{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ICL_NHI1),
.driver_data = (kernel_ulong_t)&icl_nhi_ops },
/* Thunderbolt 4 */
{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_TGL_NHI0),
.driver_data = (kernel_ulong_t)&icl_nhi_ops },
{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_TGL_NHI1),
.driver_data = (kernel_ulong_t)&icl_nhi_ops },
{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_TGL_H_NHI0),
.driver_data = (kernel_ulong_t)&icl_nhi_ops },
{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_TGL_H_NHI1),
.driver_data = (kernel_ulong_t)&icl_nhi_ops },
{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ADL_NHI0),
.driver_data = (kernel_ulong_t)&icl_nhi_ops },
{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ADL_NHI1),
.driver_data = (kernel_ulong_t)&icl_nhi_ops },
{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_RPL_NHI0),
.driver_data = (kernel_ulong_t)&icl_nhi_ops },
{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_RPL_NHI1),
.driver_data = (kernel_ulong_t)&icl_nhi_ops },
{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_MTL_M_NHI0),
.driver_data = (kernel_ulong_t)&icl_nhi_ops },
{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_MTL_P_NHI0),
.driver_data = (kernel_ulong_t)&icl_nhi_ops },
{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_MTL_P_NHI1),
.driver_data = (kernel_ulong_t)&icl_nhi_ops },
{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_BARLOW_RIDGE_HOST_80G_NHI) },
{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_BARLOW_RIDGE_HOST_40G_NHI) },
/* Any USB4 compliant host */
{ PCI_DEVICE_CLASS(PCI_CLASS_SERIAL_USB_USB4, ~0) },
{ 0,}
};
MODULE_DEVICE_TABLE(pci, nhi_ids);
MODULE_DESCRIPTION("Thunderbolt/USB4 core driver");
MODULE_LICENSE("GPL");
static struct pci_driver nhi_driver = {
.name = "thunderbolt",
.id_table = nhi_ids,
.probe = nhi_probe,
.remove = nhi_remove,
.shutdown = nhi_remove,
.driver.pm = &nhi_pm_ops,
};
static int __init nhi_init(void)
{
int ret;
ret = tb_domain_init();
if (ret)
return ret;
ret = pci_register_driver(&nhi_driver);
if (ret)
tb_domain_exit();
return ret;
}
static void __exit nhi_unload(void)
{
pci_unregister_driver(&nhi_driver);
tb_domain_exit();
}
rootfs_initcall(nhi_init);
module_exit(nhi_unload);
| linux-master | drivers/thunderbolt/nhi.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Thunderbolt XDomain discovery protocol support
*
* Copyright (C) 2017, Intel Corporation
* Authors: Michael Jamet <[email protected]>
* Mika Westerberg <[email protected]>
*/
#include <linux/device.h>
#include <linux/delay.h>
#include <linux/kmod.h>
#include <linux/module.h>
#include <linux/pm_runtime.h>
#include <linux/prandom.h>
#include <linux/string_helpers.h>
#include <linux/utsname.h>
#include <linux/uuid.h>
#include <linux/workqueue.h>
#include "tb.h"
#define XDOMAIN_SHORT_TIMEOUT 100 /* ms */
#define XDOMAIN_DEFAULT_TIMEOUT 1000 /* ms */
#define XDOMAIN_BONDING_TIMEOUT 10000 /* ms */
#define XDOMAIN_RETRIES 10
#define XDOMAIN_DEFAULT_MAX_HOPID 15
enum {
XDOMAIN_STATE_INIT,
XDOMAIN_STATE_UUID,
XDOMAIN_STATE_LINK_STATUS,
XDOMAIN_STATE_LINK_STATE_CHANGE,
XDOMAIN_STATE_LINK_STATUS2,
XDOMAIN_STATE_BONDING_UUID_LOW,
XDOMAIN_STATE_BONDING_UUID_HIGH,
XDOMAIN_STATE_PROPERTIES,
XDOMAIN_STATE_ENUMERATED,
XDOMAIN_STATE_ERROR,
};
static const char * const state_names[] = {
[XDOMAIN_STATE_INIT] = "INIT",
[XDOMAIN_STATE_UUID] = "UUID",
[XDOMAIN_STATE_LINK_STATUS] = "LINK_STATUS",
[XDOMAIN_STATE_LINK_STATE_CHANGE] = "LINK_STATE_CHANGE",
[XDOMAIN_STATE_LINK_STATUS2] = "LINK_STATUS2",
[XDOMAIN_STATE_BONDING_UUID_LOW] = "BONDING_UUID_LOW",
[XDOMAIN_STATE_BONDING_UUID_HIGH] = "BONDING_UUID_HIGH",
[XDOMAIN_STATE_PROPERTIES] = "PROPERTIES",
[XDOMAIN_STATE_ENUMERATED] = "ENUMERATED",
[XDOMAIN_STATE_ERROR] = "ERROR",
};
struct xdomain_request_work {
struct work_struct work;
struct tb_xdp_header *pkg;
struct tb *tb;
};
static bool tb_xdomain_enabled = true;
module_param_named(xdomain, tb_xdomain_enabled, bool, 0444);
MODULE_PARM_DESC(xdomain, "allow XDomain protocol (default: true)");
/*
* Serializes access to the properties and protocol handlers below. If
* you need to take both this lock and the struct tb_xdomain lock, take
* this one first.
*/
static DEFINE_MUTEX(xdomain_lock);
/* Properties exposed to the remote domains */
static struct tb_property_dir *xdomain_property_dir;
static u32 xdomain_property_block_gen;
/* Additional protocol handlers */
static LIST_HEAD(protocol_handlers);
/* UUID for XDomain discovery protocol: b638d70e-42ff-40bb-97c2-90e2c0b2ff07 */
static const uuid_t tb_xdp_uuid =
UUID_INIT(0xb638d70e, 0x42ff, 0x40bb,
0x97, 0xc2, 0x90, 0xe2, 0xc0, 0xb2, 0xff, 0x07);
bool tb_is_xdomain_enabled(void)
{
return tb_xdomain_enabled && tb_acpi_is_xdomain_allowed();
}
static bool tb_xdomain_match(const struct tb_cfg_request *req,
const struct ctl_pkg *pkg)
{
switch (pkg->frame.eof) {
case TB_CFG_PKG_ERROR:
return true;
case TB_CFG_PKG_XDOMAIN_RESP: {
const struct tb_xdp_header *res_hdr = pkg->buffer;
const struct tb_xdp_header *req_hdr = req->request;
if (pkg->frame.size < req->response_size / 4)
return false;
/* Make sure route matches */
if ((res_hdr->xd_hdr.route_hi & ~BIT(31)) !=
req_hdr->xd_hdr.route_hi)
return false;
if ((res_hdr->xd_hdr.route_lo) != req_hdr->xd_hdr.route_lo)
return false;
/* Check that the XDomain protocol matches */
if (!uuid_equal(&res_hdr->uuid, &req_hdr->uuid))
return false;
return true;
}
default:
return false;
}
}
static bool tb_xdomain_copy(struct tb_cfg_request *req,
const struct ctl_pkg *pkg)
{
memcpy(req->response, pkg->buffer, req->response_size);
req->result.err = 0;
return true;
}
static void response_ready(void *data)
{
tb_cfg_request_put(data);
}
static int __tb_xdomain_response(struct tb_ctl *ctl, const void *response,
size_t size, enum tb_cfg_pkg_type type)
{
struct tb_cfg_request *req;
req = tb_cfg_request_alloc();
if (!req)
return -ENOMEM;
req->match = tb_xdomain_match;
req->copy = tb_xdomain_copy;
req->request = response;
req->request_size = size;
req->request_type = type;
return tb_cfg_request(ctl, req, response_ready, req);
}
/**
 * tb_xdomain_response() - Send an XDomain response message
* @xd: XDomain to send the message
* @response: Response to send
* @size: Size of the response
* @type: PDF type of the response
*
 * This can be used to send an XDomain response message to the other
* domain. No response for the message is expected.
*
* Return: %0 in case of success and negative errno in case of failure
*/
int tb_xdomain_response(struct tb_xdomain *xd, const void *response,
size_t size, enum tb_cfg_pkg_type type)
{
return __tb_xdomain_response(xd->tb->ctl, response, size, type);
}
EXPORT_SYMBOL_GPL(tb_xdomain_response);
static int __tb_xdomain_request(struct tb_ctl *ctl, const void *request,
size_t request_size, enum tb_cfg_pkg_type request_type, void *response,
size_t response_size, enum tb_cfg_pkg_type response_type,
unsigned int timeout_msec)
{
struct tb_cfg_request *req;
struct tb_cfg_result res;
req = tb_cfg_request_alloc();
if (!req)
return -ENOMEM;
req->match = tb_xdomain_match;
req->copy = tb_xdomain_copy;
req->request = request;
req->request_size = request_size;
req->request_type = request_type;
req->response = response;
req->response_size = response_size;
req->response_type = response_type;
res = tb_cfg_request_sync(ctl, req, timeout_msec);
tb_cfg_request_put(req);
return res.err == 1 ? -EIO : res.err;
}
/**
 * tb_xdomain_request() - Send an XDomain request
* @xd: XDomain to send the request
* @request: Request to send
* @request_size: Size of the request in bytes
* @request_type: PDF type of the request
* @response: Response is copied here
* @response_size: Expected size of the response in bytes
* @response_type: Expected PDF type of the response
* @timeout_msec: Timeout in milliseconds to wait for the response
*
* This function can be used to send XDomain control channel messages to
* the other domain. The function waits until the response is received
 * or the timeout triggers, whichever comes first.
*
* Return: %0 in case of success and negative errno in case of failure
*/
int tb_xdomain_request(struct tb_xdomain *xd, const void *request,
size_t request_size, enum tb_cfg_pkg_type request_type,
void *response, size_t response_size,
enum tb_cfg_pkg_type response_type, unsigned int timeout_msec)
{
return __tb_xdomain_request(xd->tb->ctl, request, request_size,
request_type, response, response_size,
response_type, timeout_msec);
}
EXPORT_SYMBOL_GPL(tb_xdomain_request);
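/*
 * Illustrative sketch, not part of the driver: a service sending a raw
 * XDomain message and waiting for the reply. The payload layout here is
 * a hypothetical placeholder; only the tb_xdomain_request() call, the
 * PDF types and the timeout are real.
 */
static int __maybe_unused example_svc_query(struct tb_xdomain *xd)
{
	u32 req[4] = { }, resp[4] = { };

	/* Fill @req according to the service specific protocol here */
	return tb_xdomain_request(xd, req, sizeof(req),
				  TB_CFG_PKG_XDOMAIN_REQ,
				  resp, sizeof(resp),
				  TB_CFG_PKG_XDOMAIN_RESP,
				  XDOMAIN_DEFAULT_TIMEOUT);
}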
static inline void tb_xdp_fill_header(struct tb_xdp_header *hdr, u64 route,
u8 sequence, enum tb_xdp_type type, size_t size)
{
u32 length_sn;
length_sn = (size - sizeof(hdr->xd_hdr)) / 4;
length_sn |= (sequence << TB_XDOMAIN_SN_SHIFT) & TB_XDOMAIN_SN_MASK;
hdr->xd_hdr.route_hi = upper_32_bits(route);
hdr->xd_hdr.route_lo = lower_32_bits(route);
hdr->xd_hdr.length_sn = length_sn;
hdr->type = type;
memcpy(&hdr->uuid, &tb_xdp_uuid, sizeof(tb_xdp_uuid));
}
static int tb_xdp_handle_error(const struct tb_xdp_error_response *res)
{
if (res->hdr.type != ERROR_RESPONSE)
return 0;
switch (res->error) {
case ERROR_UNKNOWN_PACKET:
case ERROR_UNKNOWN_DOMAIN:
return -EIO;
case ERROR_NOT_SUPPORTED:
return -ENOTSUPP;
case ERROR_NOT_READY:
return -EAGAIN;
default:
break;
}
return 0;
}
static int tb_xdp_uuid_request(struct tb_ctl *ctl, u64 route, int retry,
uuid_t *uuid, u64 *remote_route)
{
struct tb_xdp_uuid_response res;
struct tb_xdp_uuid req;
int ret;
memset(&req, 0, sizeof(req));
tb_xdp_fill_header(&req.hdr, route, retry % 4, UUID_REQUEST,
sizeof(req));
memset(&res, 0, sizeof(res));
ret = __tb_xdomain_request(ctl, &req, sizeof(req),
TB_CFG_PKG_XDOMAIN_REQ, &res, sizeof(res),
TB_CFG_PKG_XDOMAIN_RESP,
XDOMAIN_DEFAULT_TIMEOUT);
if (ret)
return ret;
ret = tb_xdp_handle_error(&res.err);
if (ret)
return ret;
uuid_copy(uuid, &res.src_uuid);
*remote_route = (u64)res.src_route_hi << 32 | res.src_route_lo;
return 0;
}
static int tb_xdp_uuid_response(struct tb_ctl *ctl, u64 route, u8 sequence,
const uuid_t *uuid)
{
struct tb_xdp_uuid_response res;
memset(&res, 0, sizeof(res));
tb_xdp_fill_header(&res.hdr, route, sequence, UUID_RESPONSE,
sizeof(res));
uuid_copy(&res.src_uuid, uuid);
res.src_route_hi = upper_32_bits(route);
res.src_route_lo = lower_32_bits(route);
return __tb_xdomain_response(ctl, &res, sizeof(res),
TB_CFG_PKG_XDOMAIN_RESP);
}
static int tb_xdp_error_response(struct tb_ctl *ctl, u64 route, u8 sequence,
enum tb_xdp_error error)
{
struct tb_xdp_error_response res;
memset(&res, 0, sizeof(res));
tb_xdp_fill_header(&res.hdr, route, sequence, ERROR_RESPONSE,
sizeof(res));
res.error = error;
return __tb_xdomain_response(ctl, &res, sizeof(res),
TB_CFG_PKG_XDOMAIN_RESP);
}
static int tb_xdp_properties_request(struct tb_ctl *ctl, u64 route,
const uuid_t *src_uuid, const uuid_t *dst_uuid, int retry,
u32 **block, u32 *generation)
{
struct tb_xdp_properties_response *res;
struct tb_xdp_properties req;
u16 data_len, len;
size_t total_size;
u32 *data = NULL;
int ret;
total_size = sizeof(*res) + TB_XDP_PROPERTIES_MAX_DATA_LENGTH * 4;
res = kzalloc(total_size, GFP_KERNEL);
if (!res)
return -ENOMEM;
memset(&req, 0, sizeof(req));
tb_xdp_fill_header(&req.hdr, route, retry % 4, PROPERTIES_REQUEST,
sizeof(req));
memcpy(&req.src_uuid, src_uuid, sizeof(*src_uuid));
memcpy(&req.dst_uuid, dst_uuid, sizeof(*dst_uuid));
data_len = 0;
do {
ret = __tb_xdomain_request(ctl, &req, sizeof(req),
TB_CFG_PKG_XDOMAIN_REQ, res,
total_size, TB_CFG_PKG_XDOMAIN_RESP,
XDOMAIN_DEFAULT_TIMEOUT);
if (ret)
goto err;
ret = tb_xdp_handle_error(&res->err);
if (ret)
goto err;
/*
* Package length includes the whole payload without the
* XDomain header. Validate first that the package is at
* least the size of the response structure.
*/
len = res->hdr.xd_hdr.length_sn & TB_XDOMAIN_LENGTH_MASK;
if (len < sizeof(*res) / 4) {
ret = -EINVAL;
goto err;
}
len += sizeof(res->hdr.xd_hdr) / 4;
len -= sizeof(*res) / 4;
if (res->offset != req.offset) {
ret = -EINVAL;
goto err;
}
/*
* First time allocate block that has enough space for
* the whole properties block.
*/
if (!data) {
data_len = res->data_length;
if (data_len > TB_XDP_PROPERTIES_MAX_LENGTH) {
ret = -E2BIG;
goto err;
}
data = kcalloc(data_len, sizeof(u32), GFP_KERNEL);
if (!data) {
ret = -ENOMEM;
goto err;
}
}
memcpy(data + req.offset, res->data, len * 4);
req.offset += len;
} while (!data_len || req.offset < data_len);
*block = data;
*generation = res->generation;
kfree(res);
return data_len;
err:
kfree(data);
kfree(res);
return ret;
}
static int tb_xdp_properties_response(struct tb *tb, struct tb_ctl *ctl,
struct tb_xdomain *xd, u8 sequence, const struct tb_xdp_properties *req)
{
struct tb_xdp_properties_response *res;
size_t total_size;
u16 len;
int ret;
/*
* Currently we expect all requests to be directed to us. The
* protocol supports forwarding, which we might add support
* for later on.
*/
if (!uuid_equal(xd->local_uuid, &req->dst_uuid)) {
tb_xdp_error_response(ctl, xd->route, sequence,
ERROR_UNKNOWN_DOMAIN);
return 0;
}
mutex_lock(&xd->lock);
if (req->offset >= xd->local_property_block_len) {
mutex_unlock(&xd->lock);
return -EINVAL;
}
len = xd->local_property_block_len - req->offset;
len = min_t(u16, len, TB_XDP_PROPERTIES_MAX_DATA_LENGTH);
total_size = sizeof(*res) + len * 4;
res = kzalloc(total_size, GFP_KERNEL);
if (!res) {
mutex_unlock(&xd->lock);
return -ENOMEM;
}
tb_xdp_fill_header(&res->hdr, xd->route, sequence, PROPERTIES_RESPONSE,
total_size);
res->generation = xd->local_property_block_gen;
res->data_length = xd->local_property_block_len;
res->offset = req->offset;
uuid_copy(&res->src_uuid, xd->local_uuid);
uuid_copy(&res->dst_uuid, &req->src_uuid);
memcpy(res->data, &xd->local_property_block[req->offset], len * 4);
mutex_unlock(&xd->lock);
ret = __tb_xdomain_response(ctl, res, total_size,
TB_CFG_PKG_XDOMAIN_RESP);
kfree(res);
return ret;
}
static int tb_xdp_properties_changed_request(struct tb_ctl *ctl, u64 route,
int retry, const uuid_t *uuid)
{
struct tb_xdp_properties_changed_response res;
struct tb_xdp_properties_changed req;
int ret;
memset(&req, 0, sizeof(req));
tb_xdp_fill_header(&req.hdr, route, retry % 4,
PROPERTIES_CHANGED_REQUEST, sizeof(req));
uuid_copy(&req.src_uuid, uuid);
memset(&res, 0, sizeof(res));
ret = __tb_xdomain_request(ctl, &req, sizeof(req),
TB_CFG_PKG_XDOMAIN_REQ, &res, sizeof(res),
TB_CFG_PKG_XDOMAIN_RESP,
XDOMAIN_DEFAULT_TIMEOUT);
if (ret)
return ret;
return tb_xdp_handle_error(&res.err);
}
static int
tb_xdp_properties_changed_response(struct tb_ctl *ctl, u64 route, u8 sequence)
{
struct tb_xdp_properties_changed_response res;
memset(&res, 0, sizeof(res));
tb_xdp_fill_header(&res.hdr, route, sequence,
PROPERTIES_CHANGED_RESPONSE, sizeof(res));
return __tb_xdomain_response(ctl, &res, sizeof(res),
TB_CFG_PKG_XDOMAIN_RESP);
}
static int tb_xdp_link_state_status_request(struct tb_ctl *ctl, u64 route,
u8 sequence, u8 *slw, u8 *tlw,
u8 *sls, u8 *tls)
{
struct tb_xdp_link_state_status_response res;
struct tb_xdp_link_state_status req;
int ret;
memset(&req, 0, sizeof(req));
tb_xdp_fill_header(&req.hdr, route, sequence, LINK_STATE_STATUS_REQUEST,
sizeof(req));
memset(&res, 0, sizeof(res));
ret = __tb_xdomain_request(ctl, &req, sizeof(req), TB_CFG_PKG_XDOMAIN_REQ,
&res, sizeof(res), TB_CFG_PKG_XDOMAIN_RESP,
XDOMAIN_DEFAULT_TIMEOUT);
if (ret)
return ret;
ret = tb_xdp_handle_error(&res.err);
if (ret)
return ret;
if (res.status != 0)
return -EREMOTEIO;
*slw = res.slw;
*tlw = res.tlw;
*sls = res.sls;
*tls = res.tls;
return 0;
}
static int tb_xdp_link_state_status_response(struct tb *tb, struct tb_ctl *ctl,
struct tb_xdomain *xd, u8 sequence)
{
struct tb_xdp_link_state_status_response res;
struct tb_port *port = tb_xdomain_downstream_port(xd);
u32 val[2];
int ret;
memset(&res, 0, sizeof(res));
tb_xdp_fill_header(&res.hdr, xd->route, sequence,
LINK_STATE_STATUS_RESPONSE, sizeof(res));
ret = tb_port_read(port, val, TB_CFG_PORT,
port->cap_phy + LANE_ADP_CS_0, ARRAY_SIZE(val));
if (ret)
return ret;
res.slw = (val[0] & LANE_ADP_CS_0_SUPPORTED_WIDTH_MASK) >>
LANE_ADP_CS_0_SUPPORTED_WIDTH_SHIFT;
res.sls = (val[0] & LANE_ADP_CS_0_SUPPORTED_SPEED_MASK) >>
LANE_ADP_CS_0_SUPPORTED_SPEED_SHIFT;
res.tls = val[1] & LANE_ADP_CS_1_TARGET_SPEED_MASK;
res.tlw = (val[1] & LANE_ADP_CS_1_TARGET_WIDTH_MASK) >>
LANE_ADP_CS_1_TARGET_WIDTH_SHIFT;
return __tb_xdomain_response(ctl, &res, sizeof(res),
TB_CFG_PKG_XDOMAIN_RESP);
}
static int tb_xdp_link_state_change_request(struct tb_ctl *ctl, u64 route,
u8 sequence, u8 tlw, u8 tls)
{
struct tb_xdp_link_state_change_response res;
struct tb_xdp_link_state_change req;
int ret;
memset(&req, 0, sizeof(req));
tb_xdp_fill_header(&req.hdr, route, sequence, LINK_STATE_CHANGE_REQUEST,
sizeof(req));
req.tlw = tlw;
req.tls = tls;
memset(&res, 0, sizeof(res));
ret = __tb_xdomain_request(ctl, &req, sizeof(req), TB_CFG_PKG_XDOMAIN_REQ,
&res, sizeof(res), TB_CFG_PKG_XDOMAIN_RESP,
XDOMAIN_DEFAULT_TIMEOUT);
if (ret)
return ret;
ret = tb_xdp_handle_error(&res.err);
if (ret)
return ret;
return res.status != 0 ? -EREMOTEIO : 0;
}
static int tb_xdp_link_state_change_response(struct tb_ctl *ctl, u64 route,
u8 sequence, u32 status)
{
struct tb_xdp_link_state_change_response res;
memset(&res, 0, sizeof(res));
tb_xdp_fill_header(&res.hdr, route, sequence, LINK_STATE_CHANGE_RESPONSE,
sizeof(res));
res.status = status;
return __tb_xdomain_response(ctl, &res, sizeof(res),
TB_CFG_PKG_XDOMAIN_RESP);
}
/**
* tb_register_protocol_handler() - Register protocol handler
* @handler: Handler to register
*
* This allows XDomain service drivers to hook into incoming XDomain
* messages. After this function is called the service driver needs to
* be able to handle calls to the callback whenever a package with the
* registered protocol is received.
*/
int tb_register_protocol_handler(struct tb_protocol_handler *handler)
{
if (!handler->uuid || !handler->callback)
return -EINVAL;
if (uuid_equal(handler->uuid, &tb_xdp_uuid))
return -EINVAL;
mutex_lock(&xdomain_lock);
list_add_tail(&handler->list, &protocol_handlers);
mutex_unlock(&xdomain_lock);
return 0;
}
EXPORT_SYMBOL_GPL(tb_register_protocol_handler);
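/*
* For example, a service driver could hook packets carrying its own
* protocol UUID like this; my_uuid, my_callback and my_data are
* placeholders for the driver's own definitions:
*
*	static struct tb_protocol_handler my_handler = {
*		.uuid = &my_uuid,
*		.callback = my_callback,
*		.data = &my_data,
*	};
*
*	ret = tb_register_protocol_handler(&my_handler);
*/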
/**
* tb_unregister_protocol_handler() - Unregister protocol handler
* @handler: Handler to unregister
*
* Removes the previously registered protocol handler.
*/
void tb_unregister_protocol_handler(struct tb_protocol_handler *handler)
{
mutex_lock(&xdomain_lock);
list_del_init(&handler->list);
mutex_unlock(&xdomain_lock);
}
EXPORT_SYMBOL_GPL(tb_unregister_protocol_handler);
static void update_property_block(struct tb_xdomain *xd)
{
mutex_lock(&xdomain_lock);
mutex_lock(&xd->lock);
/*
* If the local property block is not up-to-date, rebuild it now
* based on the global property template.
*/
if (!xd->local_property_block ||
xd->local_property_block_gen < xdomain_property_block_gen) {
struct tb_property_dir *dir;
int ret, block_len;
u32 *block;
dir = tb_property_copy_dir(xdomain_property_dir);
if (!dir) {
dev_warn(&xd->dev, "failed to copy properties\n");
goto out_unlock;
}
/* Fill in non-static properties now */
tb_property_add_text(dir, "deviceid", utsname()->nodename);
tb_property_add_immediate(dir, "maxhopid", xd->local_max_hopid);
ret = tb_property_format_dir(dir, NULL, 0);
if (ret < 0) {
dev_warn(&xd->dev, "local property block creation failed\n");
tb_property_free_dir(dir);
goto out_unlock;
}
block_len = ret;
block = kcalloc(block_len, sizeof(*block), GFP_KERNEL);
if (!block) {
tb_property_free_dir(dir);
goto out_unlock;
}
ret = tb_property_format_dir(dir, block, block_len);
if (ret) {
dev_warn(&xd->dev, "property block generation failed\n");
tb_property_free_dir(dir);
kfree(block);
goto out_unlock;
}
tb_property_free_dir(dir);
/* Release the previous block */
kfree(xd->local_property_block);
/* Assign new one */
xd->local_property_block = block;
xd->local_property_block_len = block_len;
xd->local_property_block_gen = xdomain_property_block_gen;
}
out_unlock:
mutex_unlock(&xd->lock);
mutex_unlock(&xdomain_lock);
}
static void tb_xdp_handle_request(struct work_struct *work)
{
struct xdomain_request_work *xw = container_of(work, typeof(*xw), work);
const struct tb_xdp_header *pkg = xw->pkg;
const struct tb_xdomain_header *xhdr = &pkg->xd_hdr;
struct tb *tb = xw->tb;
struct tb_ctl *ctl = tb->ctl;
struct tb_xdomain *xd;
const uuid_t *uuid;
int ret = 0;
u32 sequence;
u64 route;
route = ((u64)xhdr->route_hi << 32 | xhdr->route_lo) & ~BIT_ULL(63);
sequence = xhdr->length_sn & TB_XDOMAIN_SN_MASK;
sequence >>= TB_XDOMAIN_SN_SHIFT;
mutex_lock(&tb->lock);
if (tb->root_switch)
uuid = tb->root_switch->uuid;
else
uuid = NULL;
mutex_unlock(&tb->lock);
if (!uuid) {
tb_xdp_error_response(ctl, route, sequence, ERROR_NOT_READY);
goto out;
}
xd = tb_xdomain_find_by_route_locked(tb, route);
if (xd)
update_property_block(xd);
switch (pkg->type) {
case PROPERTIES_REQUEST:
tb_dbg(tb, "%llx: received XDomain properties request\n", route);
if (xd) {
ret = tb_xdp_properties_response(tb, ctl, xd, sequence,
(const struct tb_xdp_properties *)pkg);
}
break;
case PROPERTIES_CHANGED_REQUEST:
tb_dbg(tb, "%llx: received XDomain properties changed request\n",
route);
ret = tb_xdp_properties_changed_response(ctl, route, sequence);
/*
* Since the properties have been changed, let's update
* the xdomain related to this connection as well in
* case there is a change in services it offers.
*/
if (xd && device_is_registered(&xd->dev))
queue_delayed_work(tb->wq, &xd->state_work,
msecs_to_jiffies(XDOMAIN_SHORT_TIMEOUT));
break;
case UUID_REQUEST_OLD:
case UUID_REQUEST:
tb_dbg(tb, "%llx: received XDomain UUID request\n", route);
ret = tb_xdp_uuid_response(ctl, route, sequence, uuid);
break;
case LINK_STATE_STATUS_REQUEST:
tb_dbg(tb, "%llx: received XDomain link state status request\n",
route);
if (xd) {
ret = tb_xdp_link_state_status_response(tb, ctl, xd,
sequence);
} else {
tb_xdp_error_response(ctl, route, sequence,
ERROR_NOT_READY);
}
break;
case LINK_STATE_CHANGE_REQUEST:
tb_dbg(tb, "%llx: received XDomain link state change request\n",
route);
if (xd && xd->state == XDOMAIN_STATE_BONDING_UUID_HIGH) {
const struct tb_xdp_link_state_change *lsc =
(const struct tb_xdp_link_state_change *)pkg;
ret = tb_xdp_link_state_change_response(ctl, route,
sequence, 0);
xd->target_link_width = lsc->tlw;
queue_delayed_work(tb->wq, &xd->state_work,
msecs_to_jiffies(XDOMAIN_SHORT_TIMEOUT));
} else {
tb_xdp_error_response(ctl, route, sequence,
ERROR_NOT_READY);
}
break;
default:
tb_dbg(tb, "%llx: unknown XDomain request %#x\n", route, pkg->type);
tb_xdp_error_response(ctl, route, sequence,
ERROR_NOT_SUPPORTED);
break;
}
tb_xdomain_put(xd);
if (ret) {
tb_warn(tb, "failed to send XDomain response for %#x\n",
pkg->type);
}
out:
kfree(xw->pkg);
kfree(xw);
tb_domain_put(tb);
}
static bool
tb_xdp_schedule_request(struct tb *tb, const struct tb_xdp_header *hdr,
size_t size)
{
struct xdomain_request_work *xw;
xw = kmalloc(sizeof(*xw), GFP_KERNEL);
if (!xw)
return false;
INIT_WORK(&xw->work, tb_xdp_handle_request);
xw->pkg = kmemdup(hdr, size, GFP_KERNEL);
if (!xw->pkg) {
kfree(xw);
return false;
}
xw->tb = tb_domain_get(tb);
schedule_work(&xw->work);
return true;
}
/**
* tb_register_service_driver() - Register XDomain service driver
* @drv: Driver to register
*
* Registers new service driver from @drv to the bus.
*/
int tb_register_service_driver(struct tb_service_driver *drv)
{
drv->driver.bus = &tb_bus_type;
return driver_register(&drv->driver);
}
EXPORT_SYMBOL_GPL(tb_register_service_driver);
/**
* tb_unregister_service_driver() - Unregister XDomain service driver
* @drv: Driver to unregister
*
* Unregisters XDomain service driver from the bus.
*/
void tb_unregister_service_driver(struct tb_service_driver *drv)
{
driver_unregister(&drv->driver);
}
EXPORT_SYMBOL_GPL(tb_unregister_service_driver);
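/*
* A minimal service driver registration might look like the sketch below;
* the key, protocol ID and callback names are illustrative only:
*
*	static const struct tb_service_id my_ids[] = {
*		{
*			.match_flags = TBSVC_MATCH_PROTOCOL_KEY |
*				       TBSVC_MATCH_PROTOCOL_ID,
*			.protocol_key = "network",
*			.protocol_id = 1,
*		},
*		{ },
*	};
*
*	static struct tb_service_driver my_driver = {
*		.driver = {
*			.owner = THIS_MODULE,
*			.name = "my-service",
*		},
*		.probe = my_probe,
*		.remove = my_remove,
*		.id_table = my_ids,
*	};
*
*	tb_register_service_driver(&my_driver);
*/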
static ssize_t key_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct tb_service *svc = container_of(dev, struct tb_service, dev);
/*
* It should be null terminated but anything else is pretty much
* allowed.
*/
return sysfs_emit(buf, "%*pE\n", (int)strlen(svc->key), svc->key);
}
static DEVICE_ATTR_RO(key);
static int get_modalias(const struct tb_service *svc, char *buf, size_t size)
{
return snprintf(buf, size, "tbsvc:k%sp%08Xv%08Xr%08X", svc->key,
svc->prtcid, svc->prtcvers, svc->prtcrevs);
}
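/*
* For example, a service with key "network" and prtcid, prtcvers and
* prtcrevs all set to 1 yields the modalias
* "tbsvc:knetworkp00000001v00000001r00000001".
*/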
static ssize_t modalias_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct tb_service *svc = container_of(dev, struct tb_service, dev);
/* Full buffer size except new line and null termination */
get_modalias(svc, buf, PAGE_SIZE - 2);
return strlen(strcat(buf, "\n"));
}
static DEVICE_ATTR_RO(modalias);
static ssize_t prtcid_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct tb_service *svc = container_of(dev, struct tb_service, dev);
return sysfs_emit(buf, "%u\n", svc->prtcid);
}
static DEVICE_ATTR_RO(prtcid);
static ssize_t prtcvers_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct tb_service *svc = container_of(dev, struct tb_service, dev);
return sysfs_emit(buf, "%u\n", svc->prtcvers);
}
static DEVICE_ATTR_RO(prtcvers);
static ssize_t prtcrevs_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct tb_service *svc = container_of(dev, struct tb_service, dev);
return sysfs_emit(buf, "%u\n", svc->prtcrevs);
}
static DEVICE_ATTR_RO(prtcrevs);
static ssize_t prtcstns_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct tb_service *svc = container_of(dev, struct tb_service, dev);
return sysfs_emit(buf, "0x%08x\n", svc->prtcstns);
}
static DEVICE_ATTR_RO(prtcstns);
static struct attribute *tb_service_attrs[] = {
&dev_attr_key.attr,
&dev_attr_modalias.attr,
&dev_attr_prtcid.attr,
&dev_attr_prtcvers.attr,
&dev_attr_prtcrevs.attr,
&dev_attr_prtcstns.attr,
NULL,
};
static const struct attribute_group tb_service_attr_group = {
.attrs = tb_service_attrs,
};
static const struct attribute_group *tb_service_attr_groups[] = {
&tb_service_attr_group,
NULL,
};
static int tb_service_uevent(const struct device *dev, struct kobj_uevent_env *env)
{
const struct tb_service *svc = container_of_const(dev, struct tb_service, dev);
char modalias[64];
get_modalias(svc, modalias, sizeof(modalias));
return add_uevent_var(env, "MODALIAS=%s", modalias);
}
static void tb_service_release(struct device *dev)
{
struct tb_service *svc = container_of(dev, struct tb_service, dev);
struct tb_xdomain *xd = tb_service_parent(svc);
tb_service_debugfs_remove(svc);
ida_free(&xd->service_ids, svc->id);
kfree(svc->key);
kfree(svc);
}
struct device_type tb_service_type = {
.name = "thunderbolt_service",
.groups = tb_service_attr_groups,
.uevent = tb_service_uevent,
.release = tb_service_release,
};
EXPORT_SYMBOL_GPL(tb_service_type);
static int remove_missing_service(struct device *dev, void *data)
{
struct tb_xdomain *xd = data;
struct tb_service *svc;
svc = tb_to_service(dev);
if (!svc)
return 0;
if (!tb_property_find(xd->remote_properties, svc->key,
TB_PROPERTY_TYPE_DIRECTORY))
device_unregister(dev);
return 0;
}
static int find_service(struct device *dev, void *data)
{
const struct tb_property *p = data;
struct tb_service *svc;
svc = tb_to_service(dev);
if (!svc)
return 0;
return !strcmp(svc->key, p->key);
}
static int populate_service(struct tb_service *svc,
struct tb_property *property)
{
struct tb_property_dir *dir = property->value.dir;
struct tb_property *p;
/* Fill in standard properties */
p = tb_property_find(dir, "prtcid", TB_PROPERTY_TYPE_VALUE);
if (p)
svc->prtcid = p->value.immediate;
p = tb_property_find(dir, "prtcvers", TB_PROPERTY_TYPE_VALUE);
if (p)
svc->prtcvers = p->value.immediate;
p = tb_property_find(dir, "prtcrevs", TB_PROPERTY_TYPE_VALUE);
if (p)
svc->prtcrevs = p->value.immediate;
p = tb_property_find(dir, "prtcstns", TB_PROPERTY_TYPE_VALUE);
if (p)
svc->prtcstns = p->value.immediate;
svc->key = kstrdup(property->key, GFP_KERNEL);
if (!svc->key)
return -ENOMEM;
return 0;
}
static void enumerate_services(struct tb_xdomain *xd)
{
struct tb_service *svc;
struct tb_property *p;
struct device *dev;
int id;
/*
* First remove all services that are not available anymore in
* the updated property block.
*/
device_for_each_child_reverse(&xd->dev, xd, remove_missing_service);
/* Then re-enumerate properties creating new services as we go */
tb_property_for_each(xd->remote_properties, p) {
if (p->type != TB_PROPERTY_TYPE_DIRECTORY)
continue;
/* If the service exists already we are fine */
dev = device_find_child(&xd->dev, p, find_service);
if (dev) {
put_device(dev);
continue;
}
svc = kzalloc(sizeof(*svc), GFP_KERNEL);
if (!svc)
break;
if (populate_service(svc, p)) {
kfree(svc);
break;
}
id = ida_alloc(&xd->service_ids, GFP_KERNEL);
if (id < 0) {
kfree(svc->key);
kfree(svc);
break;
}
svc->id = id;
svc->dev.bus = &tb_bus_type;
svc->dev.type = &tb_service_type;
svc->dev.parent = &xd->dev;
dev_set_name(&svc->dev, "%s.%d", dev_name(&xd->dev), svc->id);
tb_service_debugfs_init(svc);
if (device_register(&svc->dev)) {
put_device(&svc->dev);
break;
}
}
}
static int populate_properties(struct tb_xdomain *xd,
struct tb_property_dir *dir)
{
const struct tb_property *p;
/* Required properties */
p = tb_property_find(dir, "deviceid", TB_PROPERTY_TYPE_VALUE);
if (!p)
return -EINVAL;
xd->device = p->value.immediate;
p = tb_property_find(dir, "vendorid", TB_PROPERTY_TYPE_VALUE);
if (!p)
return -EINVAL;
xd->vendor = p->value.immediate;
p = tb_property_find(dir, "maxhopid", TB_PROPERTY_TYPE_VALUE);
/*
* USB4 inter-domain spec suggests using 15 as HopID if the
* other end does not announce it in a property. This is for
* TBT3 compatibility.
*/
xd->remote_max_hopid = p ? p->value.immediate : XDOMAIN_DEFAULT_MAX_HOPID;
kfree(xd->device_name);
xd->device_name = NULL;
kfree(xd->vendor_name);
xd->vendor_name = NULL;
/* Optional properties */
p = tb_property_find(dir, "deviceid", TB_PROPERTY_TYPE_TEXT);
if (p)
xd->device_name = kstrdup(p->value.text, GFP_KERNEL);
p = tb_property_find(dir, "vendorid", TB_PROPERTY_TYPE_TEXT);
if (p)
xd->vendor_name = kstrdup(p->value.text, GFP_KERNEL);
return 0;
}
static int tb_xdomain_update_link_attributes(struct tb_xdomain *xd)
{
bool change = false;
struct tb_port *port;
int ret;
port = tb_xdomain_downstream_port(xd);
ret = tb_port_get_link_speed(port);
if (ret < 0)
return ret;
if (xd->link_speed != ret)
change = true;
xd->link_speed = ret;
ret = tb_port_get_link_width(port);
if (ret < 0)
return ret;
if (xd->link_width != ret)
change = true;
xd->link_width = ret;
if (change)
kobject_uevent(&xd->dev.kobj, KOBJ_CHANGE);
return 0;
}
static int tb_xdomain_get_uuid(struct tb_xdomain *xd)
{
struct tb *tb = xd->tb;
uuid_t uuid;
u64 route;
int ret;
dev_dbg(&xd->dev, "requesting remote UUID\n");
ret = tb_xdp_uuid_request(tb->ctl, xd->route, xd->state_retries, &uuid,
&route);
if (ret < 0) {
if (xd->state_retries-- > 0) {
dev_dbg(&xd->dev, "failed to request UUID, retrying\n");
return -EAGAIN;
}
dev_dbg(&xd->dev, "failed to read remote UUID\n");
return ret;
}
dev_dbg(&xd->dev, "got remote UUID %pUb\n", &uuid);
if (uuid_equal(&uuid, xd->local_uuid)) {
if (route == xd->route)
dev_dbg(&xd->dev, "loop back detected\n");
else
dev_dbg(&xd->dev, "intra-domain loop detected\n");
/* Don't bond lanes automatically for loops */
xd->bonding_possible = false;
}
/*
* If the UUID is different, there is another domain connected
* so mark this one unplugged and wait for the connection
* manager to replace it.
*/
if (xd->remote_uuid && !uuid_equal(&uuid, xd->remote_uuid)) {
dev_dbg(&xd->dev, "remote UUID is different, unplugging\n");
xd->is_unplugged = true;
return -ENODEV;
}
/* First time fill in the missing UUID */
if (!xd->remote_uuid) {
xd->remote_uuid = kmemdup(&uuid, sizeof(uuid_t), GFP_KERNEL);
if (!xd->remote_uuid)
return -ENOMEM;
}
return 0;
}
static int tb_xdomain_get_link_status(struct tb_xdomain *xd)
{
struct tb *tb = xd->tb;
u8 slw, tlw, sls, tls;
int ret;
dev_dbg(&xd->dev, "sending link state status request to %pUb\n",
xd->remote_uuid);
ret = tb_xdp_link_state_status_request(tb->ctl, xd->route,
xd->state_retries, &slw, &tlw, &sls,
&tls);
if (ret) {
if (ret != -EOPNOTSUPP && xd->state_retries-- > 0) {
dev_dbg(&xd->dev,
"failed to request remote link status, retrying\n");
return -EAGAIN;
}
dev_dbg(&xd->dev, "failed to receive remote link status\n");
return ret;
}
dev_dbg(&xd->dev, "remote link supports width %#x speed %#x\n", slw, sls);
if (slw < LANE_ADP_CS_0_SUPPORTED_WIDTH_DUAL) {
dev_dbg(&xd->dev, "remote adapter is single lane only\n");
return -EOPNOTSUPP;
}
return 0;
}
static int tb_xdomain_link_state_change(struct tb_xdomain *xd,
unsigned int width)
{
struct tb_port *port = tb_xdomain_downstream_port(xd);
struct tb *tb = xd->tb;
u8 tlw, tls;
u32 val;
int ret;
if (width == 2)
tlw = LANE_ADP_CS_1_TARGET_WIDTH_DUAL;
else if (width == 1)
tlw = LANE_ADP_CS_1_TARGET_WIDTH_SINGLE;
else
return -EINVAL;
/* Use the current target speed */
ret = tb_port_read(port, &val, TB_CFG_PORT, port->cap_phy + LANE_ADP_CS_1, 1);
if (ret)
return ret;
tls = val & LANE_ADP_CS_1_TARGET_SPEED_MASK;
dev_dbg(&xd->dev, "sending link state change request with width %#x speed %#x\n",
tlw, tls);
ret = tb_xdp_link_state_change_request(tb->ctl, xd->route,
xd->state_retries, tlw, tls);
if (ret) {
if (ret != -EOPNOTSUPP && xd->state_retries-- > 0) {
dev_dbg(&xd->dev,
"failed to change remote link state, retrying\n");
return -EAGAIN;
}
dev_err(&xd->dev, "failed request link state change, aborting\n");
return ret;
}
dev_dbg(&xd->dev, "received link state change response\n");
return 0;
}
static int tb_xdomain_bond_lanes_uuid_high(struct tb_xdomain *xd)
{
unsigned int width, width_mask;
struct tb_port *port;
int ret;
if (xd->target_link_width == LANE_ADP_CS_1_TARGET_WIDTH_SINGLE) {
width = TB_LINK_WIDTH_SINGLE;
width_mask = width;
} else if (xd->target_link_width == LANE_ADP_CS_1_TARGET_WIDTH_DUAL) {
width = TB_LINK_WIDTH_DUAL;
width_mask = width | TB_LINK_WIDTH_ASYM_TX | TB_LINK_WIDTH_ASYM_RX;
} else {
if (xd->state_retries-- > 0) {
dev_dbg(&xd->dev,
"link state change request not received yet, retrying\n");
return -EAGAIN;
}
dev_dbg(&xd->dev, "timeout waiting for link change request\n");
return -ETIMEDOUT;
}
port = tb_xdomain_downstream_port(xd);
/*
* We can't use tb_xdomain_lane_bonding_enable() here because it
* is the other side that initiates lane bonding. So here we
* just set the width to both lane adapters and wait for the
* link to transition bonded.
*/
ret = tb_port_set_link_width(port->dual_link_port, width);
if (ret) {
tb_port_warn(port->dual_link_port,
"failed to set link width to %d\n", width);
return ret;
}
ret = tb_port_set_link_width(port, width);
if (ret) {
tb_port_warn(port, "failed to set link width to %d\n", width);
return ret;
}
ret = tb_port_wait_for_link_width(port, width_mask,
XDOMAIN_BONDING_TIMEOUT);
if (ret) {
dev_warn(&xd->dev, "error waiting for link width to become %d\n",
width_mask);
return ret;
}
port->bonded = width > TB_LINK_WIDTH_SINGLE;
port->dual_link_port->bonded = width > TB_LINK_WIDTH_SINGLE;
tb_port_update_credits(port);
tb_xdomain_update_link_attributes(xd);
dev_dbg(&xd->dev, "lane bonding %s\n", str_enabled_disabled(width == 2));
return 0;
}
static int tb_xdomain_get_properties(struct tb_xdomain *xd)
{
struct tb_property_dir *dir;
struct tb *tb = xd->tb;
bool update = false;
u32 *block = NULL;
u32 gen = 0;
int ret;
dev_dbg(&xd->dev, "requesting remote properties\n");
ret = tb_xdp_properties_request(tb->ctl, xd->route, xd->local_uuid,
xd->remote_uuid, xd->state_retries,
&block, &gen);
if (ret < 0) {
if (xd->state_retries-- > 0) {
dev_dbg(&xd->dev,
"failed to request remote properties, retrying\n");
return -EAGAIN;
}
/* Give up now */
dev_err(&xd->dev, "failed read XDomain properties from %pUb\n",
xd->remote_uuid);
return ret;
}
mutex_lock(&xd->lock);
/* Only accept newer generation properties */
if (xd->remote_properties && gen <= xd->remote_property_block_gen) {
ret = 0;
goto err_free_block;
}
dir = tb_property_parse_dir(block, ret);
if (!dir) {
dev_err(&xd->dev, "failed to parse XDomain properties\n");
ret = -ENOMEM;
goto err_free_block;
}
ret = populate_properties(xd, dir);
if (ret) {
dev_err(&xd->dev, "missing XDomain properties in response\n");
goto err_free_dir;
}
/* Release the existing one */
if (xd->remote_properties) {
tb_property_free_dir(xd->remote_properties);
update = true;
}
xd->remote_properties = dir;
xd->remote_property_block_gen = gen;
tb_xdomain_update_link_attributes(xd);
mutex_unlock(&xd->lock);
kfree(block);
/*
* Now the device should be ready enough so we can add it to the
* bus and let userspace know about it. If the device is already
* registered, we notify userspace that it has changed.
*/
if (!update) {
/*
* Now disable lane 1 if bonding was not enabled. Do
* this only if bonding was possible at the beginning
* (that is we are the connection manager and there are
* two lanes).
*/
if (xd->bonding_possible) {
struct tb_port *port;
port = tb_xdomain_downstream_port(xd);
if (!port->bonded)
tb_port_disable(port->dual_link_port);
}
if (device_add(&xd->dev)) {
dev_err(&xd->dev, "failed to add XDomain device\n");
return -ENODEV;
}
dev_info(&xd->dev, "new host found, vendor=%#x device=%#x\n",
xd->vendor, xd->device);
if (xd->vendor_name && xd->device_name)
dev_info(&xd->dev, "%s %s\n", xd->vendor_name,
xd->device_name);
tb_xdomain_debugfs_init(xd);
} else {
kobject_uevent(&xd->dev.kobj, KOBJ_CHANGE);
}
enumerate_services(xd);
return 0;
err_free_dir:
tb_property_free_dir(dir);
err_free_block:
kfree(block);
mutex_unlock(&xd->lock);
return ret;
}
static void tb_xdomain_queue_uuid(struct tb_xdomain *xd)
{
xd->state = XDOMAIN_STATE_UUID;
xd->state_retries = XDOMAIN_RETRIES;
queue_delayed_work(xd->tb->wq, &xd->state_work,
msecs_to_jiffies(XDOMAIN_SHORT_TIMEOUT));
}
static void tb_xdomain_queue_link_status(struct tb_xdomain *xd)
{
xd->state = XDOMAIN_STATE_LINK_STATUS;
xd->state_retries = XDOMAIN_RETRIES;
queue_delayed_work(xd->tb->wq, &xd->state_work,
msecs_to_jiffies(XDOMAIN_DEFAULT_TIMEOUT));
}
static void tb_xdomain_queue_link_status2(struct tb_xdomain *xd)
{
xd->state = XDOMAIN_STATE_LINK_STATUS2;
xd->state_retries = XDOMAIN_RETRIES;
queue_delayed_work(xd->tb->wq, &xd->state_work,
msecs_to_jiffies(XDOMAIN_DEFAULT_TIMEOUT));
}
static void tb_xdomain_queue_bonding(struct tb_xdomain *xd)
{
if (memcmp(xd->local_uuid, xd->remote_uuid, UUID_SIZE) > 0) {
dev_dbg(&xd->dev, "we have higher UUID, other side bonds the lanes\n");
xd->state = XDOMAIN_STATE_BONDING_UUID_HIGH;
} else {
dev_dbg(&xd->dev, "we have lower UUID, bonding lanes\n");
xd->state = XDOMAIN_STATE_LINK_STATE_CHANGE;
}
xd->state_retries = XDOMAIN_RETRIES;
queue_delayed_work(xd->tb->wq, &xd->state_work,
msecs_to_jiffies(XDOMAIN_DEFAULT_TIMEOUT));
}
static void tb_xdomain_queue_bonding_uuid_low(struct tb_xdomain *xd)
{
xd->state = XDOMAIN_STATE_BONDING_UUID_LOW;
xd->state_retries = XDOMAIN_RETRIES;
queue_delayed_work(xd->tb->wq, &xd->state_work,
msecs_to_jiffies(XDOMAIN_DEFAULT_TIMEOUT));
}
static void tb_xdomain_queue_properties(struct tb_xdomain *xd)
{
xd->state = XDOMAIN_STATE_PROPERTIES;
xd->state_retries = XDOMAIN_RETRIES;
queue_delayed_work(xd->tb->wq, &xd->state_work,
msecs_to_jiffies(XDOMAIN_DEFAULT_TIMEOUT));
}
static void tb_xdomain_queue_properties_changed(struct tb_xdomain *xd)
{
xd->properties_changed_retries = XDOMAIN_RETRIES;
queue_delayed_work(xd->tb->wq, &xd->properties_changed_work,
msecs_to_jiffies(XDOMAIN_SHORT_TIMEOUT));
}
static void tb_xdomain_state_work(struct work_struct *work)
{
struct tb_xdomain *xd = container_of(work, typeof(*xd), state_work.work);
int ret, state = xd->state;
if (WARN_ON_ONCE(state < XDOMAIN_STATE_INIT ||
state > XDOMAIN_STATE_ERROR))
return;
dev_dbg(&xd->dev, "running state %s\n", state_names[state]);
switch (state) {
case XDOMAIN_STATE_INIT:
if (xd->needs_uuid) {
tb_xdomain_queue_uuid(xd);
} else {
tb_xdomain_queue_properties_changed(xd);
tb_xdomain_queue_properties(xd);
}
break;
case XDOMAIN_STATE_UUID:
ret = tb_xdomain_get_uuid(xd);
if (ret) {
if (ret == -EAGAIN)
goto retry_state;
xd->state = XDOMAIN_STATE_ERROR;
} else {
tb_xdomain_queue_properties_changed(xd);
if (xd->bonding_possible)
tb_xdomain_queue_link_status(xd);
else
tb_xdomain_queue_properties(xd);
}
break;
case XDOMAIN_STATE_LINK_STATUS:
ret = tb_xdomain_get_link_status(xd);
if (ret) {
if (ret == -EAGAIN)
goto retry_state;
/*
* If any of the lane bonding states fail we skip
* bonding completely and try to continue from
* reading properties.
*/
tb_xdomain_queue_properties(xd);
} else {
tb_xdomain_queue_bonding(xd);
}
break;
case XDOMAIN_STATE_LINK_STATE_CHANGE:
ret = tb_xdomain_link_state_change(xd, 2);
if (ret) {
if (ret == -EAGAIN)
goto retry_state;
tb_xdomain_queue_properties(xd);
} else {
tb_xdomain_queue_link_status2(xd);
}
break;
case XDOMAIN_STATE_LINK_STATUS2:
ret = tb_xdomain_get_link_status(xd);
if (ret) {
if (ret == -EAGAIN)
goto retry_state;
tb_xdomain_queue_properties(xd);
} else {
tb_xdomain_queue_bonding_uuid_low(xd);
}
break;
case XDOMAIN_STATE_BONDING_UUID_LOW:
tb_xdomain_lane_bonding_enable(xd);
tb_xdomain_queue_properties(xd);
break;
case XDOMAIN_STATE_BONDING_UUID_HIGH:
if (tb_xdomain_bond_lanes_uuid_high(xd) == -EAGAIN)
goto retry_state;
tb_xdomain_queue_properties(xd);
break;
case XDOMAIN_STATE_PROPERTIES:
ret = tb_xdomain_get_properties(xd);
if (ret) {
if (ret == -EAGAIN)
goto retry_state;
xd->state = XDOMAIN_STATE_ERROR;
} else {
xd->state = XDOMAIN_STATE_ENUMERATED;
}
break;
case XDOMAIN_STATE_ENUMERATED:
tb_xdomain_queue_properties(xd);
break;
case XDOMAIN_STATE_ERROR:
break;
default:
dev_warn(&xd->dev, "unexpected state %d\n", state);
break;
}
return;
retry_state:
queue_delayed_work(xd->tb->wq, &xd->state_work,
msecs_to_jiffies(XDOMAIN_DEFAULT_TIMEOUT));
}
static void tb_xdomain_properties_changed(struct work_struct *work)
{
struct tb_xdomain *xd = container_of(work, typeof(*xd),
properties_changed_work.work);
int ret;
dev_dbg(&xd->dev, "sending properties changed notification\n");
ret = tb_xdp_properties_changed_request(xd->tb->ctl, xd->route,
xd->properties_changed_retries, xd->local_uuid);
if (ret) {
if (xd->properties_changed_retries-- > 0) {
dev_dbg(&xd->dev,
"failed to send properties changed notification, retrying\n");
queue_delayed_work(xd->tb->wq,
&xd->properties_changed_work,
msecs_to_jiffies(XDOMAIN_DEFAULT_TIMEOUT));
}
dev_err(&xd->dev, "failed to send properties changed notification\n");
return;
}
xd->properties_changed_retries = XDOMAIN_RETRIES;
}
static ssize_t device_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct tb_xdomain *xd = container_of(dev, struct tb_xdomain, dev);
return sysfs_emit(buf, "%#x\n", xd->device);
}
static DEVICE_ATTR_RO(device);
static ssize_t
device_name_show(struct device *dev, struct device_attribute *attr, char *buf)
{
struct tb_xdomain *xd = container_of(dev, struct tb_xdomain, dev);
int ret;
if (mutex_lock_interruptible(&xd->lock))
return -ERESTARTSYS;
ret = sysfs_emit(buf, "%s\n", xd->device_name ?: "");
mutex_unlock(&xd->lock);
return ret;
}
static DEVICE_ATTR_RO(device_name);
static ssize_t maxhopid_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct tb_xdomain *xd = container_of(dev, struct tb_xdomain, dev);
return sysfs_emit(buf, "%d\n", xd->remote_max_hopid);
}
static DEVICE_ATTR_RO(maxhopid);
static ssize_t vendor_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct tb_xdomain *xd = container_of(dev, struct tb_xdomain, dev);
return sysfs_emit(buf, "%#x\n", xd->vendor);
}
static DEVICE_ATTR_RO(vendor);
static ssize_t
vendor_name_show(struct device *dev, struct device_attribute *attr, char *buf)
{
struct tb_xdomain *xd = container_of(dev, struct tb_xdomain, dev);
int ret;
if (mutex_lock_interruptible(&xd->lock))
return -ERESTARTSYS;
ret = sysfs_emit(buf, "%s\n", xd->vendor_name ?: "");
mutex_unlock(&xd->lock);
return ret;
}
static DEVICE_ATTR_RO(vendor_name);
static ssize_t unique_id_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct tb_xdomain *xd = container_of(dev, struct tb_xdomain, dev);
return sysfs_emit(buf, "%pUb\n", xd->remote_uuid);
}
static DEVICE_ATTR_RO(unique_id);
static ssize_t speed_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct tb_xdomain *xd = container_of(dev, struct tb_xdomain, dev);
return sysfs_emit(buf, "%u.0 Gb/s\n", xd->link_speed);
}
static DEVICE_ATTR(rx_speed, 0444, speed_show, NULL);
static DEVICE_ATTR(tx_speed, 0444, speed_show, NULL);
static ssize_t rx_lanes_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct tb_xdomain *xd = container_of(dev, struct tb_xdomain, dev);
unsigned int width;
switch (xd->link_width) {
case TB_LINK_WIDTH_SINGLE:
case TB_LINK_WIDTH_ASYM_RX:
width = 1;
break;
case TB_LINK_WIDTH_DUAL:
width = 2;
break;
case TB_LINK_WIDTH_ASYM_TX:
width = 3;
break;
default:
WARN_ON_ONCE(1);
return -EINVAL;
}
return sysfs_emit(buf, "%u\n", width);
}
static DEVICE_ATTR(rx_lanes, 0444, rx_lanes_show, NULL);
static ssize_t tx_lanes_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct tb_xdomain *xd = container_of(dev, struct tb_xdomain, dev);
unsigned int width;
switch (xd->link_width) {
case TB_LINK_WIDTH_SINGLE:
case TB_LINK_WIDTH_ASYM_TX:
width = 1;
break;
case TB_LINK_WIDTH_DUAL:
width = 2;
break;
case TB_LINK_WIDTH_ASYM_RX:
width = 3;
break;
default:
WARN_ON_ONCE(1);
return -EINVAL;
}
return sysfs_emit(buf, "%u\n", width);
}
static DEVICE_ATTR(tx_lanes, 0444, tx_lanes_show, NULL);
static struct attribute *xdomain_attrs[] = {
&dev_attr_device.attr,
&dev_attr_device_name.attr,
&dev_attr_maxhopid.attr,
&dev_attr_rx_lanes.attr,
&dev_attr_rx_speed.attr,
&dev_attr_tx_lanes.attr,
&dev_attr_tx_speed.attr,
&dev_attr_unique_id.attr,
&dev_attr_vendor.attr,
&dev_attr_vendor_name.attr,
NULL,
};
static const struct attribute_group xdomain_attr_group = {
.attrs = xdomain_attrs,
};
static const struct attribute_group *xdomain_attr_groups[] = {
&xdomain_attr_group,
NULL,
};
static void tb_xdomain_release(struct device *dev)
{
struct tb_xdomain *xd = container_of(dev, struct tb_xdomain, dev);
put_device(xd->dev.parent);
kfree(xd->local_property_block);
tb_property_free_dir(xd->remote_properties);
ida_destroy(&xd->out_hopids);
ida_destroy(&xd->in_hopids);
ida_destroy(&xd->service_ids);
kfree(xd->local_uuid);
kfree(xd->remote_uuid);
kfree(xd->device_name);
kfree(xd->vendor_name);
kfree(xd);
}
static void start_handshake(struct tb_xdomain *xd)
{
xd->state = XDOMAIN_STATE_INIT;
queue_delayed_work(xd->tb->wq, &xd->state_work,
msecs_to_jiffies(XDOMAIN_SHORT_TIMEOUT));
}
static void stop_handshake(struct tb_xdomain *xd)
{
cancel_delayed_work_sync(&xd->properties_changed_work);
cancel_delayed_work_sync(&xd->state_work);
xd->properties_changed_retries = 0;
xd->state_retries = 0;
}
static int __maybe_unused tb_xdomain_suspend(struct device *dev)
{
stop_handshake(tb_to_xdomain(dev));
return 0;
}
static int __maybe_unused tb_xdomain_resume(struct device *dev)
{
start_handshake(tb_to_xdomain(dev));
return 0;
}
static const struct dev_pm_ops tb_xdomain_pm_ops = {
SET_SYSTEM_SLEEP_PM_OPS(tb_xdomain_suspend, tb_xdomain_resume)
};
struct device_type tb_xdomain_type = {
.name = "thunderbolt_xdomain",
.release = tb_xdomain_release,
.pm = &tb_xdomain_pm_ops,
};
EXPORT_SYMBOL_GPL(tb_xdomain_type);
/**
* tb_xdomain_alloc() - Allocate new XDomain object
* @tb: Domain where the XDomain belongs
* @parent: Parent device (the switch through which the connection to the
* other domain is reached).
* @route: Route string used to reach the other domain
* @local_uuid: Our local domain UUID
* @remote_uuid: UUID of the other domain (optional)
*
* Allocates new XDomain structure and returns pointer to that. The
* object must be released by calling tb_xdomain_put().
*/
struct tb_xdomain *tb_xdomain_alloc(struct tb *tb, struct device *parent,
u64 route, const uuid_t *local_uuid,
const uuid_t *remote_uuid)
{
struct tb_switch *parent_sw = tb_to_switch(parent);
struct tb_xdomain *xd;
struct tb_port *down;
/* Make sure the downstream domain is accessible */
down = tb_port_at(route, parent_sw);
tb_port_unlock(down);
xd = kzalloc(sizeof(*xd), GFP_KERNEL);
if (!xd)
return NULL;
xd->tb = tb;
xd->route = route;
xd->local_max_hopid = down->config.max_in_hop_id;
ida_init(&xd->service_ids);
ida_init(&xd->in_hopids);
ida_init(&xd->out_hopids);
mutex_init(&xd->lock);
INIT_DELAYED_WORK(&xd->state_work, tb_xdomain_state_work);
INIT_DELAYED_WORK(&xd->properties_changed_work,
tb_xdomain_properties_changed);
xd->local_uuid = kmemdup(local_uuid, sizeof(uuid_t), GFP_KERNEL);
if (!xd->local_uuid)
goto err_free;
if (remote_uuid) {
xd->remote_uuid = kmemdup(remote_uuid, sizeof(uuid_t),
GFP_KERNEL);
if (!xd->remote_uuid)
goto err_free_local_uuid;
} else {
xd->needs_uuid = true;
xd->bonding_possible = !!down->dual_link_port;
}
device_initialize(&xd->dev);
xd->dev.parent = get_device(parent);
xd->dev.bus = &tb_bus_type;
xd->dev.type = &tb_xdomain_type;
xd->dev.groups = xdomain_attr_groups;
dev_set_name(&xd->dev, "%u-%llx", tb->index, route);
dev_dbg(&xd->dev, "local UUID %pUb\n", local_uuid);
if (remote_uuid)
dev_dbg(&xd->dev, "remote UUID %pUb\n", remote_uuid);
/*
* This keeps the DMA powered on as long as we have an active
* connection to another host.
*/
pm_runtime_set_active(&xd->dev);
pm_runtime_get_noresume(&xd->dev);
pm_runtime_enable(&xd->dev);
return xd;
err_free_local_uuid:
kfree(xd->local_uuid);
err_free:
kfree(xd);
return NULL;
}
/**
* tb_xdomain_add() - Add XDomain to the bus
* @xd: XDomain to add
*
* This function starts XDomain discovery protocol handshake and
* eventually adds the XDomain to the bus. After calling this function
* the caller needs to call tb_xdomain_remove() in order to remove and
* release the object regardless of whether the handshake succeeded or not.
*/
void tb_xdomain_add(struct tb_xdomain *xd)
{
/* Start exchanging properties with the other host */
start_handshake(xd);
}
static int unregister_service(struct device *dev, void *data)
{
device_unregister(dev);
return 0;
}
/**
* tb_xdomain_remove() - Remove XDomain from the bus
* @xd: XDomain to remove
*
* This will stop all ongoing configuration work and remove the XDomain
* along with any services from the bus. When the last reference to @xd
* is released the object will be released as well.
*/
void tb_xdomain_remove(struct tb_xdomain *xd)
{
tb_xdomain_debugfs_remove(xd);
stop_handshake(xd);
device_for_each_child_reverse(&xd->dev, xd, unregister_service);
/*
* Undo runtime PM here explicitly because it is possible that
* the XDomain was never added to the bus and thus device_del()
* is not called for it (device_del() would handle this otherwise).
*/
pm_runtime_disable(&xd->dev);
pm_runtime_put_noidle(&xd->dev);
pm_runtime_set_suspended(&xd->dev);
if (!device_is_registered(&xd->dev)) {
put_device(&xd->dev);
} else {
dev_info(&xd->dev, "host disconnected\n");
device_unregister(&xd->dev);
}
}
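/*
* A connection manager is expected to pair these calls roughly as in the
* sketch below (error handling omitted); sw and port stand for the switch
* and lane adapter the other domain is connected through:
*
*	xd = tb_xdomain_alloc(tb, &sw->dev, route, tb->root_switch->uuid,
*			      NULL);
*	if (xd) {
*		port->xdomain = xd;
*		tb_xdomain_add(xd);
*	}
*
* and later, when the other domain goes away:
*
*	tb_xdomain_remove(port->xdomain);
*	port->xdomain = NULL;
*/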
/**
* tb_xdomain_lane_bonding_enable() - Enable lane bonding on XDomain
* @xd: XDomain connection
*
* Lane bonding is disabled by default for XDomains. This function tries
* to enable bonding by first enabling the port and waiting for the CL0
* state.
*
* Return: %0 in case of success and negative errno in case of error.
*/
int tb_xdomain_lane_bonding_enable(struct tb_xdomain *xd)
{
unsigned int width_mask;
struct tb_port *port;
int ret;
port = tb_xdomain_downstream_port(xd);
if (!port->dual_link_port)
return -ENODEV;
ret = tb_port_enable(port->dual_link_port);
if (ret)
return ret;
ret = tb_wait_for_port(port->dual_link_port, true);
if (ret < 0)
return ret;
if (!ret)
return -ENOTCONN;
ret = tb_port_lane_bonding_enable(port);
if (ret) {
tb_port_warn(port, "failed to enable lane bonding\n");
return ret;
}
/* Any of these widths means the link is bonded */
width_mask = TB_LINK_WIDTH_DUAL | TB_LINK_WIDTH_ASYM_TX |
TB_LINK_WIDTH_ASYM_RX;
ret = tb_port_wait_for_link_width(port, width_mask,
XDOMAIN_BONDING_TIMEOUT);
if (ret) {
tb_port_warn(port, "failed to enable lane bonding\n");
return ret;
}
tb_port_update_credits(port);
tb_xdomain_update_link_attributes(xd);
dev_dbg(&xd->dev, "lane bonding enabled\n");
return 0;
}
EXPORT_SYMBOL_GPL(tb_xdomain_lane_bonding_enable);
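/*
* For example, a service driver that benefits from the full dual-lane
* bandwidth could attempt bonding once the remote host is enumerated and
* simply continue on a single lane if it fails:
*
*	if (tb_xdomain_lane_bonding_enable(xd))
*		dev_dbg(&xd->dev, "lane bonding not available\n");
*/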
/**
* tb_xdomain_lane_bonding_disable() - Disable lane bonding
* @xd: XDomain connection
*
* Lane bonding is disabled by default for XDomains. If bonding has been
* enabled, this function can be used to disable it.
*/
void tb_xdomain_lane_bonding_disable(struct tb_xdomain *xd)
{
struct tb_port *port;
port = tb_xdomain_downstream_port(xd);
if (port->dual_link_port) {
int ret;
tb_port_lane_bonding_disable(port);
ret = tb_port_wait_for_link_width(port, TB_LINK_WIDTH_SINGLE, 100);
if (ret == -ETIMEDOUT)
tb_port_warn(port, "timeout disabling lane bonding\n");
tb_port_disable(port->dual_link_port);
tb_port_update_credits(port);
tb_xdomain_update_link_attributes(xd);
dev_dbg(&xd->dev, "lane bonding disabled\n");
}
}
EXPORT_SYMBOL_GPL(tb_xdomain_lane_bonding_disable);
/**
* tb_xdomain_alloc_in_hopid() - Allocate input HopID for tunneling
* @xd: XDomain connection
* @hopid: Preferred HopID or %-1 for next available
*
* Returns allocated HopID or negative errno. Specifically returns
* %-ENOSPC if there are no more available HopIDs. Returned HopID is
* guaranteed to be within range supported by the input lane adapter.
* Call tb_xdomain_release_in_hopid() to release the allocated HopID.
*/
int tb_xdomain_alloc_in_hopid(struct tb_xdomain *xd, int hopid)
{
if (hopid < 0)
hopid = TB_PATH_MIN_HOPID;
if (hopid < TB_PATH_MIN_HOPID || hopid > xd->local_max_hopid)
return -EINVAL;
return ida_alloc_range(&xd->in_hopids, hopid, xd->local_max_hopid,
GFP_KERNEL);
}
EXPORT_SYMBOL_GPL(tb_xdomain_alloc_in_hopid);
/**
* tb_xdomain_alloc_out_hopid() - Allocate output HopID for tunneling
* @xd: XDomain connection
* @hopid: Preferred HopID or %-1 for next available
*
* Returns allocated HopID or negative errno. Specifically returns
* %-ENOSPC if there are no more available HopIDs. Returned HopID is
* guaranteed to be within range supported by the output lane adapter.
* Call tb_xdomain_release_out_hopid() to release the allocated HopID.
*/
int tb_xdomain_alloc_out_hopid(struct tb_xdomain *xd, int hopid)
{
if (hopid < 0)
hopid = TB_PATH_MIN_HOPID;
if (hopid < TB_PATH_MIN_HOPID || hopid > xd->remote_max_hopid)
return -EINVAL;
return ida_alloc_range(&xd->out_hopids, hopid, xd->remote_max_hopid,
GFP_KERNEL);
}
EXPORT_SYMBOL_GPL(tb_xdomain_alloc_out_hopid);
/**
* tb_xdomain_release_in_hopid() - Release input HopID
* @xd: XDomain connection
* @hopid: HopID to release
*/
void tb_xdomain_release_in_hopid(struct tb_xdomain *xd, int hopid)
{
ida_free(&xd->in_hopids, hopid);
}
EXPORT_SYMBOL_GPL(tb_xdomain_release_in_hopid);
/**
* tb_xdomain_release_out_hopid() - Release output HopID
* @xd: XDomain connection
* @hopid: HopID to release
*/
void tb_xdomain_release_out_hopid(struct tb_xdomain *xd, int hopid)
{
ida_free(&xd->out_hopids, hopid);
}
EXPORT_SYMBOL_GPL(tb_xdomain_release_out_hopid);
/**
* tb_xdomain_enable_paths() - Enable DMA paths for XDomain connection
* @xd: XDomain connection
* @transmit_path: HopID we are using to send out packets
* @transmit_ring: DMA ring used to send out packets
* @receive_path: HopID the other end is using to send packets to us
* @receive_ring: DMA ring used to receive packets from @receive_path
*
* The function enables DMA paths accordingly so that after successful
* return the caller can send and receive packets using high-speed DMA
* path. If a transmit or receive path is not needed, pass %-1 for those
* parameters.
*
* Return: %0 in case of success and negative errno in case of error
*/
int tb_xdomain_enable_paths(struct tb_xdomain *xd, int transmit_path,
int transmit_ring, int receive_path,
int receive_ring)
{
return tb_domain_approve_xdomain_paths(xd->tb, xd, transmit_path,
transmit_ring, receive_path,
receive_ring);
}
EXPORT_SYMBOL_GPL(tb_xdomain_enable_paths);
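/*
* For example, once both ends have agreed on the HopIDs to use (the
* exchange itself is protocol specific; tb_xdomain_alloc_in_hopid() and
* tb_xdomain_alloc_out_hopid() above provide the local side), a service
* driver with its NHI rings set up could enable the paths like this:
*
*	ret = tb_xdomain_enable_paths(xd, transmit_path, tx_ring->hop,
*				      receive_path, rx_ring->hop);
*	if (ret)
*		return ret;
*/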
/**
* tb_xdomain_disable_paths() - Disable DMA paths for XDomain connection
* @xd: XDomain connection
* @transmit_path: HopID we are using to send out packets
* @transmit_ring: DMA ring used to send out packets
* @receive_path: HopID the other end is using to send packets to us
* @receive_ring: DMA ring used to receive packets from @receive_path
*
* This does the opposite of tb_xdomain_enable_paths(). After call to
* this the caller is not expected to use the rings anymore. Passing %-1
* as path/ring parameter means don't care. Normally the callers should
* pass the same values here as they do when paths are enabled.
*
* Return: %0 in case of success and negative errno in case of error
*/
int tb_xdomain_disable_paths(struct tb_xdomain *xd, int transmit_path,
int transmit_ring, int receive_path,
int receive_ring)
{
return tb_domain_disconnect_xdomain_paths(xd->tb, xd, transmit_path,
transmit_ring, receive_path,
receive_ring);
}
EXPORT_SYMBOL_GPL(tb_xdomain_disable_paths);
struct tb_xdomain_lookup {
const uuid_t *uuid;
u8 link;
u8 depth;
u64 route;
};
static struct tb_xdomain *switch_find_xdomain(struct tb_switch *sw,
const struct tb_xdomain_lookup *lookup)
{
struct tb_port *port;
tb_switch_for_each_port(sw, port) {
struct tb_xdomain *xd;
if (port->xdomain) {
xd = port->xdomain;
if (lookup->uuid) {
if (xd->remote_uuid &&
uuid_equal(xd->remote_uuid, lookup->uuid))
return xd;
} else {
if (lookup->link && lookup->link == xd->link &&
lookup->depth == xd->depth)
return xd;
if (lookup->route && lookup->route == xd->route)
return xd;
}
} else if (tb_port_has_remote(port)) {
xd = switch_find_xdomain(port->remote->sw, lookup);
if (xd)
return xd;
}
}
return NULL;
}
/**
* tb_xdomain_find_by_uuid() - Find an XDomain by UUID
* @tb: Domain where the XDomain belongs to
* @uuid: UUID to look for
*
* Finds XDomain by walking through the Thunderbolt topology below @tb.
* The returned XDomain will have its reference count increased so the
* caller needs to call tb_xdomain_put() when it is done with the
* object.
*
* This will find all XDomains including the ones that are not yet added
* to the bus (handshake is still in progress).
*
* The caller needs to hold @tb->lock.
*/
struct tb_xdomain *tb_xdomain_find_by_uuid(struct tb *tb, const uuid_t *uuid)
{
struct tb_xdomain_lookup lookup;
struct tb_xdomain *xd;
memset(&lookup, 0, sizeof(lookup));
lookup.uuid = uuid;
xd = switch_find_xdomain(tb->root_switch, &lookup);
return tb_xdomain_get(xd);
}
EXPORT_SYMBOL_GPL(tb_xdomain_find_by_uuid);
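/*
* For example, a caller holding @tb->lock could look up the XDomain for a
* known remote UUID and drop the reference once it is done with it
* (do_something_with() is a placeholder for the caller's own use):
*
*	mutex_lock(&tb->lock);
*	xd = tb_xdomain_find_by_uuid(tb, uuid);
*	mutex_unlock(&tb->lock);
*	if (xd) {
*		do_something_with(xd);
*		tb_xdomain_put(xd);
*	}
*/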
/**
* tb_xdomain_find_by_link_depth() - Find an XDomain by link and depth
* @tb: Domain where the XDomain belongs to
* @link: Root switch link number
* @depth: Depth in the link
*
* Finds XDomain by walking through the Thunderbolt topology below @tb.
* The returned XDomain will have its reference count increased so the
* caller needs to call tb_xdomain_put() when it is done with the
* object.
*
* This will find all XDomains including the ones that are not yet added
* to the bus (handshake is still in progress).
*
* The caller needs to hold @tb->lock.
*/
struct tb_xdomain *tb_xdomain_find_by_link_depth(struct tb *tb, u8 link,
u8 depth)
{
struct tb_xdomain_lookup lookup;
struct tb_xdomain *xd;
memset(&lookup, 0, sizeof(lookup));
lookup.link = link;
lookup.depth = depth;
xd = switch_find_xdomain(tb->root_switch, &lookup);
return tb_xdomain_get(xd);
}
/**
* tb_xdomain_find_by_route() - Find an XDomain by route string
* @tb: Domain where the XDomain belongs to
* @route: XDomain route string
*
* Finds XDomain by walking through the Thunderbolt topology below @tb.
* The returned XDomain will have its reference count increased so the
* caller needs to call tb_xdomain_put() when it is done with the
* object.
*
* This will find all XDomains including the ones that are not yet added
* to the bus (handshake is still in progress).
*
* The caller needs to hold @tb->lock.
*/
struct tb_xdomain *tb_xdomain_find_by_route(struct tb *tb, u64 route)
{
struct tb_xdomain_lookup lookup;
struct tb_xdomain *xd;
memset(&lookup, 0, sizeof(lookup));
lookup.route = route;
xd = switch_find_xdomain(tb->root_switch, &lookup);
return tb_xdomain_get(xd);
}
EXPORT_SYMBOL_GPL(tb_xdomain_find_by_route);
bool tb_xdomain_handle_request(struct tb *tb, enum tb_cfg_pkg_type type,
const void *buf, size_t size)
{
const struct tb_protocol_handler *handler, *tmp;
const struct tb_xdp_header *hdr = buf;
unsigned int length;
int ret = 0;
/* We expect the packet to be at least the size of the header */
length = hdr->xd_hdr.length_sn & TB_XDOMAIN_LENGTH_MASK;
if (length != size / 4 - sizeof(hdr->xd_hdr) / 4)
return true;
if (length < sizeof(*hdr) / 4 - sizeof(hdr->xd_hdr) / 4)
return true;
/*
* Handle XDomain discovery protocol packets directly here. For
* other protocols (based on their UUID) we call registered
* handlers in turn.
*/
if (uuid_equal(&hdr->uuid, &tb_xdp_uuid)) {
if (type == TB_CFG_PKG_XDOMAIN_REQ)
return tb_xdp_schedule_request(tb, hdr, size);
return false;
}
mutex_lock(&xdomain_lock);
list_for_each_entry_safe(handler, tmp, &protocol_handlers, list) {
if (!uuid_equal(&hdr->uuid, handler->uuid))
continue;
mutex_unlock(&xdomain_lock);
ret = handler->callback(buf, size, handler->data);
mutex_lock(&xdomain_lock);
if (ret)
break;
}
mutex_unlock(&xdomain_lock);
return ret > 0;
}
static int update_xdomain(struct device *dev, void *data)
{
struct tb_xdomain *xd;
xd = tb_to_xdomain(dev);
if (xd) {
queue_delayed_work(xd->tb->wq, &xd->properties_changed_work,
msecs_to_jiffies(50));
}
return 0;
}
static void update_all_xdomains(void)
{
bus_for_each_dev(&tb_bus_type, NULL, NULL, update_xdomain);
}
static bool remove_directory(const char *key, const struct tb_property_dir *dir)
{
struct tb_property *p;
p = tb_property_find(xdomain_property_dir, key,
TB_PROPERTY_TYPE_DIRECTORY);
if (p && p->value.dir == dir) {
tb_property_remove(p);
return true;
}
return false;
}
/**
* tb_register_property_dir() - Register property directory to the host
* @key: Key (name) of the directory to add
* @dir: Directory to add
*
* Service drivers can use this function to add a new property directory
* to the properties this host exposes. The other connected hosts are
* notified so they can re-read the properties of this host if they are
* interested.
*
* Return: %0 on success and negative errno on failure
*/
int tb_register_property_dir(const char *key, struct tb_property_dir *dir)
{
int ret;
if (WARN_ON(!xdomain_property_dir))
return -EAGAIN;
if (!key || strlen(key) > 8)
return -EINVAL;
mutex_lock(&xdomain_lock);
if (tb_property_find(xdomain_property_dir, key,
TB_PROPERTY_TYPE_DIRECTORY)) {
ret = -EEXIST;
goto err_unlock;
}
ret = tb_property_add_dir(xdomain_property_dir, key, dir);
if (ret)
goto err_unlock;
xdomain_property_block_gen++;
mutex_unlock(&xdomain_lock);
update_all_xdomains();
return 0;
err_unlock:
mutex_unlock(&xdomain_lock);
return ret;
}
EXPORT_SYMBOL_GPL(tb_register_property_dir);
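/*
* For example, a service driver exposing its own directory under the key
* "network" could build and register it as follows (the UUID and property
* values are illustrative only):
*
*	dir = tb_property_create_dir(&my_dir_uuid);
*	if (!dir)
*		return -ENOMEM;
*	tb_property_add_immediate(dir, "prtcid", 1);
*	tb_property_add_immediate(dir, "prtcvers", 1);
*	tb_property_add_immediate(dir, "prtcrevs", 1);
*	tb_property_add_immediate(dir, "prtcstns", 0);
*	ret = tb_register_property_dir("network", dir);
*/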
/**
* tb_unregister_property_dir() - Removes property directory from host
* @key: Key (name) of the directory
* @dir: Directory to remove
*
* This will remove the existing directory from this host and notify the
* connected hosts about the change.
*/
void tb_unregister_property_dir(const char *key, struct tb_property_dir *dir)
{
mutex_lock(&xdomain_lock);
if (remove_directory(key, dir))
xdomain_property_block_gen++;
mutex_unlock(&xdomain_lock);
update_all_xdomains();
}
EXPORT_SYMBOL_GPL(tb_unregister_property_dir);
int tb_xdomain_init(void)
{
xdomain_property_dir = tb_property_create_dir(NULL);
if (!xdomain_property_dir)
return -ENOMEM;
/*
* Initialize the standard set of properties without any service
* directories. Those will be added by service drivers
* themselves when they are loaded.
*
* The rest of the properties are filled dynamically based on these
* when the P2P connection is made.
*/
tb_property_add_immediate(xdomain_property_dir, "vendorid",
PCI_VENDOR_ID_INTEL);
tb_property_add_text(xdomain_property_dir, "vendorid", "Intel Corp.");
tb_property_add_immediate(xdomain_property_dir, "deviceid", 0x1);
tb_property_add_immediate(xdomain_property_dir, "devicerv", 0x80000100);
xdomain_property_block_gen = get_random_u32();
return 0;
}
void tb_xdomain_exit(void)
{
tb_property_free_dir(xdomain_property_dir);
}
| linux-master | drivers/thunderbolt/xdomain.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Thunderbolt bus support
*
* Copyright (C) 2017, Intel Corporation
* Author: Mika Westerberg <[email protected]>
*/
#include <linux/device.h>
#include <linux/idr.h>
#include <linux/module.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <linux/random.h>
#include <crypto/hash.h>
#include "tb.h"
static DEFINE_IDA(tb_domain_ida);
static bool match_service_id(const struct tb_service_id *id,
const struct tb_service *svc)
{
if (id->match_flags & TBSVC_MATCH_PROTOCOL_KEY) {
if (strcmp(id->protocol_key, svc->key))
return false;
}
if (id->match_flags & TBSVC_MATCH_PROTOCOL_ID) {
if (id->protocol_id != svc->prtcid)
return false;
}
if (id->match_flags & TBSVC_MATCH_PROTOCOL_VERSION) {
if (id->protocol_version != svc->prtcvers)
return false;
}
if (id->match_flags & TBSVC_MATCH_PROTOCOL_REVISION) {
if (id->protocol_revision != svc->prtcrevs)
return false;
}
return true;
}
static const struct tb_service_id *__tb_service_match(struct device *dev,
struct device_driver *drv)
{
struct tb_service_driver *driver;
const struct tb_service_id *ids;
struct tb_service *svc;
svc = tb_to_service(dev);
if (!svc)
return NULL;
driver = container_of(drv, struct tb_service_driver, driver);
if (!driver->id_table)
return NULL;
for (ids = driver->id_table; ids->match_flags != 0; ids++) {
if (match_service_id(ids, svc))
return ids;
}
return NULL;
}
static int tb_service_match(struct device *dev, struct device_driver *drv)
{
return !!__tb_service_match(dev, drv);
}
static int tb_service_probe(struct device *dev)
{
struct tb_service *svc = tb_to_service(dev);
struct tb_service_driver *driver;
const struct tb_service_id *id;
driver = container_of(dev->driver, struct tb_service_driver, driver);
id = __tb_service_match(dev, &driver->driver);
return driver->probe(svc, id);
}
static void tb_service_remove(struct device *dev)
{
struct tb_service *svc = tb_to_service(dev);
struct tb_service_driver *driver;
driver = container_of(dev->driver, struct tb_service_driver, driver);
if (driver->remove)
driver->remove(svc);
}
static void tb_service_shutdown(struct device *dev)
{
struct tb_service_driver *driver;
struct tb_service *svc;
svc = tb_to_service(dev);
if (!svc || !dev->driver)
return;
driver = container_of(dev->driver, struct tb_service_driver, driver);
if (driver->shutdown)
driver->shutdown(svc);
}
static const char * const tb_security_names[] = {
[TB_SECURITY_NONE] = "none",
[TB_SECURITY_USER] = "user",
[TB_SECURITY_SECURE] = "secure",
[TB_SECURITY_DPONLY] = "dponly",
[TB_SECURITY_USBONLY] = "usbonly",
[TB_SECURITY_NOPCIE] = "nopcie",
};
static ssize_t boot_acl_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct tb *tb = container_of(dev, struct tb, dev);
uuid_t *uuids;
ssize_t ret;
int i;
uuids = kcalloc(tb->nboot_acl, sizeof(uuid_t), GFP_KERNEL);
if (!uuids)
return -ENOMEM;
pm_runtime_get_sync(&tb->dev);
if (mutex_lock_interruptible(&tb->lock)) {
ret = -ERESTARTSYS;
goto out;
}
ret = tb->cm_ops->get_boot_acl(tb, uuids, tb->nboot_acl);
if (ret) {
mutex_unlock(&tb->lock);
goto out;
}
mutex_unlock(&tb->lock);
for (ret = 0, i = 0; i < tb->nboot_acl; i++) {
if (!uuid_is_null(&uuids[i]))
ret += sysfs_emit_at(buf, ret, "%pUb", &uuids[i]);
ret += sysfs_emit_at(buf, ret, "%s", i < tb->nboot_acl - 1 ? "," : "\n");
}
out:
pm_runtime_mark_last_busy(&tb->dev);
pm_runtime_put_autosuspend(&tb->dev);
kfree(uuids);
return ret;
}
static ssize_t boot_acl_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
struct tb *tb = container_of(dev, struct tb, dev);
char *str, *s, *uuid_str;
ssize_t ret = 0;
uuid_t *acl;
int i = 0;
/*
* Make sure the value is not bigger than tb->nboot_acl * UUID
* length + commas and optional "\n". Also the smallest allowable
* string is tb->nboot_acl * ",".
*/
if (count > (UUID_STRING_LEN + 1) * tb->nboot_acl + 1)
return -EINVAL;
if (count < tb->nboot_acl - 1)
return -EINVAL;
str = kstrdup(buf, GFP_KERNEL);
if (!str)
return -ENOMEM;
acl = kcalloc(tb->nboot_acl, sizeof(uuid_t), GFP_KERNEL);
if (!acl) {
ret = -ENOMEM;
goto err_free_str;
}
uuid_str = strim(str);
while ((s = strsep(&uuid_str, ",")) != NULL && i < tb->nboot_acl) {
size_t len = strlen(s);
if (len) {
if (len != UUID_STRING_LEN) {
ret = -EINVAL;
goto err_free_acl;
}
ret = uuid_parse(s, &acl[i]);
if (ret)
goto err_free_acl;
}
i++;
}
if (s || i < tb->nboot_acl) {
ret = -EINVAL;
goto err_free_acl;
}
pm_runtime_get_sync(&tb->dev);
if (mutex_lock_interruptible(&tb->lock)) {
ret = -ERESTARTSYS;
goto err_rpm_put;
}
ret = tb->cm_ops->set_boot_acl(tb, acl, tb->nboot_acl);
if (!ret) {
/* Notify userspace about the change */
kobject_uevent(&tb->dev.kobj, KOBJ_CHANGE);
}
mutex_unlock(&tb->lock);
err_rpm_put:
pm_runtime_mark_last_busy(&tb->dev);
pm_runtime_put_autosuspend(&tb->dev);
err_free_acl:
kfree(acl);
err_free_str:
kfree(str);
return ret ?: count;
}
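/*
 * The written string must contain exactly tb->nboot_acl comma separated
 * entries. An empty entry is stored as a null UUID, which boot_acl_show()
 * skips when printing.
 */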
static DEVICE_ATTR_RW(boot_acl);
static ssize_t deauthorization_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
const struct tb *tb = container_of(dev, struct tb, dev);
bool deauthorization = false;
/* Only meaningful if authorization is supported */
if (tb->security_level == TB_SECURITY_USER ||
tb->security_level == TB_SECURITY_SECURE)
deauthorization = !!tb->cm_ops->disapprove_switch;
return sysfs_emit(buf, "%d\n", deauthorization);
}
static DEVICE_ATTR_RO(deauthorization);
static ssize_t iommu_dma_protection_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct tb *tb = container_of(dev, struct tb, dev);
return sysfs_emit(buf, "%d\n", tb->nhi->iommu_dma_protection);
}
static DEVICE_ATTR_RO(iommu_dma_protection);
static ssize_t security_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct tb *tb = container_of(dev, struct tb, dev);
const char *name = "unknown";
if (tb->security_level < ARRAY_SIZE(tb_security_names))
name = tb_security_names[tb->security_level];
return sysfs_emit(buf, "%s\n", name);
}
static DEVICE_ATTR_RO(security);
static struct attribute *domain_attrs[] = {
&dev_attr_boot_acl.attr,
&dev_attr_deauthorization.attr,
&dev_attr_iommu_dma_protection.attr,
&dev_attr_security.attr,
NULL,
};
static umode_t domain_attr_is_visible(struct kobject *kobj,
struct attribute *attr, int n)
{
struct device *dev = kobj_to_dev(kobj);
struct tb *tb = container_of(dev, struct tb, dev);
if (attr == &dev_attr_boot_acl.attr) {
if (tb->nboot_acl &&
tb->cm_ops->get_boot_acl &&
tb->cm_ops->set_boot_acl)
return attr->mode;
return 0;
}
return attr->mode;
}
static const struct attribute_group domain_attr_group = {
.is_visible = domain_attr_is_visible,
.attrs = domain_attrs,
};
static const struct attribute_group *domain_attr_groups[] = {
&domain_attr_group,
NULL,
};
struct bus_type tb_bus_type = {
.name = "thunderbolt",
.match = tb_service_match,
.probe = tb_service_probe,
.remove = tb_service_remove,
.shutdown = tb_service_shutdown,
};
static void tb_domain_release(struct device *dev)
{
struct tb *tb = container_of(dev, struct tb, dev);
tb_ctl_free(tb->ctl);
destroy_workqueue(tb->wq);
ida_simple_remove(&tb_domain_ida, tb->index);
mutex_destroy(&tb->lock);
kfree(tb);
}
struct device_type tb_domain_type = {
.name = "thunderbolt_domain",
.release = tb_domain_release,
};
static bool tb_domain_event_cb(void *data, enum tb_cfg_pkg_type type,
const void *buf, size_t size)
{
struct tb *tb = data;
if (!tb->cm_ops->handle_event) {
tb_warn(tb, "domain does not have event handler\n");
return true;
}
switch (type) {
case TB_CFG_PKG_XDOMAIN_REQ:
case TB_CFG_PKG_XDOMAIN_RESP:
if (tb_is_xdomain_enabled())
return tb_xdomain_handle_request(tb, type, buf, size);
break;
default:
tb->cm_ops->handle_event(tb, type, buf, size);
}
return true;
}
/**
* tb_domain_alloc() - Allocate a domain
* @nhi: Pointer to the host controller
* @timeout_msec: Control channel timeout for non-raw messages
* @privsize: Size of the connection manager private data
*
* Allocates and initializes a new Thunderbolt domain. Connection
* managers are expected to call this and then fill in @cm_ops
* accordingly.
*
* Call tb_domain_put() to release the domain before it has been added
* to the system.
*
* Return: allocated domain structure or %NULL in case of error
*/
struct tb *tb_domain_alloc(struct tb_nhi *nhi, int timeout_msec, size_t privsize)
{
struct tb *tb;
/*
* Make sure the structure sizes match what the hardware
* expects because bit-fields are being used.
*/
BUILD_BUG_ON(sizeof(struct tb_regs_switch_header) != 5 * 4);
BUILD_BUG_ON(sizeof(struct tb_regs_port_header) != 8 * 4);
BUILD_BUG_ON(sizeof(struct tb_regs_hop) != 2 * 4);
tb = kzalloc(sizeof(*tb) + privsize, GFP_KERNEL);
if (!tb)
return NULL;
tb->nhi = nhi;
mutex_init(&tb->lock);
tb->index = ida_simple_get(&tb_domain_ida, 0, 0, GFP_KERNEL);
if (tb->index < 0)
goto err_free;
tb->wq = alloc_ordered_workqueue("thunderbolt%d", 0, tb->index);
if (!tb->wq)
goto err_remove_ida;
tb->ctl = tb_ctl_alloc(nhi, timeout_msec, tb_domain_event_cb, tb);
if (!tb->ctl)
goto err_destroy_wq;
tb->dev.parent = &nhi->pdev->dev;
tb->dev.bus = &tb_bus_type;
tb->dev.type = &tb_domain_type;
tb->dev.groups = domain_attr_groups;
dev_set_name(&tb->dev, "domain%d", tb->index);
device_initialize(&tb->dev);
return tb;
err_destroy_wq:
destroy_workqueue(tb->wq);
err_remove_ida:
ida_simple_remove(&tb_domain_ida, tb->index);
err_free:
kfree(tb);
return NULL;
}
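/*
 * Minimal usage sketch, assuming a hypothetical connection manager: the
 * timeout, private size and ops structure are placeholders. A caller
 * allocates the domain, fills in @cm_ops and then calls tb_domain_add(),
 * dropping the reference with tb_domain_put() on failure.
 */
static struct tb *example_cm_create(struct tb_nhi *nhi,
				    const struct tb_cm_ops *ops)
{
	struct tb *tb;

	tb = tb_domain_alloc(nhi, 100, 0);
	if (!tb)
		return NULL;

	tb->cm_ops = ops;

	if (tb_domain_add(tb)) {
		tb_domain_put(tb);
		return NULL;
	}
	return tb;
}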
/**
* tb_domain_add() - Add domain to the system
* @tb: Domain to add
*
* Starts the domain and adds it to the system. Hotplugging devices will
* work after this function has returned successfully. In order to remove
* and release the domain after this function has been called, call
* tb_domain_remove().
*
* Return: %0 in case of success and negative errno in case of error
*/
int tb_domain_add(struct tb *tb)
{
int ret;
if (WARN_ON(!tb->cm_ops))
return -EINVAL;
mutex_lock(&tb->lock);
/*
* tb_schedule_hotplug_handler may be called as soon as the config
* channel is started. That's why we have to hold the lock here.
*/
tb_ctl_start(tb->ctl);
if (tb->cm_ops->driver_ready) {
ret = tb->cm_ops->driver_ready(tb);
if (ret)
goto err_ctl_stop;
}
tb_dbg(tb, "security level set to %s\n",
tb_security_names[tb->security_level]);
ret = device_add(&tb->dev);
if (ret)
goto err_ctl_stop;
/* Start the domain */
if (tb->cm_ops->start) {
ret = tb->cm_ops->start(tb);
if (ret)
goto err_domain_del;
}
/* This starts event processing */
mutex_unlock(&tb->lock);
device_init_wakeup(&tb->dev, true);
pm_runtime_no_callbacks(&tb->dev);
pm_runtime_set_active(&tb->dev);
pm_runtime_enable(&tb->dev);
pm_runtime_set_autosuspend_delay(&tb->dev, TB_AUTOSUSPEND_DELAY);
pm_runtime_mark_last_busy(&tb->dev);
pm_runtime_use_autosuspend(&tb->dev);
return 0;
err_domain_del:
device_del(&tb->dev);
err_ctl_stop:
tb_ctl_stop(tb->ctl);
mutex_unlock(&tb->lock);
return ret;
}
/**
* tb_domain_remove() - Removes and releases a domain
* @tb: Domain to remove
*
* Stops the domain, removes it from the system and releases all
* resources once the last reference has been released.
*/
void tb_domain_remove(struct tb *tb)
{
mutex_lock(&tb->lock);
if (tb->cm_ops->stop)
tb->cm_ops->stop(tb);
/* Stop the domain control traffic */
tb_ctl_stop(tb->ctl);
mutex_unlock(&tb->lock);
flush_workqueue(tb->wq);
device_unregister(&tb->dev);
}
/**
* tb_domain_suspend_noirq() - Suspend a domain
* @tb: Domain to suspend
*
* Suspends all devices in the domain and stops the control channel.
*/
int tb_domain_suspend_noirq(struct tb *tb)
{
int ret = 0;
/*
* The control channel interrupt is left enabled during suspend
* and taking the lock here prevents any events from happening before
* we actually have stopped the domain and the control channel.
*/
mutex_lock(&tb->lock);
if (tb->cm_ops->suspend_noirq)
ret = tb->cm_ops->suspend_noirq(tb);
if (!ret)
tb_ctl_stop(tb->ctl);
mutex_unlock(&tb->lock);
return ret;
}
/**
* tb_domain_resume_noirq() - Resume a domain
* @tb: Domain to resume
*
* Re-starts the control channel, and resumes all devices connected to
* the domain.
*/
int tb_domain_resume_noirq(struct tb *tb)
{
int ret = 0;
mutex_lock(&tb->lock);
tb_ctl_start(tb->ctl);
if (tb->cm_ops->resume_noirq)
ret = tb->cm_ops->resume_noirq(tb);
mutex_unlock(&tb->lock);
return ret;
}
int tb_domain_suspend(struct tb *tb)
{
return tb->cm_ops->suspend ? tb->cm_ops->suspend(tb) : 0;
}
int tb_domain_freeze_noirq(struct tb *tb)
{
int ret = 0;
mutex_lock(&tb->lock);
if (tb->cm_ops->freeze_noirq)
ret = tb->cm_ops->freeze_noirq(tb);
if (!ret)
tb_ctl_stop(tb->ctl);
mutex_unlock(&tb->lock);
return ret;
}
int tb_domain_thaw_noirq(struct tb *tb)
{
int ret = 0;
mutex_lock(&tb->lock);
tb_ctl_start(tb->ctl);
if (tb->cm_ops->thaw_noirq)
ret = tb->cm_ops->thaw_noirq(tb);
mutex_unlock(&tb->lock);
return ret;
}
void tb_domain_complete(struct tb *tb)
{
if (tb->cm_ops->complete)
tb->cm_ops->complete(tb);
}
int tb_domain_runtime_suspend(struct tb *tb)
{
if (tb->cm_ops->runtime_suspend) {
int ret = tb->cm_ops->runtime_suspend(tb);
if (ret)
return ret;
}
tb_ctl_stop(tb->ctl);
return 0;
}
int tb_domain_runtime_resume(struct tb *tb)
{
tb_ctl_start(tb->ctl);
if (tb->cm_ops->runtime_resume) {
int ret = tb->cm_ops->runtime_resume(tb);
if (ret)
return ret;
}
return 0;
}
/**
* tb_domain_disapprove_switch() - Disapprove switch
* @tb: Domain the switch belongs to
* @sw: Switch to disapprove
*
* This will disconnect the PCIe tunnel from the parent to this @sw.
*
* Return: %0 on success and negative errno in case of failure.
*/
int tb_domain_disapprove_switch(struct tb *tb, struct tb_switch *sw)
{
if (!tb->cm_ops->disapprove_switch)
return -EPERM;
return tb->cm_ops->disapprove_switch(tb, sw);
}
/**
* tb_domain_approve_switch() - Approve switch
* @tb: Domain the switch belongs to
* @sw: Switch to approve
*
* This will approve the switch by connection manager specific means. In
* case of success the connection manager will create PCIe tunnel from
* parent to @sw.
*/
int tb_domain_approve_switch(struct tb *tb, struct tb_switch *sw)
{
struct tb_switch *parent_sw;
if (!tb->cm_ops->approve_switch)
return -EPERM;
/* The parent switch must be authorized before this one */
parent_sw = tb_to_switch(sw->dev.parent);
if (!parent_sw || !parent_sw->authorized)
return -EINVAL;
return tb->cm_ops->approve_switch(tb, sw);
}
/**
* tb_domain_approve_switch_key() - Approve switch and add key
* @tb: Domain the switch belongs to
* @sw: Switch to approve
*
* For switches that support secure connect, this function first adds the
* key to the switch NVM using connection manager specific means. If
* adding the key is successful, the switch is approved and connected.
*
* Return: %0 on success and negative errno in case of failure.
*/
int tb_domain_approve_switch_key(struct tb *tb, struct tb_switch *sw)
{
struct tb_switch *parent_sw;
int ret;
if (!tb->cm_ops->approve_switch || !tb->cm_ops->add_switch_key)
return -EPERM;
/* The parent switch must be authorized before this one */
parent_sw = tb_to_switch(sw->dev.parent);
if (!parent_sw || !parent_sw->authorized)
return -EINVAL;
ret = tb->cm_ops->add_switch_key(tb, sw);
if (ret)
return ret;
return tb->cm_ops->approve_switch(tb, sw);
}
/**
* tb_domain_challenge_switch_key() - Challenge and approve switch
* @tb: Domain the switch belongs to
* @sw: Switch to approve
*
* For switches that support secure connect, this function generates
* a random challenge and sends it to the switch. The switch responds to
* this and if the response matches our random challenge, the switch is
* approved and connected.
*
* Return: %0 on success and negative errno in case of failure.
*/
int tb_domain_challenge_switch_key(struct tb *tb, struct tb_switch *sw)
{
u8 challenge[TB_SWITCH_KEY_SIZE];
u8 response[TB_SWITCH_KEY_SIZE];
u8 hmac[TB_SWITCH_KEY_SIZE];
struct tb_switch *parent_sw;
struct crypto_shash *tfm;
struct shash_desc *shash;
int ret;
if (!tb->cm_ops->approve_switch || !tb->cm_ops->challenge_switch_key)
return -EPERM;
/* The parent switch must be authorized before this one */
parent_sw = tb_to_switch(sw->dev.parent);
if (!parent_sw || !parent_sw->authorized)
return -EINVAL;
get_random_bytes(challenge, sizeof(challenge));
ret = tb->cm_ops->challenge_switch_key(tb, sw, challenge, response);
if (ret)
return ret;
tfm = crypto_alloc_shash("hmac(sha256)", 0, 0);
if (IS_ERR(tfm))
return PTR_ERR(tfm);
ret = crypto_shash_setkey(tfm, sw->key, TB_SWITCH_KEY_SIZE);
if (ret)
goto err_free_tfm;
shash = kzalloc(sizeof(*shash) + crypto_shash_descsize(tfm),
GFP_KERNEL);
if (!shash) {
ret = -ENOMEM;
goto err_free_tfm;
}
shash->tfm = tfm;
memset(hmac, 0, sizeof(hmac));
ret = crypto_shash_digest(shash, challenge, sizeof(hmac), hmac);
if (ret)
goto err_free_shash;
/* The returned HMAC must match the one we calculated */
if (memcmp(response, hmac, sizeof(hmac))) {
ret = -EKEYREJECTED;
goto err_free_shash;
}
crypto_free_shash(tfm);
kfree(shash);
return tb->cm_ops->approve_switch(tb, sw);
err_free_shash:
kfree(shash);
err_free_tfm:
crypto_free_shash(tfm);
return ret;
}
/**
* tb_domain_disconnect_pcie_paths() - Disconnect all PCIe paths
* @tb: Domain whose PCIe paths to disconnect
*
* This needs to be called in preparation for NVM upgrade of the host
* controller. Makes sure all PCIe paths are disconnected.
*
* Return: %0 on success and negative errno in case of error.
*/
int tb_domain_disconnect_pcie_paths(struct tb *tb)
{
if (!tb->cm_ops->disconnect_pcie_paths)
return -EPERM;
return tb->cm_ops->disconnect_pcie_paths(tb);
}
/**
* tb_domain_approve_xdomain_paths() - Enable DMA paths for XDomain
* @tb: Domain enabling the DMA paths
* @xd: XDomain DMA paths are created to
* @transmit_path: HopID we are using to send out packets
* @transmit_ring: DMA ring used to send out packets
* @receive_path: HopID the other end is using to send packets to us
* @receive_ring: DMA ring used to receive packets from @receive_path
*
* Calls connection manager specific method to enable DMA paths to the
* XDomain in question.
*
* Return: %0 in case of success and negative errno otherwise. In
* particular returns %-ENOTSUPP if the connection manager
* implementation does not support XDomains.
*/
int tb_domain_approve_xdomain_paths(struct tb *tb, struct tb_xdomain *xd,
int transmit_path, int transmit_ring,
int receive_path, int receive_ring)
{
if (!tb->cm_ops->approve_xdomain_paths)
return -ENOTSUPP;
return tb->cm_ops->approve_xdomain_paths(tb, xd, transmit_path,
transmit_ring, receive_path, receive_ring);
}
/**
* tb_domain_disconnect_xdomain_paths() - Disable DMA paths for XDomain
* @tb: Domain disabling the DMA paths
* @xd: XDomain whose DMA paths are disconnected
* @transmit_path: HopID we are using to send out packets
* @transmit_ring: DMA ring used to send out packets
* @receive_path: HopID the other end is using to send packets to us
* @receive_ring: DMA ring used to receive packets from @receive_path
*
* Calls connection manager specific method to disconnect DMA paths to
* the XDomain in question.
*
* Return: %0 in case of success and negative errno otherwise. In
* particular returns %-ENOTSUPP if the connection manager
* implementation does not support XDomains.
*/
int tb_domain_disconnect_xdomain_paths(struct tb *tb, struct tb_xdomain *xd,
int transmit_path, int transmit_ring,
int receive_path, int receive_ring)
{
if (!tb->cm_ops->disconnect_xdomain_paths)
return -ENOTSUPP;
return tb->cm_ops->disconnect_xdomain_paths(tb, xd, transmit_path,
transmit_ring, receive_path, receive_ring);
}
static int disconnect_xdomain(struct device *dev, void *data)
{
struct tb_xdomain *xd;
struct tb *tb = data;
int ret = 0;
xd = tb_to_xdomain(dev);
if (xd && xd->tb == tb)
ret = tb_xdomain_disable_all_paths(xd);
return ret;
}
/**
* tb_domain_disconnect_all_paths() - Disconnect all paths for the domain
* @tb: Domain whose paths are disconnected
*
* This function can be used to disconnect all paths (PCIe, XDomain) for
* example in preparation for host NVM firmware upgrade. After this is
* called the paths cannot be established without resetting the switch.
*
* Return: %0 in case of success and negative errno otherwise.
*/
int tb_domain_disconnect_all_paths(struct tb *tb)
{
int ret;
ret = tb_domain_disconnect_pcie_paths(tb);
if (ret)
return ret;
return bus_for_each_dev(&tb_bus_type, NULL, tb, disconnect_xdomain);
}
int tb_domain_init(void)
{
int ret;
tb_debugfs_init();
tb_acpi_init();
ret = tb_xdomain_init();
if (ret)
goto err_acpi;
ret = bus_register(&tb_bus_type);
if (ret)
goto err_xdomain;
return 0;
err_xdomain:
tb_xdomain_exit();
err_acpi:
tb_acpi_exit();
tb_debugfs_exit();
return ret;
}
void tb_domain_exit(void)
{
bus_unregister(&tb_bus_type);
ida_destroy(&tb_domain_ida);
tb_nvm_exit();
tb_xdomain_exit();
tb_acpi_exit();
tb_debugfs_exit();
}
| linux-master | drivers/thunderbolt/domain.c |
// SPDX-License-Identifier: GPL-2.0
/*
* ACPI support
*
* Copyright (C) 2020, Intel Corporation
* Author: Mika Westerberg <[email protected]>
*/
#include <linux/acpi.h>
#include <linux/pm_runtime.h>
#include "tb.h"
static acpi_status tb_acpi_add_link(acpi_handle handle, u32 level, void *data,
void **ret)
{
struct acpi_device *adev = acpi_fetch_acpi_dev(handle);
struct fwnode_handle *fwnode;
struct tb_nhi *nhi = data;
struct pci_dev *pdev;
struct device *dev;
if (!adev)
return AE_OK;
fwnode = fwnode_find_reference(acpi_fwnode_handle(adev), "usb4-host-interface", 0);
if (IS_ERR(fwnode))
return AE_OK;
/* It needs to reference this NHI */
if (dev_fwnode(&nhi->pdev->dev) != fwnode)
goto out_put;
/*
* Try to find the physical device by walking upwards in the hierarchy.
* We need to do this because the xHCI driver might not yet be
* bound so the USB3 SuperSpeed ports are not yet created.
*/
do {
dev = acpi_get_first_physical_node(adev);
if (dev)
break;
adev = acpi_dev_parent(adev);
} while (adev);
/*
* Check that the device is PCIe. This is because USB3
* SuperSpeed ports have this property and they are not power
* managed with the xHCI and the SuperSpeed hub so we create the
* link from xHCI instead.
*/
while (dev && !dev_is_pci(dev))
dev = dev->parent;
if (!dev)
goto out_put;
/*
* Check that this actually matches the type of device we
* expect. It should either be xHCI or PCIe root/downstream
* port.
*/
pdev = to_pci_dev(dev);
if (pdev->class == PCI_CLASS_SERIAL_USB_XHCI ||
(pci_is_pcie(pdev) &&
(pci_pcie_type(pdev) == PCI_EXP_TYPE_ROOT_PORT ||
pci_pcie_type(pdev) == PCI_EXP_TYPE_DOWNSTREAM))) {
const struct device_link *link;
/*
* Make them both active first to make sure the NHI does
* not runtime suspend before the consumer. The
* pm_runtime_put() below then allows the consumer to
* runtime suspend again (which then allows NHI runtime
* suspend too now that the device link is established).
*/
pm_runtime_get_sync(&pdev->dev);
link = device_link_add(&pdev->dev, &nhi->pdev->dev,
DL_FLAG_AUTOREMOVE_SUPPLIER |
DL_FLAG_RPM_ACTIVE |
DL_FLAG_PM_RUNTIME);
if (link) {
dev_dbg(&nhi->pdev->dev, "created link from %s\n",
dev_name(&pdev->dev));
*(bool *)ret = true;
} else {
dev_warn(&nhi->pdev->dev, "device link creation from %s failed\n",
dev_name(&pdev->dev));
}
pm_runtime_put(&pdev->dev);
}
out_put:
fwnode_handle_put(fwnode);
return AE_OK;
}
/**
* tb_acpi_add_links() - Add device links based on ACPI description
* @nhi: Pointer to NHI
*
* Goes over the ACPI namespace finding tunneled ports that reference the
* @nhi ACPI node. For each reference a device link is added. The link
* is automatically removed by the driver core.
*
* Returns %true if at least one link was created.
*/
bool tb_acpi_add_links(struct tb_nhi *nhi)
{
acpi_status status;
bool ret = false;
if (!has_acpi_companion(&nhi->pdev->dev))
return false;
/*
* Find all devices that have a usb4-host-interface
* property that references this NHI.
*/
status = acpi_walk_namespace(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT, 32,
tb_acpi_add_link, NULL, nhi, (void **)&ret);
if (ACPI_FAILURE(status)) {
dev_warn(&nhi->pdev->dev, "failed to enumerate tunneled ports\n");
return false;
}
return ret;
}
/**
* tb_acpi_is_native() - Did the platform grant native TBT/USB4 control
*
* Returns %true if the platform granted OS native control over
* TBT/USB4. In this case software based connection manager can be used,
* otherwise there is firmware based connection manager running.
*/
bool tb_acpi_is_native(void)
{
return osc_sb_native_usb4_support_confirmed &&
osc_sb_native_usb4_control;
}
/**
* tb_acpi_may_tunnel_usb3() - Is USB3 tunneling allowed by the platform
*
* When software based connection manager is used, this function
* returns %true if platform allows native USB3 tunneling.
*/
bool tb_acpi_may_tunnel_usb3(void)
{
if (tb_acpi_is_native())
return osc_sb_native_usb4_control & OSC_USB_USB3_TUNNELING;
return true;
}
/**
* tb_acpi_may_tunnel_dp() - Is DisplayPort tunneling allowed by the platform
*
* When software based connection manager is used, this function
* returns %true if platform allows native DP tunneling.
*/
bool tb_acpi_may_tunnel_dp(void)
{
if (tb_acpi_is_native())
return osc_sb_native_usb4_control & OSC_USB_DP_TUNNELING;
return true;
}
/**
* tb_acpi_may_tunnel_pcie() - Is PCIe tunneling allowed by the platform
*
* When software based connection manager is used, this function
* returns %true if platform allows native PCIe tunneling.
*/
bool tb_acpi_may_tunnel_pcie(void)
{
if (tb_acpi_is_native())
return osc_sb_native_usb4_control & OSC_USB_PCIE_TUNNELING;
return true;
}
/**
* tb_acpi_is_xdomain_allowed() - Are XDomain connections allowed
*
* When software based connection manager is used, this function
* returns %true if platform allows XDomain connections.
*/
bool tb_acpi_is_xdomain_allowed(void)
{
if (tb_acpi_is_native())
return osc_sb_native_usb4_control & OSC_USB_XDOMAIN;
return true;
}
/* UUID for retimer _DSM: e0053122-795b-4122-8a5e-57be1d26acb3 */
static const guid_t retimer_dsm_guid =
GUID_INIT(0xe0053122, 0x795b, 0x4122,
0x8a, 0x5e, 0x57, 0xbe, 0x1d, 0x26, 0xac, 0xb3);
#define RETIMER_DSM_QUERY_ONLINE_STATE 1
#define RETIMER_DSM_SET_ONLINE_STATE 2
static int tb_acpi_retimer_set_power(struct tb_port *port, bool power)
{
struct usb4_port *usb4 = port->usb4;
union acpi_object argv4[2];
struct acpi_device *adev;
union acpi_object *obj;
int ret;
if (!usb4->can_offline)
return 0;
adev = ACPI_COMPANION(&usb4->dev);
if (WARN_ON(!adev))
return 0;
/* Check if we are already powered on (and in correct mode) */
obj = acpi_evaluate_dsm_typed(adev->handle, &retimer_dsm_guid, 1,
RETIMER_DSM_QUERY_ONLINE_STATE, NULL,
ACPI_TYPE_INTEGER);
if (!obj) {
tb_port_warn(port, "ACPI: query online _DSM failed\n");
return -EIO;
}
ret = obj->integer.value;
ACPI_FREE(obj);
if (power == ret)
return 0;
tb_port_dbg(port, "ACPI: calling _DSM to power %s retimers\n",
power ? "on" : "off");
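	/*
	 * Arg3 of the _DSM is a package wrapping a single integer: argv4[0]
	 * describes the package and argv4[1] carries the on/off value.
	 */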
argv4[0].type = ACPI_TYPE_PACKAGE;
argv4[0].package.count = 1;
argv4[0].package.elements = &argv4[1];
argv4[1].integer.type = ACPI_TYPE_INTEGER;
argv4[1].integer.value = power;
obj = acpi_evaluate_dsm_typed(adev->handle, &retimer_dsm_guid, 1,
RETIMER_DSM_SET_ONLINE_STATE, argv4,
ACPI_TYPE_INTEGER);
if (!obj) {
tb_port_warn(port,
"ACPI: set online state _DSM evaluation failed\n");
return -EIO;
}
ret = obj->integer.value;
ACPI_FREE(obj);
if (ret >= 0) {
if (power)
return ret == 1 ? 0 : -EBUSY;
return 0;
}
tb_port_warn(port, "ACPI: set online state _DSM failed with error %d\n", ret);
return -EIO;
}
/**
* tb_acpi_power_on_retimers() - Call platform to power on retimers
* @port: USB4 port
*
* Calls platform to turn on power to all retimers behind this USB4
* port. After this function returns successfully the caller can
* continue with the normal retimer flows (as specified in the USB4
* spec). Note if this returns %-EBUSY it means the type-C port is in
* non-USB4/TBT mode (there is a non-USB4/TBT device connected).
*
* This should only be called if the USB4/TBT link is not up.
*
* Returns %0 on success.
*/
int tb_acpi_power_on_retimers(struct tb_port *port)
{
return tb_acpi_retimer_set_power(port, true);
}
/**
* tb_acpi_power_off_retimers() - Call platform to power off retimers
* @port: USB4 port
*
* This is the opposite of tb_acpi_power_on_retimers(). After returning
* successfully the normal operations with the @port can continue.
*
* Returns %0 on success.
*/
int tb_acpi_power_off_retimers(struct tb_port *port)
{
return tb_acpi_retimer_set_power(port, false);
}
static bool tb_acpi_bus_match(struct device *dev)
{
return tb_is_switch(dev) || tb_is_usb4_port_device(dev);
}
static struct acpi_device *tb_acpi_switch_find_companion(struct tb_switch *sw)
{
struct tb_switch *parent_sw = tb_switch_parent(sw);
struct acpi_device *adev = NULL;
/*
* Device routers exist under the downstream-facing USB4 port
* of the parent router. Their _ADR is always 0.
*/
if (parent_sw) {
struct tb_port *port = tb_switch_downstream_port(sw);
struct acpi_device *port_adev;
port_adev = acpi_find_child_by_adr(ACPI_COMPANION(&parent_sw->dev),
port->port);
if (port_adev)
adev = acpi_find_child_device(port_adev, 0, false);
} else {
struct tb_nhi *nhi = sw->tb->nhi;
struct acpi_device *parent_adev;
parent_adev = ACPI_COMPANION(&nhi->pdev->dev);
if (parent_adev)
adev = acpi_find_child_device(parent_adev, 0, false);
}
return adev;
}
static struct acpi_device *tb_acpi_find_companion(struct device *dev)
{
/*
* The Thunderbolt/USB4 hierarchy looks like following:
*
* Device (NHI)
* Device (HR) // Host router _ADR == 0
* Device (DFP0) // Downstream port _ADR == lane 0 adapter
* Device (DR) // Device router _ADR == 0
* Device (UFP) // Upstream port _ADR == lane 0 adapter
* Device (DFP1) // Downstream port _ADR == lane 0 adapter number
*
* At the moment we bind the host router to the corresponding
* Linux device.
*/
if (tb_is_switch(dev))
return tb_acpi_switch_find_companion(tb_to_switch(dev));
if (tb_is_usb4_port_device(dev))
return acpi_find_child_by_adr(ACPI_COMPANION(dev->parent),
tb_to_usb4_port_device(dev)->port->port);
return NULL;
}
static void tb_acpi_setup(struct device *dev)
{
struct acpi_device *adev = ACPI_COMPANION(dev);
struct usb4_port *usb4 = tb_to_usb4_port_device(dev);
if (!adev || !usb4)
return;
if (acpi_check_dsm(adev->handle, &retimer_dsm_guid, 1,
BIT(RETIMER_DSM_QUERY_ONLINE_STATE) |
BIT(RETIMER_DSM_SET_ONLINE_STATE)))
usb4->can_offline = true;
}
static struct acpi_bus_type tb_acpi_bus = {
.name = "thunderbolt",
.match = tb_acpi_bus_match,
.find_companion = tb_acpi_find_companion,
.setup = tb_acpi_setup,
};
int tb_acpi_init(void)
{
return register_acpi_bus_type(&tb_acpi_bus);
}
void tb_acpi_exit(void)
{
unregister_acpi_bus_type(&tb_acpi_bus);
}
| linux-master | drivers/thunderbolt/acpi.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* AR71xx Reset Controller Driver
* Author: Alban Bedel
*
* Copyright (C) 2015 Alban Bedel <[email protected]>
*/
#include <linux/io.h>
#include <linux/init.h>
#include <linux/mod_devicetable.h>
#include <linux/platform_device.h>
#include <linux/reset-controller.h>
#include <linux/reboot.h>
struct ath79_reset {
struct reset_controller_dev rcdev;
struct notifier_block restart_nb;
void __iomem *base;
spinlock_t lock;
};
#define FULL_CHIP_RESET 24
static int ath79_reset_update(struct reset_controller_dev *rcdev,
unsigned long id, bool assert)
{
struct ath79_reset *ath79_reset =
container_of(rcdev, struct ath79_reset, rcdev);
unsigned long flags;
u32 val;
spin_lock_irqsave(&ath79_reset->lock, flags);
val = readl(ath79_reset->base);
if (assert)
val |= BIT(id);
else
val &= ~BIT(id);
writel(val, ath79_reset->base);
spin_unlock_irqrestore(&ath79_reset->lock, flags);
return 0;
}
static int ath79_reset_assert(struct reset_controller_dev *rcdev,
unsigned long id)
{
return ath79_reset_update(rcdev, id, true);
}
static int ath79_reset_deassert(struct reset_controller_dev *rcdev,
unsigned long id)
{
return ath79_reset_update(rcdev, id, false);
}
static int ath79_reset_status(struct reset_controller_dev *rcdev,
unsigned long id)
{
struct ath79_reset *ath79_reset =
container_of(rcdev, struct ath79_reset, rcdev);
u32 val;
val = readl(ath79_reset->base);
return !!(val & BIT(id));
}
static const struct reset_control_ops ath79_reset_ops = {
.assert = ath79_reset_assert,
.deassert = ath79_reset_deassert,
.status = ath79_reset_status,
};
static int ath79_reset_restart_handler(struct notifier_block *nb,
unsigned long action, void *data)
{
struct ath79_reset *ath79_reset =
container_of(nb, struct ath79_reset, restart_nb);
ath79_reset_assert(&ath79_reset->rcdev, FULL_CHIP_RESET);
return NOTIFY_DONE;
}
static int ath79_reset_probe(struct platform_device *pdev)
{
struct ath79_reset *ath79_reset;
int err;
ath79_reset = devm_kzalloc(&pdev->dev,
sizeof(*ath79_reset), GFP_KERNEL);
if (!ath79_reset)
return -ENOMEM;
ath79_reset->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(ath79_reset->base))
return PTR_ERR(ath79_reset->base);
spin_lock_init(&ath79_reset->lock);
ath79_reset->rcdev.ops = &ath79_reset_ops;
ath79_reset->rcdev.owner = THIS_MODULE;
ath79_reset->rcdev.of_node = pdev->dev.of_node;
ath79_reset->rcdev.of_reset_n_cells = 1;
ath79_reset->rcdev.nr_resets = 32;
err = devm_reset_controller_register(&pdev->dev, &ath79_reset->rcdev);
if (err)
return err;
ath79_reset->restart_nb.notifier_call = ath79_reset_restart_handler;
ath79_reset->restart_nb.priority = 128;
err = register_restart_handler(&ath79_reset->restart_nb);
if (err)
dev_warn(&pdev->dev, "Failed to register restart handler\n");
return 0;
}
static const struct of_device_id ath79_reset_dt_ids[] = {
{ .compatible = "qca,ar7100-reset", },
{ },
};
static struct platform_driver ath79_reset_driver = {
.probe = ath79_reset_probe,
.driver = {
.name = "ath79-reset",
.of_match_table = ath79_reset_dt_ids,
.suppress_bind_attrs = true,
},
};
builtin_platform_driver(ath79_reset_driver);
| linux-master | drivers/reset/reset-ath79.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Simple Reset Controller Driver
*
* Copyright (C) 2017 Pengutronix, Philipp Zabel <[email protected]>
*
* Based on Allwinner SoCs Reset Controller driver
*
* Copyright 2013 Maxime Ripard
*
* Maxime Ripard <[email protected]>
*/
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/reset-controller.h>
#include <linux/reset/reset-simple.h>
#include <linux/spinlock.h>
static inline struct reset_simple_data *
to_reset_simple_data(struct reset_controller_dev *rcdev)
{
return container_of(rcdev, struct reset_simple_data, rcdev);
}
static int reset_simple_update(struct reset_controller_dev *rcdev,
unsigned long id, bool assert)
{
struct reset_simple_data *data = to_reset_simple_data(rcdev);
int reg_width = sizeof(u32);
int bank = id / (reg_width * BITS_PER_BYTE);
int offset = id % (reg_width * BITS_PER_BYTE);
unsigned long flags;
u32 reg;
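	/*
	 * Each 32-bit register controls 32 reset lines, so for example id 40
	 * maps to bit 8 of the second register (bank 1).
	 */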
spin_lock_irqsave(&data->lock, flags);
reg = readl(data->membase + (bank * reg_width));
if (assert ^ data->active_low)
reg |= BIT(offset);
else
reg &= ~BIT(offset);
writel(reg, data->membase + (bank * reg_width));
spin_unlock_irqrestore(&data->lock, flags);
return 0;
}
static int reset_simple_assert(struct reset_controller_dev *rcdev,
unsigned long id)
{
return reset_simple_update(rcdev, id, true);
}
static int reset_simple_deassert(struct reset_controller_dev *rcdev,
unsigned long id)
{
return reset_simple_update(rcdev, id, false);
}
static int reset_simple_reset(struct reset_controller_dev *rcdev,
unsigned long id)
{
struct reset_simple_data *data = to_reset_simple_data(rcdev);
int ret;
if (!data->reset_us)
return -ENOTSUPP;
ret = reset_simple_assert(rcdev, id);
if (ret)
return ret;
usleep_range(data->reset_us, data->reset_us * 2);
return reset_simple_deassert(rcdev, id);
}
static int reset_simple_status(struct reset_controller_dev *rcdev,
unsigned long id)
{
struct reset_simple_data *data = to_reset_simple_data(rcdev);
int reg_width = sizeof(u32);
int bank = id / (reg_width * BITS_PER_BYTE);
int offset = id % (reg_width * BITS_PER_BYTE);
u32 reg;
reg = readl(data->membase + (bank * reg_width));
return !(reg & BIT(offset)) ^ !data->status_active_low;
}
const struct reset_control_ops reset_simple_ops = {
.assert = reset_simple_assert,
.deassert = reset_simple_deassert,
.reset = reset_simple_reset,
.status = reset_simple_status,
};
EXPORT_SYMBOL_GPL(reset_simple_ops);
/**
* struct reset_simple_devdata - simple reset controller properties
* @reg_offset: offset between base address and first reset register.
* @nr_resets: number of resets. If not set, defaults to the resource size in bits.
* @active_low: if true, bits are cleared to assert the reset. Otherwise, bits
* are set to assert the reset.
* @status_active_low: if true, bits read back as cleared while the reset is
* asserted. Otherwise, bits read back as set while the
* reset is asserted.
*/
struct reset_simple_devdata {
u32 reg_offset;
u32 nr_resets;
bool active_low;
bool status_active_low;
};
#define SOCFPGA_NR_BANKS 8
static const struct reset_simple_devdata reset_simple_socfpga = {
.reg_offset = 0x20,
.nr_resets = SOCFPGA_NR_BANKS * 32,
.status_active_low = true,
};
static const struct reset_simple_devdata reset_simple_active_low = {
.active_low = true,
.status_active_low = true,
};
static const struct of_device_id reset_simple_dt_ids[] = {
{ .compatible = "altr,stratix10-rst-mgr",
.data = &reset_simple_socfpga },
{ .compatible = "st,stm32-rcc", },
{ .compatible = "allwinner,sun6i-a31-clock-reset",
.data = &reset_simple_active_low },
{ .compatible = "zte,zx296718-reset",
.data = &reset_simple_active_low },
{ .compatible = "aspeed,ast2400-lpc-reset" },
{ .compatible = "aspeed,ast2500-lpc-reset" },
{ .compatible = "aspeed,ast2600-lpc-reset" },
{ .compatible = "bitmain,bm1880-reset",
.data = &reset_simple_active_low },
{ .compatible = "brcm,bcm4908-misc-pcie-reset",
.data = &reset_simple_active_low },
{ .compatible = "snps,dw-high-reset" },
{ .compatible = "snps,dw-low-reset",
.data = &reset_simple_active_low },
{ /* sentinel */ },
};
static int reset_simple_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
const struct reset_simple_devdata *devdata;
struct reset_simple_data *data;
void __iomem *membase;
struct resource *res;
u32 reg_offset = 0;
devdata = of_device_get_match_data(dev);
data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
if (!data)
return -ENOMEM;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
membase = devm_ioremap_resource(dev, res);
if (IS_ERR(membase))
return PTR_ERR(membase);
spin_lock_init(&data->lock);
data->membase = membase;
data->rcdev.owner = THIS_MODULE;
data->rcdev.nr_resets = resource_size(res) * BITS_PER_BYTE;
data->rcdev.ops = &reset_simple_ops;
data->rcdev.of_node = dev->of_node;
if (devdata) {
reg_offset = devdata->reg_offset;
if (devdata->nr_resets)
data->rcdev.nr_resets = devdata->nr_resets;
data->active_low = devdata->active_low;
data->status_active_low = devdata->status_active_low;
}
data->membase += reg_offset;
return devm_reset_controller_register(dev, &data->rcdev);
}
static struct platform_driver reset_simple_driver = {
.probe = reset_simple_probe,
.driver = {
.name = "simple-reset",
.of_match_table = reset_simple_dt_ids,
},
};
builtin_platform_driver(reset_simple_driver);
| linux-master | drivers/reset/reset-simple.c |
// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
/*
* SP7021 reset driver
*
* Copyright (C) Sunplus Technology Co., Ltd.
* All rights reserved.
*/
#include <linux/io.h>
#include <linux/init.h>
#include <linux/mod_devicetable.h>
#include <linux/platform_device.h>
#include <linux/reset-controller.h>
#include <linux/reboot.h>
/* HIWORD_MASK_REG BITS */
#define BITS_PER_HWM_REG 16
/* resets HW info: reg_index_shift */
static const u32 sp_resets[] = {
/* SP7021: mo_reset0 ~ mo_reset9 */
0x00,
0x02,
0x03,
0x04,
0x05,
0x06,
0x07,
0x08,
0x09,
0x0a,
0x0b,
0x0d,
0x0e,
0x0f,
0x10,
0x12,
0x14,
0x15,
0x16,
0x17,
0x18,
0x19,
0x1a,
0x1b,
0x1c,
0x1d,
0x1e,
0x1f,
0x20,
0x21,
0x22,
0x23,
0x24,
0x25,
0x26,
0x2a,
0x2b,
0x2d,
0x2e,
0x30,
0x31,
0x32,
0x33,
0x3d,
0x3e,
0x3f,
0x42,
0x44,
0x4b,
0x4c,
0x4d,
0x4e,
0x4f,
0x50,
0x55,
0x60,
0x61,
0x6a,
0x6f,
0x70,
0x73,
0x74,
0x86,
0x8a,
0x8b,
0x8d,
0x8e,
0x8f,
0x90,
0x92,
0x93,
0x94,
0x95,
0x96,
0x97,
0x98,
0x99,
};
struct sp_reset {
struct reset_controller_dev rcdev;
struct notifier_block notifier;
void __iomem *base;
};
static inline struct sp_reset *to_sp_reset(struct reset_controller_dev *rcdev)
{
return container_of(rcdev, struct sp_reset, rcdev);
}
static int sp_reset_update(struct reset_controller_dev *rcdev,
unsigned long id, bool assert)
{
struct sp_reset *reset = to_sp_reset(rcdev);
int index = sp_resets[id] / BITS_PER_HWM_REG;
int shift = sp_resets[id] % BITS_PER_HWM_REG;
u32 val;
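	/*
	 * High-word mask registers: bit (16 + shift) enables the write and
	 * bit (shift) carries the new assert state for this reset line.
	 */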
val = (1 << (16 + shift)) | (assert << shift);
writel(val, reset->base + (index * 4));
return 0;
}
static int sp_reset_assert(struct reset_controller_dev *rcdev,
unsigned long id)
{
return sp_reset_update(rcdev, id, true);
}
static int sp_reset_deassert(struct reset_controller_dev *rcdev,
unsigned long id)
{
return sp_reset_update(rcdev, id, false);
}
static int sp_reset_status(struct reset_controller_dev *rcdev,
unsigned long id)
{
struct sp_reset *reset = to_sp_reset(rcdev);
int index = sp_resets[id] / BITS_PER_HWM_REG;
int shift = sp_resets[id] % BITS_PER_HWM_REG;
u32 reg;
reg = readl(reset->base + (index * 4));
return !!(reg & BIT(shift));
}
static const struct reset_control_ops sp_reset_ops = {
.assert = sp_reset_assert,
.deassert = sp_reset_deassert,
.status = sp_reset_status,
};
static int sp_restart(struct notifier_block *nb, unsigned long mode,
void *cmd)
{
struct sp_reset *reset = container_of(nb, struct sp_reset, notifier);
sp_reset_assert(&reset->rcdev, 0);
sp_reset_deassert(&reset->rcdev, 0);
return NOTIFY_DONE;
}
static int sp_reset_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct sp_reset *reset;
struct resource *res;
int ret;
reset = devm_kzalloc(dev, sizeof(*reset), GFP_KERNEL);
if (!reset)
return -ENOMEM;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
reset->base = devm_ioremap_resource(dev, res);
if (IS_ERR(reset->base))
return PTR_ERR(reset->base);
reset->rcdev.ops = &sp_reset_ops;
reset->rcdev.owner = THIS_MODULE;
reset->rcdev.of_node = dev->of_node;
reset->rcdev.nr_resets = resource_size(res) / 4 * BITS_PER_HWM_REG;
ret = devm_reset_controller_register(dev, &reset->rcdev);
if (ret)
return ret;
reset->notifier.notifier_call = sp_restart;
reset->notifier.priority = 192;
return register_restart_handler(&reset->notifier);
}
static const struct of_device_id sp_reset_dt_ids[] = {
{.compatible = "sunplus,sp7021-reset",},
{ /* sentinel */ },
};
static struct platform_driver sp_reset_driver = {
.probe = sp_reset_probe,
.driver = {
.name = "sunplus-reset",
.of_match_table = sp_reset_dt_ids,
.suppress_bind_attrs = true,
},
};
builtin_platform_driver(sp_reset_driver);
| linux-master | drivers/reset/reset-sunplus.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Renesas RZ/G2L USBPHY control driver
*
* Copyright (C) 2021 Renesas Electronics Corporation
*/
#include <linux/io.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/reset.h>
#include <linux/reset-controller.h>
#define RESET 0x000
#define RESET_SEL_PLLRESET BIT(12)
#define RESET_PLLRESET BIT(8)
#define RESET_SEL_P2RESET BIT(5)
#define RESET_SEL_P1RESET BIT(4)
#define RESET_PHYRST_2 BIT(1)
#define RESET_PHYRST_1 BIT(0)
#define PHY_RESET_PORT2 (RESET_SEL_P2RESET | RESET_PHYRST_2)
#define PHY_RESET_PORT1 (RESET_SEL_P1RESET | RESET_PHYRST_1)
#define NUM_PORTS 2
struct rzg2l_usbphy_ctrl_priv {
struct reset_controller_dev rcdev;
struct reset_control *rstc;
void __iomem *base;
spinlock_t lock;
};
#define rcdev_to_priv(x) container_of(x, struct rzg2l_usbphy_ctrl_priv, rcdev)
static int rzg2l_usbphy_ctrl_assert(struct reset_controller_dev *rcdev,
unsigned long id)
{
struct rzg2l_usbphy_ctrl_priv *priv = rcdev_to_priv(rcdev);
u32 port_mask = PHY_RESET_PORT1 | PHY_RESET_PORT2;
void __iomem *base = priv->base;
unsigned long flags;
u32 val;
spin_lock_irqsave(&priv->lock, flags);
val = readl(base + RESET);
val |= id ? PHY_RESET_PORT2 : PHY_RESET_PORT1;
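	/* Once both PHY ports are held in reset, put the PLL into reset too */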
if (port_mask == (val & port_mask))
val |= RESET_PLLRESET;
writel(val, base + RESET);
spin_unlock_irqrestore(&priv->lock, flags);
return 0;
}
static int rzg2l_usbphy_ctrl_deassert(struct reset_controller_dev *rcdev,
unsigned long id)
{
struct rzg2l_usbphy_ctrl_priv *priv = rcdev_to_priv(rcdev);
void __iomem *base = priv->base;
unsigned long flags;
u32 val;
spin_lock_irqsave(&priv->lock, flags);
val = readl(base + RESET);
val |= RESET_SEL_PLLRESET;
val &= ~(RESET_PLLRESET | (id ? PHY_RESET_PORT2 : PHY_RESET_PORT1));
writel(val, base + RESET);
spin_unlock_irqrestore(&priv->lock, flags);
return 0;
}
static int rzg2l_usbphy_ctrl_status(struct reset_controller_dev *rcdev,
unsigned long id)
{
struct rzg2l_usbphy_ctrl_priv *priv = rcdev_to_priv(rcdev);
u32 port_mask;
port_mask = id ? PHY_RESET_PORT2 : PHY_RESET_PORT1;
return !!(readl(priv->base + RESET) & port_mask);
}
static const struct of_device_id rzg2l_usbphy_ctrl_match_table[] = {
{ .compatible = "renesas,rzg2l-usbphy-ctrl" },
{ /* Sentinel */ }
};
MODULE_DEVICE_TABLE(of, rzg2l_usbphy_ctrl_match_table);
static const struct reset_control_ops rzg2l_usbphy_ctrl_reset_ops = {
.assert = rzg2l_usbphy_ctrl_assert,
.deassert = rzg2l_usbphy_ctrl_deassert,
.status = rzg2l_usbphy_ctrl_status,
};
static int rzg2l_usbphy_ctrl_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct rzg2l_usbphy_ctrl_priv *priv;
unsigned long flags;
int error;
u32 val;
priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
priv->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(priv->base))
return PTR_ERR(priv->base);
priv->rstc = devm_reset_control_get_exclusive(&pdev->dev, NULL);
if (IS_ERR(priv->rstc))
return dev_err_probe(dev, PTR_ERR(priv->rstc),
"failed to get reset\n");
error = reset_control_deassert(priv->rstc);
if (error)
return error;
priv->rcdev.ops = &rzg2l_usbphy_ctrl_reset_ops;
priv->rcdev.of_reset_n_cells = 1;
priv->rcdev.nr_resets = NUM_PORTS;
priv->rcdev.of_node = dev->of_node;
priv->rcdev.dev = dev;
error = devm_reset_controller_register(dev, &priv->rcdev);
if (error)
return error;
spin_lock_init(&priv->lock);
dev_set_drvdata(dev, priv);
pm_runtime_enable(&pdev->dev);
error = pm_runtime_resume_and_get(&pdev->dev);
if (error < 0) {
pm_runtime_disable(&pdev->dev);
reset_control_assert(priv->rstc);
return dev_err_probe(&pdev->dev, error, "pm_runtime_resume_and_get failed");
}
/* put pll and phy into reset state */
spin_lock_irqsave(&priv->lock, flags);
val = readl(priv->base + RESET);
val |= RESET_SEL_PLLRESET | RESET_PLLRESET | PHY_RESET_PORT2 | PHY_RESET_PORT1;
writel(val, priv->base + RESET);
spin_unlock_irqrestore(&priv->lock, flags);
return 0;
}
static int rzg2l_usbphy_ctrl_remove(struct platform_device *pdev)
{
struct rzg2l_usbphy_ctrl_priv *priv = dev_get_drvdata(&pdev->dev);
pm_runtime_put(&pdev->dev);
pm_runtime_disable(&pdev->dev);
reset_control_assert(priv->rstc);
return 0;
}
static struct platform_driver rzg2l_usbphy_ctrl_driver = {
.driver = {
.name = "rzg2l_usbphy_ctrl",
.of_match_table = rzg2l_usbphy_ctrl_match_table,
},
.probe = rzg2l_usbphy_ctrl_probe,
.remove = rzg2l_usbphy_ctrl_remove,
};
module_platform_driver(rzg2l_usbphy_ctrl_driver);
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Renesas RZ/G2L USBPHY Control");
MODULE_AUTHOR("[email protected]>");
| linux-master | drivers/reset/reset-rzg2l-usbphy-ctrl.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
*
* Copyright (C) 2010 John Crispin <[email protected]>
* Copyright (C) 2013-2015 Lantiq Beteiligungs-GmbH & Co.KG
* Copyright (C) 2016 Martin Blumenstingl <[email protected]>
* Copyright (C) 2017 Hauke Mehrtens <[email protected]>
*/
#include <linux/mfd/syscon.h>
#include <linux/module.h>
#include <linux/regmap.h>
#include <linux/reset-controller.h>
#include <linux/of_address.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/property.h>
#define LANTIQ_RCU_RESET_TIMEOUT 10000
struct lantiq_rcu_reset_priv {
struct reset_controller_dev rcdev;
struct device *dev;
struct regmap *regmap;
u32 reset_offset;
u32 status_offset;
};
static struct lantiq_rcu_reset_priv *to_lantiq_rcu_reset_priv(
struct reset_controller_dev *rcdev)
{
return container_of(rcdev, struct lantiq_rcu_reset_priv, rcdev);
}
static int lantiq_rcu_reset_status(struct reset_controller_dev *rcdev,
unsigned long id)
{
struct lantiq_rcu_reset_priv *priv = to_lantiq_rcu_reset_priv(rcdev);
unsigned int status = (id >> 8) & 0x1f;
u32 val;
int ret;
ret = regmap_read(priv->regmap, priv->status_offset, &val);
if (ret)
return ret;
return !!(val & BIT(status));
}
static int lantiq_rcu_reset_status_timeout(struct reset_controller_dev *rcdev,
unsigned long id, bool assert)
{
int ret;
int retry = LANTIQ_RCU_RESET_TIMEOUT;
do {
ret = lantiq_rcu_reset_status(rcdev, id);
if (ret < 0)
return ret;
if (ret == assert)
return 0;
usleep_range(20, 40);
} while (--retry);
return -ETIMEDOUT;
}
static int lantiq_rcu_reset_update(struct reset_controller_dev *rcdev,
unsigned long id, bool assert)
{
struct lantiq_rcu_reset_priv *priv = to_lantiq_rcu_reset_priv(rcdev);
unsigned int set = id & 0x1f;
u32 val = assert ? BIT(set) : 0;
int ret;
ret = regmap_update_bits(priv->regmap, priv->reset_offset, BIT(set),
val);
if (ret) {
dev_err(priv->dev, "Failed to set reset bit %u\n", set);
return ret;
}
ret = lantiq_rcu_reset_status_timeout(rcdev, id, assert);
if (ret)
dev_err(priv->dev, "Failed to %s bit %u\n",
assert ? "assert" : "deassert", set);
return ret;
}
static int lantiq_rcu_reset_assert(struct reset_controller_dev *rcdev,
unsigned long id)
{
return lantiq_rcu_reset_update(rcdev, id, true);
}
static int lantiq_rcu_reset_deassert(struct reset_controller_dev *rcdev,
unsigned long id)
{
return lantiq_rcu_reset_update(rcdev, id, false);
}
static int lantiq_rcu_reset_reset(struct reset_controller_dev *rcdev,
unsigned long id)
{
int ret;
ret = lantiq_rcu_reset_assert(rcdev, id);
if (ret)
return ret;
return lantiq_rcu_reset_deassert(rcdev, id);
}
static const struct reset_control_ops lantiq_rcu_reset_ops = {
.assert = lantiq_rcu_reset_assert,
.deassert = lantiq_rcu_reset_deassert,
.status = lantiq_rcu_reset_status,
.reset = lantiq_rcu_reset_reset,
};
static int lantiq_rcu_reset_of_parse(struct platform_device *pdev,
struct lantiq_rcu_reset_priv *priv)
{
struct device *dev = &pdev->dev;
const __be32 *offset;
priv->regmap = syscon_node_to_regmap(dev->of_node->parent);
if (IS_ERR(priv->regmap)) {
dev_err(&pdev->dev, "Failed to lookup RCU regmap\n");
return PTR_ERR(priv->regmap);
}
offset = of_get_address(dev->of_node, 0, NULL, NULL);
if (!offset) {
dev_err(&pdev->dev, "Failed to get RCU reset offset\n");
return -ENOENT;
}
priv->reset_offset = __be32_to_cpu(*offset);
offset = of_get_address(dev->of_node, 1, NULL, NULL);
if (!offset) {
dev_err(&pdev->dev, "Failed to get RCU status offset\n");
return -ENOENT;
}
priv->status_offset = __be32_to_cpu(*offset);
return 0;
}
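/*
 * The two DT cells (reset bit, status bit) are packed into a single id:
 * the status bit index is stored in bits 15:8 and the reset bit index in
 * the low bits, matching how the callbacks above unpack them.
 */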
static int lantiq_rcu_reset_xlate(struct reset_controller_dev *rcdev,
const struct of_phandle_args *reset_spec)
{
unsigned int status, set;
set = reset_spec->args[0];
status = reset_spec->args[1];
if (set >= rcdev->nr_resets || status >= rcdev->nr_resets)
return -EINVAL;
return (status << 8) | set;
}
static int lantiq_rcu_reset_probe(struct platform_device *pdev)
{
struct lantiq_rcu_reset_priv *priv;
int err;
priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
priv->dev = &pdev->dev;
err = lantiq_rcu_reset_of_parse(pdev, priv);
if (err)
return err;
priv->rcdev.ops = &lantiq_rcu_reset_ops;
priv->rcdev.owner = THIS_MODULE;
priv->rcdev.of_node = pdev->dev.of_node;
priv->rcdev.nr_resets = 32;
priv->rcdev.of_xlate = lantiq_rcu_reset_xlate;
priv->rcdev.of_reset_n_cells = 2;
return devm_reset_controller_register(&pdev->dev, &priv->rcdev);
}
static const struct of_device_id lantiq_rcu_reset_dt_ids[] = {
{ .compatible = "lantiq,danube-reset", },
{ .compatible = "lantiq,xrx200-reset", },
{ },
};
MODULE_DEVICE_TABLE(of, lantiq_rcu_reset_dt_ids);
static struct platform_driver lantiq_rcu_reset_driver = {
.probe = lantiq_rcu_reset_probe,
.driver = {
.name = "lantiq-reset",
.of_match_table = lantiq_rcu_reset_dt_ids,
},
};
module_platform_driver(lantiq_rcu_reset_driver);
MODULE_AUTHOR("Martin Blumenstingl <[email protected]>");
MODULE_DESCRIPTION("Lantiq XWAY RCU Reset Controller Driver");
| linux-master | drivers/reset/reset-lantiq.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (c) 2019 Intel Corporation.
* Lei Chuanhua <[email protected]>
*/
#include <linux/bitfield.h>
#include <linux/init.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/reboot.h>
#include <linux/regmap.h>
#include <linux/reset-controller.h>
#define RCU_RST_STAT 0x0024
#define RCU_RST_REQ 0x0048
#define REG_OFFSET_MASK GENMASK(31, 16)
#define BIT_OFFSET_MASK GENMASK(15, 8)
#define STAT_BIT_OFFSET_MASK GENMASK(7, 0)
#define to_reset_data(x) container_of(x, struct intel_reset_data, rcdev)
struct intel_reset_soc {
bool legacy;
u32 reset_cell_count;
};
struct intel_reset_data {
struct reset_controller_dev rcdev;
struct notifier_block restart_nb;
const struct intel_reset_soc *soc_data;
struct regmap *regmap;
struct device *dev;
u32 reboot_id;
};
static const struct regmap_config intel_rcu_regmap_config = {
.name = "intel-reset",
.reg_bits = 32,
.reg_stride = 4,
.val_bits = 32,
.fast_io = true,
};
/*
* Reset status register offset relative to
* the reset control register(X) is X + 4
*/
static u32 id_to_reg_and_bit_offsets(struct intel_reset_data *data,
unsigned long id, u32 *rst_req,
u32 *req_bit, u32 *stat_bit)
{
*rst_req = FIELD_GET(REG_OFFSET_MASK, id);
*req_bit = FIELD_GET(BIT_OFFSET_MASK, id);
if (data->soc_data->legacy)
*stat_bit = FIELD_GET(STAT_BIT_OFFSET_MASK, id);
else
*stat_bit = *req_bit;
if (data->soc_data->legacy && *rst_req == RCU_RST_REQ)
return RCU_RST_STAT;
else
return *rst_req + 0x4;
}
static int intel_set_clr_bits(struct intel_reset_data *data, unsigned long id,
bool set)
{
u32 rst_req, req_bit, rst_stat, stat_bit, val;
int ret;
rst_stat = id_to_reg_and_bit_offsets(data, id, &rst_req,
&req_bit, &stat_bit);
val = set ? BIT(req_bit) : 0;
ret = regmap_update_bits(data->regmap, rst_req, BIT(req_bit), val);
if (ret)
return ret;
return regmap_read_poll_timeout(data->regmap, rst_stat, val,
set == !!(val & BIT(stat_bit)), 20,
200);
}
static int intel_assert_device(struct reset_controller_dev *rcdev,
unsigned long id)
{
struct intel_reset_data *data = to_reset_data(rcdev);
int ret;
ret = intel_set_clr_bits(data, id, true);
if (ret)
dev_err(data->dev, "Reset assert failed %d\n", ret);
return ret;
}
static int intel_deassert_device(struct reset_controller_dev *rcdev,
unsigned long id)
{
struct intel_reset_data *data = to_reset_data(rcdev);
int ret;
ret = intel_set_clr_bits(data, id, false);
if (ret)
dev_err(data->dev, "Reset deassert failed %d\n", ret);
return ret;
}
static int intel_reset_status(struct reset_controller_dev *rcdev,
unsigned long id)
{
struct intel_reset_data *data = to_reset_data(rcdev);
u32 rst_req, req_bit, rst_stat, stat_bit, val;
int ret;
rst_stat = id_to_reg_and_bit_offsets(data, id, &rst_req,
&req_bit, &stat_bit);
ret = regmap_read(data->regmap, rst_stat, &val);
if (ret)
return ret;
return !!(val & BIT(stat_bit));
}
static const struct reset_control_ops intel_reset_ops = {
.assert = intel_assert_device,
.deassert = intel_deassert_device,
.status = intel_reset_status,
};
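/*
 * A reset line is identified by a packed id: the register offset is in
 * bits 31:16, the request bit in bits 15:8 and, on the legacy (xrx200)
 * controller, a separate status bit in bits 7:0. The of_xlate callback
 * below builds this id from the DT cells.
 */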
static int intel_reset_xlate(struct reset_controller_dev *rcdev,
const struct of_phandle_args *spec)
{
struct intel_reset_data *data = to_reset_data(rcdev);
u32 id;
if (spec->args[1] > 31)
return -EINVAL;
id = FIELD_PREP(REG_OFFSET_MASK, spec->args[0]);
id |= FIELD_PREP(BIT_OFFSET_MASK, spec->args[1]);
if (data->soc_data->legacy) {
if (spec->args[2] > 31)
return -EINVAL;
id |= FIELD_PREP(STAT_BIT_OFFSET_MASK, spec->args[2]);
}
return id;
}
static int intel_reset_restart_handler(struct notifier_block *nb,
unsigned long action, void *data)
{
struct intel_reset_data *reset_data;
reset_data = container_of(nb, struct intel_reset_data, restart_nb);
intel_assert_device(&reset_data->rcdev, reset_data->reboot_id);
return NOTIFY_DONE;
}
static int intel_reset_probe(struct platform_device *pdev)
{
struct device_node *np = pdev->dev.of_node;
struct device *dev = &pdev->dev;
struct intel_reset_data *data;
void __iomem *base;
u32 rb_id[3];
int ret;
data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
if (!data)
return -ENOMEM;
data->soc_data = of_device_get_match_data(dev);
if (!data->soc_data)
return -ENODEV;
base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(base))
return PTR_ERR(base);
data->regmap = devm_regmap_init_mmio(dev, base,
&intel_rcu_regmap_config);
if (IS_ERR(data->regmap)) {
dev_err(dev, "regmap initialization failed\n");
return PTR_ERR(data->regmap);
}
ret = device_property_read_u32_array(dev, "intel,global-reset", rb_id,
data->soc_data->reset_cell_count);
if (ret) {
dev_err(dev, "Failed to get global reset offset!\n");
return ret;
}
data->dev = dev;
data->rcdev.of_node = np;
data->rcdev.owner = dev->driver->owner;
data->rcdev.ops = &intel_reset_ops;
data->rcdev.of_xlate = intel_reset_xlate;
data->rcdev.of_reset_n_cells = data->soc_data->reset_cell_count;
ret = devm_reset_controller_register(&pdev->dev, &data->rcdev);
if (ret)
return ret;
data->reboot_id = FIELD_PREP(REG_OFFSET_MASK, rb_id[0]);
data->reboot_id |= FIELD_PREP(BIT_OFFSET_MASK, rb_id[1]);
if (data->soc_data->legacy)
data->reboot_id |= FIELD_PREP(STAT_BIT_OFFSET_MASK, rb_id[2]);
data->restart_nb.notifier_call = intel_reset_restart_handler;
data->restart_nb.priority = 128;
register_restart_handler(&data->restart_nb);
return 0;
}
static const struct intel_reset_soc xrx200_data = {
.legacy = true,
.reset_cell_count = 3,
};
static const struct intel_reset_soc lgm_data = {
.legacy = false,
.reset_cell_count = 2,
};
static const struct of_device_id intel_reset_match[] = {
{ .compatible = "intel,rcu-lgm", .data = &lgm_data },
{ .compatible = "intel,rcu-xrx200", .data = &xrx200_data },
{}
};
static struct platform_driver intel_reset_driver = {
.probe = intel_reset_probe,
.driver = {
.name = "intel-reset",
.of_match_table = intel_reset_match,
},
};
static int __init intel_reset_init(void)
{
return platform_driver_register(&intel_reset_driver);
}
/*
* The RCU is a system core entity in the Always On Domain whose clock
* and resource initialization happens during system core initialization.
* Most platform and architecture specific devices also need to perform a
* reset operation as part of their initialization.
* So register the RCU driver as a postcore initcall.
*/
postcore_initcall(intel_reset_init);
| linux-master | drivers/reset/reset-intel-gw.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Raspberry Pi 4 firmware reset driver
*
* Copyright (C) 2020 Nicolas Saenz Julienne <[email protected]>
*/
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/reset-controller.h>
#include <soc/bcm2835/raspberrypi-firmware.h>
#include <dt-bindings/reset/raspberrypi,firmware-reset.h>
struct rpi_reset {
struct reset_controller_dev rcdev;
struct rpi_firmware *fw;
};
static inline struct rpi_reset *to_rpi(struct reset_controller_dev *rcdev)
{
return container_of(rcdev, struct rpi_reset, rcdev);
}
static int rpi_reset_reset(struct reset_controller_dev *rcdev, unsigned long id)
{
struct rpi_reset *priv = to_rpi(rcdev);
u32 dev_addr;
int ret;
switch (id) {
case RASPBERRYPI_FIRMWARE_RESET_ID_USB:
/*
* The Raspberry Pi 4 gets its USB functionality from VL805, a
* PCIe chip that implements xHCI. After a PCI reset, VL805's
* firmware may either be loaded directly from an EEPROM or, if
* not present, by the SoC's co-processor, VideoCore. rpi's
* VideoCore OS contains both the non public firmware load
* logic and the VL805 firmware blob. This triggers the
* aforementioned process.
*
* The PCI device address is expected by the firmware
* encoded like this:
*
* PCI_BUS << 20 | PCI_SLOT << 15 | PCI_FUNC << 12
*
* But since rpi's PCIe is hardwired, we know the address in
* advance.
*/
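		/* Bus 1, slot 0, function 0: 1 << 20 == 0x100000 */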
dev_addr = 0x100000;
ret = rpi_firmware_property(priv->fw, RPI_FIRMWARE_NOTIFY_XHCI_RESET,
&dev_addr, sizeof(dev_addr));
if (ret)
return ret;
/* Wait for vl805 to startup */
usleep_range(200, 1000);
break;
default:
return -EINVAL;
}
return 0;
}
static const struct reset_control_ops rpi_reset_ops = {
.reset = rpi_reset_reset,
};
static int rpi_reset_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct rpi_firmware *fw;
struct device_node *np;
struct rpi_reset *priv;
np = of_get_parent(dev->of_node);
if (!np) {
dev_err(dev, "Missing firmware node\n");
return -ENOENT;
}
fw = devm_rpi_firmware_get(&pdev->dev, np);
of_node_put(np);
if (!fw)
return -EPROBE_DEFER;
priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
dev_set_drvdata(dev, priv);
priv->fw = fw;
priv->rcdev.owner = THIS_MODULE;
priv->rcdev.nr_resets = RASPBERRYPI_FIRMWARE_RESET_NUM_IDS;
priv->rcdev.ops = &rpi_reset_ops;
priv->rcdev.of_node = dev->of_node;
return devm_reset_controller_register(dev, &priv->rcdev);
}
static const struct of_device_id rpi_reset_of_match[] = {
{ .compatible = "raspberrypi,firmware-reset" },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, rpi_reset_of_match);
static struct platform_driver rpi_reset_driver = {
.probe = rpi_reset_probe,
.driver = {
.name = "raspberrypi-reset",
.of_match_table = rpi_reset_of_match,
},
};
module_platform_driver(rpi_reset_driver);
MODULE_AUTHOR("Nicolas Saenz Julienne <[email protected]>");
MODULE_DESCRIPTION("Raspberry Pi 4 firmware reset driver");
MODULE_LICENSE("GPL");
| linux-master | drivers/reset/reset-raspberrypi.c |