2 * Copyright(c) 2007 - 2008 Intel Corporation. All rights reserved.
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms and conditions of the GNU General Public License,
6 * version 2, as published by the Free Software Foundation.
8 * This program is distributed in the hope it will be useful, but WITHOUT
9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * You should have received a copy of the GNU General Public License along with
14 * this program; if not, write to the Free Software Foundation, Inc.,
15 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
17 * Maintained at www.Open-FCoE.org
23 * This file contains all processing regarding fc_rports. It contains the
24 * rport state machine and does all rport interaction with the transport class.
25 * There should be no other places in libfc that interact directly with the
26 * transport class in regards to adding and deleting rports.
28 * fc_rport's represent N_Port's within the fabric.
34 * The rport should never hold the rport mutex and then attempt to acquire
35 * either the lport or disc mutexes. The rport's mutex is considered lesser
36 * than both the lport's mutex and the disc mutex. Refer to fc_lport.c for
37 * more comments on the hierarchy.
39 * The locking strategy is similar to the lport's strategy. The lock protects
40 * the rport's states and is held and released by the entry points to the rport
41 * block. All _enter_* functions correspond to rport states and expect the rport
42 * mutex to be locked before calling them. This means that rports only handle
43 * one request or response at a time, since they're not critical for the I/O
44 * path this potential over-use of the mutex is acceptable.
47 #include <linux/kernel.h>
48 #include <linux/spinlock.h>
49 #include <linux/interrupt.h>
50 #include <linux/rcupdate.h>
51 #include <linux/timer.h>
52 #include <linux/workqueue.h>
53 #include <asm/unaligned.h>
55 #include <scsi/libfc.h>
56 #include <scsi/fc_encode.h>
58 struct workqueue_struct *rport_event_queue;
60 static void fc_rport_enter_plogi(struct fc_rport_priv *);
61 static void fc_rport_enter_prli(struct fc_rport_priv *);
62 static void fc_rport_enter_rtv(struct fc_rport_priv *);
63 static void fc_rport_enter_ready(struct fc_rport_priv *);
64 static void fc_rport_enter_logo(struct fc_rport_priv *);
66 static void fc_rport_recv_plogi_req(struct fc_lport *,
67 struct fc_seq *, struct fc_frame *);
68 static void fc_rport_recv_prli_req(struct fc_rport_priv *,
69 struct fc_seq *, struct fc_frame *);
70 static void fc_rport_recv_prlo_req(struct fc_rport_priv *,
71 struct fc_seq *, struct fc_frame *);
72 static void fc_rport_recv_logo_req(struct fc_lport *,
73 struct fc_seq *, struct fc_frame *);
74 static void fc_rport_timeout(struct work_struct *);
75 static void fc_rport_error(struct fc_rport_priv *, struct fc_frame *);
76 static void fc_rport_error_retry(struct fc_rport_priv *, struct fc_frame *);
77 static void fc_rport_work(struct work_struct *);
79 static const char *fc_rport_state_names[] = {
80 [RPORT_ST_INIT] = "Init",
81 [RPORT_ST_PLOGI] = "PLOGI",
82 [RPORT_ST_PRLI] = "PRLI",
83 [RPORT_ST_RTV] = "RTV",
84 [RPORT_ST_READY] = "Ready",
85 [RPORT_ST_LOGO] = "LOGO",
86 [RPORT_ST_DELETE] = "Delete",
90 * fc_rport_lookup() - lookup a remote port by port_id
91 * @lport: Fibre Channel host port instance
92 * @port_id: remote port port_id to match
94 static struct fc_rport_priv *fc_rport_lookup(const struct fc_lport *lport,
97 struct fc_rport_priv *rdata;
99 list_for_each_entry(rdata, &lport->disc.rports, peers)
100 if (rdata->ids.port_id == port_id &&
101 rdata->rp_state != RPORT_ST_DELETE)
107 * fc_rport_create() - Create a new remote port
108 * @lport: The local port that the new remote port is for
109 * @port_id: The port ID for the new remote port
111 * Locking note: must be called with the disc_mutex held.
113 static struct fc_rport_priv *fc_rport_create(struct fc_lport *lport,
116 struct fc_rport_priv *rdata;
118 rdata = lport->tt.rport_lookup(lport, port_id);
122 rdata = kzalloc(sizeof(*rdata), GFP_KERNEL);
126 rdata->ids.node_name = -1;
127 rdata->ids.port_name = -1;
128 rdata->ids.port_id = port_id;
129 rdata->ids.roles = FC_RPORT_ROLE_UNKNOWN;
131 kref_init(&rdata->kref);
132 mutex_init(&rdata->rp_mutex);
133 rdata->local_port = lport;
134 rdata->rp_state = RPORT_ST_INIT;
135 rdata->event = RPORT_EV_NONE;
136 rdata->flags = FC_RP_FLAGS_REC_SUPPORTED;
137 rdata->e_d_tov = lport->e_d_tov;
138 rdata->r_a_tov = lport->r_a_tov;
139 rdata->maxframe_size = FC_MIN_MAX_PAYLOAD;
140 INIT_DELAYED_WORK(&rdata->retry_work, fc_rport_timeout);
141 INIT_WORK(&rdata->event_work, fc_rport_work);
142 if (port_id != FC_FID_DIR_SERV)
143 list_add(&rdata->peers, &lport->disc.rports);
148 * fc_rport_destroy() - free a remote port after last reference is released.
149 * @kref: pointer to kref inside struct fc_rport_priv
151 static void fc_rport_destroy(struct kref *kref)
153 struct fc_rport_priv *rdata;
155 rdata = container_of(kref, struct fc_rport_priv, kref);
160 * fc_rport_state() - return a string for the state the rport is in
161 * @rdata: remote port private data
163 static const char *fc_rport_state(struct fc_rport_priv *rdata)
167 cp = fc_rport_state_names[rdata->rp_state];
174 * fc_set_rport_loss_tmo() - Set the remote port loss timeout in seconds.
175 * @rport: Pointer to Fibre Channel remote port structure
176 * @timeout: timeout in seconds
178 void fc_set_rport_loss_tmo(struct fc_rport *rport, u32 timeout)
181 rport->dev_loss_tmo = timeout + 5;
183 rport->dev_loss_tmo = 30;
185 EXPORT_SYMBOL(fc_set_rport_loss_tmo);
188 * fc_plogi_get_maxframe() - Get max payload from the common service parameters
189 * @flp: FLOGI payload structure
190 * @maxval: upper limit, may be less than what is in the service parameters
192 static unsigned int fc_plogi_get_maxframe(struct fc_els_flogi *flp,
198 * Get max payload from the common service parameters and the
199 * class 3 receive data field size.
201 mfs = ntohs(flp->fl_csp.sp_bb_data) & FC_SP_BB_DATA_MASK;
202 if (mfs >= FC_SP_MIN_MAX_PAYLOAD && mfs < maxval)
204 mfs = ntohs(flp->fl_cssp[3 - 1].cp_rdfs);
205 if (mfs >= FC_SP_MIN_MAX_PAYLOAD && mfs < maxval)
211 * fc_rport_state_enter() - Change the rport's state
212 * @rdata: The rport whose state should change
213 * @new: The new state of the rport
215 * Locking Note: Called with the rport lock held
217 static void fc_rport_state_enter(struct fc_rport_priv *rdata,
218 enum fc_rport_state new)
220 if (rdata->rp_state != new)
222 rdata->rp_state = new;
225 static void fc_rport_work(struct work_struct *work)
228 struct fc_rport_priv *rdata =
229 container_of(work, struct fc_rport_priv, event_work);
230 struct fc_rport_libfc_priv *rp;
231 enum fc_rport_event event;
232 struct fc_lport *lport = rdata->local_port;
233 struct fc_rport_operations *rport_ops;
234 struct fc_rport_identifiers ids;
235 struct fc_rport *rport;
237 mutex_lock(&rdata->rp_mutex);
238 event = rdata->event;
239 rport_ops = rdata->ops;
240 rport = rdata->rport;
242 FC_RPORT_DBG(rdata, "work event %u\n", event);
247 rdata->event = RPORT_EV_NONE;
248 kref_get(&rdata->kref);
249 mutex_unlock(&rdata->rp_mutex);
252 rport = fc_remote_port_add(lport->host, 0, &ids);
254 FC_RPORT_DBG(rdata, "Failed to add the rport\n");
255 lport->tt.rport_logoff(rdata);
256 kref_put(&rdata->kref, lport->tt.rport_destroy);
259 mutex_lock(&rdata->rp_mutex);
261 FC_RPORT_DBG(rdata, "rport already allocated\n");
262 rdata->rport = rport;
263 rport->maxframe_size = rdata->maxframe_size;
264 rport->supported_classes = rdata->supported_classes;
267 rp->local_port = lport;
268 rp->rp_state = rdata->rp_state;
269 rp->flags = rdata->flags;
270 rp->e_d_tov = rdata->e_d_tov;
271 rp->r_a_tov = rdata->r_a_tov;
272 mutex_unlock(&rdata->rp_mutex);
274 if (rport_ops && rport_ops->event_callback) {
275 FC_RPORT_DBG(rdata, "callback ev %d\n", event);
276 rport_ops->event_callback(lport, rdata, event);
278 kref_put(&rdata->kref, lport->tt.rport_destroy);
281 case RPORT_EV_FAILED:
284 port_id = rdata->ids.port_id;
285 mutex_unlock(&rdata->rp_mutex);
287 if (port_id != FC_FID_DIR_SERV) {
288 mutex_lock(&lport->disc.disc_mutex);
289 list_del(&rdata->peers);
290 mutex_unlock(&lport->disc.disc_mutex);
293 if (rport_ops && rport_ops->event_callback) {
294 FC_RPORT_DBG(rdata, "callback ev %d\n", event);
295 rport_ops->event_callback(lport, rdata, event);
297 cancel_delayed_work_sync(&rdata->retry_work);
300 * Reset any outstanding exchanges before freeing rport.
302 lport->tt.exch_mgr_reset(lport, 0, port_id);
303 lport->tt.exch_mgr_reset(lport, port_id, 0);
307 rp->rp_state = RPORT_ST_DELETE;
308 mutex_lock(&rdata->rp_mutex);
310 mutex_unlock(&rdata->rp_mutex);
311 fc_remote_port_delete(rport);
313 kref_put(&rdata->kref, lport->tt.rport_destroy);
317 mutex_unlock(&rdata->rp_mutex);
323 * fc_rport_login() - Start the remote port login state machine
324 * @rdata: private remote port
326 * Locking Note: Called without the rport lock held. This
327 * function will hold the rport lock, call an _enter_*
328 * function and then unlock the rport.
330 int fc_rport_login(struct fc_rport_priv *rdata)
332 mutex_lock(&rdata->rp_mutex);
334 FC_RPORT_DBG(rdata, "Login to port\n");
336 fc_rport_enter_plogi(rdata);
338 mutex_unlock(&rdata->rp_mutex);
344 * fc_rport_enter_delete() - schedule a remote port to be deleted.
345 * @rdata: private remote port
346 * @event: event to report as the reason for deletion
348 * Locking Note: Called with the rport lock held.
350 * Allow state change into DELETE only once.
352 * Call queue_work only if there's no event already pending.
353 * Set the new event so that the old pending event will not occur.
354 * Since we have the mutex, even if fc_rport_work() is already started,
355 * it'll see the new event.
357 static void fc_rport_enter_delete(struct fc_rport_priv *rdata,
358 enum fc_rport_event event)
360 if (rdata->rp_state == RPORT_ST_DELETE)
363 FC_RPORT_DBG(rdata, "Delete port\n");
365 fc_rport_state_enter(rdata, RPORT_ST_DELETE);
367 if (rdata->event == RPORT_EV_NONE)
368 queue_work(rport_event_queue, &rdata->event_work);
369 rdata->event = event;
373 * fc_rport_logoff() - Logoff and remove an rport
374 * @rdata: private remote port
376 * Locking Note: Called without the rport lock held. This
377 * function will hold the rport lock, call an _enter_*
378 * function and then unlock the rport.
380 int fc_rport_logoff(struct fc_rport_priv *rdata)
382 mutex_lock(&rdata->rp_mutex);
384 FC_RPORT_DBG(rdata, "Remove port\n");
386 if (rdata->rp_state == RPORT_ST_DELETE) {
387 FC_RPORT_DBG(rdata, "Port in Delete state, not removing\n");
388 mutex_unlock(&rdata->rp_mutex);
392 fc_rport_enter_logo(rdata);
395 * Change the state to Delete so that we discard
398 fc_rport_enter_delete(rdata, RPORT_EV_STOP);
399 mutex_unlock(&rdata->rp_mutex);
406 * fc_rport_enter_ready() - The rport is ready
407 * @rdata: private remote port
409 * Locking Note: The rport lock is expected to be held before calling
412 static void fc_rport_enter_ready(struct fc_rport_priv *rdata)
414 fc_rport_state_enter(rdata, RPORT_ST_READY);
416 FC_RPORT_DBG(rdata, "Port is Ready\n");
418 if (rdata->event == RPORT_EV_NONE)
419 queue_work(rport_event_queue, &rdata->event_work);
420 rdata->event = RPORT_EV_READY;
424 * fc_rport_timeout() - Handler for the retry_work timer.
425 * @work: The work struct of the fc_rport_priv
427 * Locking Note: Called without the rport lock held. This
428 * function will hold the rport lock, call an _enter_*
429 * function and then unlock the rport.
431 static void fc_rport_timeout(struct work_struct *work)
433 struct fc_rport_priv *rdata =
434 container_of(work, struct fc_rport_priv, retry_work.work);
436 mutex_lock(&rdata->rp_mutex);
438 switch (rdata->rp_state) {
440 fc_rport_enter_plogi(rdata);
443 fc_rport_enter_prli(rdata);
446 fc_rport_enter_rtv(rdata);
449 fc_rport_enter_logo(rdata);
453 case RPORT_ST_DELETE:
457 mutex_unlock(&rdata->rp_mutex);
461 * fc_rport_error() - Error handler, called once retries have been exhausted
462 * @rdata: private remote port
463 * @fp: The frame pointer
465 * Locking Note: The rport lock is expected to be held before
466 * calling this routine
468 static void fc_rport_error(struct fc_rport_priv *rdata, struct fc_frame *fp)
470 FC_RPORT_DBG(rdata, "Error %ld in state %s, retries %d\n",
471 IS_ERR(fp) ? -PTR_ERR(fp) : 0,
472 fc_rport_state(rdata), rdata->retries);
474 switch (rdata->rp_state) {
478 fc_rport_enter_delete(rdata, RPORT_EV_FAILED);
481 fc_rport_enter_ready(rdata);
483 case RPORT_ST_DELETE:
491 * fc_rport_error_retry() - Error handler when retries are desired
492 * @rdata: private remote port data
493 * @fp: The frame pointer
495 * If the error was an exchange timeout retry immediately,
496 * otherwise wait for E_D_TOV.
498 * Locking Note: The rport lock is expected to be held before
499 * calling this routine
501 static void fc_rport_error_retry(struct fc_rport_priv *rdata,
504 unsigned long delay = FC_DEF_E_D_TOV;
506 /* make sure this isn't an FC_EX_CLOSED error, never retry those */
507 if (PTR_ERR(fp) == -FC_EX_CLOSED)
508 return fc_rport_error(rdata, fp);
510 if (rdata->retries < rdata->local_port->max_rport_retry_count) {
511 FC_RPORT_DBG(rdata, "Error %ld in state %s, retrying\n",
512 PTR_ERR(fp), fc_rport_state(rdata));
514 /* no additional delay on exchange timeouts */
515 if (PTR_ERR(fp) == -FC_EX_TIMEOUT)
517 schedule_delayed_work(&rdata->retry_work, delay);
521 return fc_rport_error(rdata, fp);
525 * fc_rport_plogi_recv_resp() - Handle incoming ELS PLOGI response
526 * @sp: current sequence in the PLOGI exchange
527 * @fp: response frame
528 * @rdata_arg: private remote port data
530 * Locking Note: This function will be called without the rport lock
531 * held, but it will lock, call an _enter_* function or fc_rport_error
532 * and then unlock the rport.
534 static void fc_rport_plogi_resp(struct fc_seq *sp, struct fc_frame *fp,
537 struct fc_rport_priv *rdata = rdata_arg;
538 struct fc_lport *lport = rdata->local_port;
539 struct fc_els_flogi *plp = NULL;
545 mutex_lock(&rdata->rp_mutex);
547 FC_RPORT_DBG(rdata, "Received a PLOGI %s\n", fc_els_resp_type(fp));
549 if (rdata->rp_state != RPORT_ST_PLOGI) {
550 FC_RPORT_DBG(rdata, "Received a PLOGI response, but in state "
551 "%s\n", fc_rport_state(rdata));
558 fc_rport_error_retry(rdata, fp);
562 op = fc_frame_payload_op(fp);
563 if (op == ELS_LS_ACC &&
564 (plp = fc_frame_payload_get(fp, sizeof(*plp))) != NULL) {
565 rdata->ids.port_name = get_unaligned_be64(&plp->fl_wwpn);
566 rdata->ids.node_name = get_unaligned_be64(&plp->fl_wwnn);
568 tov = ntohl(plp->fl_csp.sp_e_d_tov);
569 if (ntohs(plp->fl_csp.sp_features) & FC_SP_FT_EDTR)
571 if (tov > rdata->e_d_tov)
572 rdata->e_d_tov = tov;
573 csp_seq = ntohs(plp->fl_csp.sp_tot_seq);
574 cssp_seq = ntohs(plp->fl_cssp[3 - 1].cp_con_seq);
575 if (cssp_seq < csp_seq)
577 rdata->max_seq = csp_seq;
578 rdata->maxframe_size = fc_plogi_get_maxframe(plp, lport->mfs);
579 fc_rport_enter_prli(rdata);
581 fc_rport_error_retry(rdata, fp);
586 mutex_unlock(&rdata->rp_mutex);
587 kref_put(&rdata->kref, rdata->local_port->tt.rport_destroy);
591 * fc_rport_enter_plogi() - Send Port Login (PLOGI) request to peer
592 * @rdata: private remote port data
594 * Locking Note: The rport lock is expected to be held before calling
597 static void fc_rport_enter_plogi(struct fc_rport_priv *rdata)
599 struct fc_lport *lport = rdata->local_port;
602 FC_RPORT_DBG(rdata, "Port entered PLOGI state from %s state\n",
603 fc_rport_state(rdata));
605 fc_rport_state_enter(rdata, RPORT_ST_PLOGI);
607 rdata->maxframe_size = FC_MIN_MAX_PAYLOAD;
608 fp = fc_frame_alloc(lport, sizeof(struct fc_els_flogi));
610 fc_rport_error_retry(rdata, fp);
613 rdata->e_d_tov = lport->e_d_tov;
615 if (!lport->tt.elsct_send(lport, rdata->ids.port_id, fp, ELS_PLOGI,
616 fc_rport_plogi_resp, rdata, lport->e_d_tov))
617 fc_rport_error_retry(rdata, fp);
619 kref_get(&rdata->kref);
623 * fc_rport_prli_resp() - Process Login (PRLI) response handler
624 * @sp: current sequence in the PRLI exchange
625 * @fp: response frame
626 * @rdata_arg: private remote port data
628 * Locking Note: This function will be called without the rport lock
629 * held, but it will lock, call an _enter_* function or fc_rport_error
630 * and then unlock the rport.
632 static void fc_rport_prli_resp(struct fc_seq *sp, struct fc_frame *fp,
635 struct fc_rport_priv *rdata = rdata_arg;
637 struct fc_els_prli prli;
638 struct fc_els_spp spp;
640 u32 roles = FC_RPORT_ROLE_UNKNOWN;
644 mutex_lock(&rdata->rp_mutex);
646 FC_RPORT_DBG(rdata, "Received a PRLI %s\n", fc_els_resp_type(fp));
648 if (rdata->rp_state != RPORT_ST_PRLI) {
649 FC_RPORT_DBG(rdata, "Received a PRLI response, but in state "
650 "%s\n", fc_rport_state(rdata));
657 fc_rport_error_retry(rdata, fp);
661 /* reinitialize remote port roles */
662 rdata->ids.roles = FC_RPORT_ROLE_UNKNOWN;
664 op = fc_frame_payload_op(fp);
665 if (op == ELS_LS_ACC) {
666 pp = fc_frame_payload_get(fp, sizeof(*pp));
667 if (pp && pp->prli.prli_spp_len >= sizeof(pp->spp)) {
668 fcp_parm = ntohl(pp->spp.spp_params);
669 if (fcp_parm & FCP_SPPF_RETRY)
670 rdata->flags |= FC_RP_FLAGS_RETRY;
673 rdata->supported_classes = FC_COS_CLASS3;
674 if (fcp_parm & FCP_SPPF_INIT_FCN)
675 roles |= FC_RPORT_ROLE_FCP_INITIATOR;
676 if (fcp_parm & FCP_SPPF_TARG_FCN)
677 roles |= FC_RPORT_ROLE_FCP_TARGET;
679 rdata->ids.roles = roles;
680 fc_rport_enter_rtv(rdata);
683 FC_RPORT_DBG(rdata, "Bad ELS response for PRLI command\n");
684 fc_rport_enter_delete(rdata, RPORT_EV_FAILED);
690 mutex_unlock(&rdata->rp_mutex);
691 kref_put(&rdata->kref, rdata->local_port->tt.rport_destroy);
695 * fc_rport_logo_resp() - Logout (LOGO) response handler
696 * @sp: current sequence in the LOGO exchange
697 * @fp: response frame
698 * @rdata_arg: private remote port data
700 * Locking Note: This function will be called without the rport lock
701 * held, but it will lock, call an _enter_* function or fc_rport_error
702 * and then unlock the rport.
704 static void fc_rport_logo_resp(struct fc_seq *sp, struct fc_frame *fp,
707 struct fc_rport_priv *rdata = rdata_arg;
710 mutex_lock(&rdata->rp_mutex);
712 FC_RPORT_DBG(rdata, "Received a LOGO %s\n", fc_els_resp_type(fp));
714 if (rdata->rp_state != RPORT_ST_LOGO) {
715 FC_RPORT_DBG(rdata, "Received a LOGO response, but in state "
716 "%s\n", fc_rport_state(rdata));
723 fc_rport_error_retry(rdata, fp);
727 op = fc_frame_payload_op(fp);
728 if (op == ELS_LS_ACC) {
729 fc_rport_enter_rtv(rdata);
731 FC_RPORT_DBG(rdata, "Bad ELS response for LOGO command\n");
732 fc_rport_enter_delete(rdata, RPORT_EV_LOGO);
738 mutex_unlock(&rdata->rp_mutex);
739 kref_put(&rdata->kref, rdata->local_port->tt.rport_destroy);
743 * fc_rport_enter_prli() - Send Process Login (PRLI) request to peer
744 * @rdata: private remote port data
746 * Locking Note: The rport lock is expected to be held before calling
749 static void fc_rport_enter_prli(struct fc_rport_priv *rdata)
751 struct fc_lport *lport = rdata->local_port;
753 struct fc_els_prli prli;
754 struct fc_els_spp spp;
759 * If the rport is one of the well known addresses
760 * we skip PRLI and RTV and go straight to READY.
762 if (rdata->ids.port_id >= FC_FID_DOM_MGR) {
763 fc_rport_enter_ready(rdata);
767 FC_RPORT_DBG(rdata, "Port entered PRLI state from %s state\n",
768 fc_rport_state(rdata));
770 fc_rport_state_enter(rdata, RPORT_ST_PRLI);
772 fp = fc_frame_alloc(lport, sizeof(*pp));
774 fc_rport_error_retry(rdata, fp);
778 if (!lport->tt.elsct_send(lport, rdata->ids.port_id, fp, ELS_PRLI,
779 fc_rport_prli_resp, rdata, lport->e_d_tov))
780 fc_rport_error_retry(rdata, fp);
782 kref_get(&rdata->kref);
786 * fc_rport_els_rtv_resp() - Request Timeout Value response handler
787 * @sp: current sequence in the RTV exchange
788 * @fp: response frame
789 * @rdata_arg: private remote port data
791 * Many targets don't seem to support this.
793 * Locking Note: This function will be called without the rport lock
794 * held, but it will lock, call an _enter_* function or fc_rport_error
795 * and then unlock the rport.
797 static void fc_rport_rtv_resp(struct fc_seq *sp, struct fc_frame *fp,
800 struct fc_rport_priv *rdata = rdata_arg;
803 mutex_lock(&rdata->rp_mutex);
805 FC_RPORT_DBG(rdata, "Received a RTV %s\n", fc_els_resp_type(fp));
807 if (rdata->rp_state != RPORT_ST_RTV) {
808 FC_RPORT_DBG(rdata, "Received a RTV response, but in state "
809 "%s\n", fc_rport_state(rdata));
816 fc_rport_error(rdata, fp);
820 op = fc_frame_payload_op(fp);
821 if (op == ELS_LS_ACC) {
822 struct fc_els_rtv_acc *rtv;
826 rtv = fc_frame_payload_get(fp, sizeof(*rtv));
828 toq = ntohl(rtv->rtv_toq);
829 tov = ntohl(rtv->rtv_r_a_tov);
832 rdata->r_a_tov = tov;
833 tov = ntohl(rtv->rtv_e_d_tov);
834 if (toq & FC_ELS_RTV_EDRES)
838 rdata->e_d_tov = tov;
842 fc_rport_enter_ready(rdata);
847 mutex_unlock(&rdata->rp_mutex);
848 kref_put(&rdata->kref, rdata->local_port->tt.rport_destroy);
852 * fc_rport_enter_rtv() - Send Request Timeout Value (RTV) request to peer
853 * @rdata: private remote port data
855 * Locking Note: The rport lock is expected to be held before calling
858 static void fc_rport_enter_rtv(struct fc_rport_priv *rdata)
861 struct fc_lport *lport = rdata->local_port;
863 FC_RPORT_DBG(rdata, "Port entered RTV state from %s state\n",
864 fc_rport_state(rdata));
866 fc_rport_state_enter(rdata, RPORT_ST_RTV);
868 fp = fc_frame_alloc(lport, sizeof(struct fc_els_rtv));
870 fc_rport_error_retry(rdata, fp);
874 if (!lport->tt.elsct_send(lport, rdata->ids.port_id, fp, ELS_RTV,
875 fc_rport_rtv_resp, rdata, lport->e_d_tov))
876 fc_rport_error_retry(rdata, fp);
878 kref_get(&rdata->kref);
882 * fc_rport_enter_logo() - Send Logout (LOGO) request to peer
883 * @rdata: private remote port data
885 * Locking Note: The rport lock is expected to be held before calling
888 static void fc_rport_enter_logo(struct fc_rport_priv *rdata)
890 struct fc_lport *lport = rdata->local_port;
893 FC_RPORT_DBG(rdata, "Port entered LOGO state from %s state\n",
894 fc_rport_state(rdata));
896 fc_rport_state_enter(rdata, RPORT_ST_LOGO);
898 fp = fc_frame_alloc(lport, sizeof(struct fc_els_logo));
900 fc_rport_error_retry(rdata, fp);
904 if (!lport->tt.elsct_send(lport, rdata->ids.port_id, fp, ELS_LOGO,
905 fc_rport_logo_resp, rdata, lport->e_d_tov))
906 fc_rport_error_retry(rdata, fp);
908 kref_get(&rdata->kref);
912 * fc_rport_recv_els_req() - handle a validated ELS request.
913 * @lport: Fibre Channel local port
914 * @sp: current sequence in the PLOGI exchange
915 * @fp: response frame
917 * Handle incoming ELS requests that require port login.
918 * The ELS opcode has already been validated by the caller.
920 * Locking Note: Called with the lport lock held.
922 static void fc_rport_recv_els_req(struct fc_lport *lport,
923 struct fc_seq *sp, struct fc_frame *fp)
925 struct fc_rport_priv *rdata;
926 struct fc_frame_header *fh;
927 struct fc_seq_els_data els_data;
930 els_data.reason = ELS_RJT_UNAB;
931 els_data.explan = ELS_EXPL_PLOGI_REQD;
933 fh = fc_frame_header_get(fp);
935 mutex_lock(&lport->disc.disc_mutex);
936 rdata = lport->tt.rport_lookup(lport, ntoh24(fh->fh_s_id));
938 mutex_unlock(&lport->disc.disc_mutex);
941 mutex_lock(&rdata->rp_mutex);
942 mutex_unlock(&lport->disc.disc_mutex);
944 switch (rdata->rp_state) {
950 mutex_unlock(&rdata->rp_mutex);
954 switch (fc_frame_payload_op(fp)) {
956 fc_rport_recv_prli_req(rdata, sp, fp);
959 fc_rport_recv_prlo_req(rdata, sp, fp);
963 lport->tt.seq_els_rsp_send(sp, ELS_RRQ, &els_data);
967 lport->tt.seq_els_rsp_send(sp, ELS_REC, &els_data);
970 fc_frame_free(fp); /* can't happen */
974 mutex_unlock(&rdata->rp_mutex);
978 lport->tt.seq_els_rsp_send(sp, ELS_LS_RJT, &els_data);
983 * fc_rport_recv_req() - Handle a received ELS request from a rport
984 * @sp: current sequence in the PLOGI exchange
985 * @fp: response frame
986 * @lport: Fibre Channel local port
988 * Locking Note: Called with the lport lock held.
990 void fc_rport_recv_req(struct fc_seq *sp, struct fc_frame *fp,
991 struct fc_lport *lport)
993 struct fc_seq_els_data els_data;
996 * Handle PLOGI and LOGO requests separately, since they
997 * don't require prior login.
998 * Check for unsupported opcodes first and reject them.
999 * For some ops, it would be incorrect to reject with "PLOGI required".
1001 switch (fc_frame_payload_op(fp)) {
1003 fc_rport_recv_plogi_req(lport, sp, fp);
1006 fc_rport_recv_logo_req(lport, sp, fp);
1012 fc_rport_recv_els_req(lport, sp, fp);
1017 els_data.reason = ELS_RJT_UNSUP;
1018 els_data.explan = ELS_EXPL_NONE;
1019 lport->tt.seq_els_rsp_send(sp, ELS_LS_RJT, &els_data);
1025 * fc_rport_recv_plogi_req() - Handle incoming Port Login (PLOGI) request
1026 * @lport: local port
1027 * @sp: current sequence in the PLOGI exchange
1028 * @fp: PLOGI request frame
1030 * Locking Note: The rport lock is held before calling this function.
1032 static void fc_rport_recv_plogi_req(struct fc_lport *lport,
1033 struct fc_seq *sp, struct fc_frame *rx_fp)
1035 struct fc_disc *disc;
1036 struct fc_rport_priv *rdata;
1037 struct fc_frame *fp = rx_fp;
1039 struct fc_frame_header *fh;
1040 struct fc_els_flogi *pl;
1041 struct fc_seq_els_data rjt_data;
1045 fh = fc_frame_header_get(fp);
1046 sid = ntoh24(fh->fh_s_id);
1048 FC_RPORT_ID_DBG(lport, sid, "Received PLOGI request\n");
1050 pl = fc_frame_payload_get(fp, sizeof(*pl));
1052 FC_RPORT_ID_DBG(lport, sid, "Received PLOGI too short\n");
1053 rjt_data.reason = ELS_RJT_PROT;
1054 rjt_data.explan = ELS_EXPL_INV_LEN;
1058 disc = &lport->disc;
1059 mutex_lock(&disc->disc_mutex);
1060 rdata = lport->tt.rport_create(lport, sid);
1062 mutex_unlock(&disc->disc_mutex);
1063 rjt_data.reason = ELS_RJT_UNAB;
1064 rjt_data.explan = ELS_EXPL_INSUF_RES;
1068 mutex_lock(&rdata->rp_mutex);
1069 mutex_unlock(&disc->disc_mutex);
1071 rdata->ids.port_name = get_unaligned_be64(&pl->fl_wwpn);
1072 rdata->ids.node_name = get_unaligned_be64(&pl->fl_wwnn);
1075 * If the rport was just created, possibly due to the incoming PLOGI,
1076 * set the state appropriately and accept the PLOGI.
1078 * If we had also sent a PLOGI, and if the received PLOGI is from a
1079 * higher WWPN, we accept it, otherwise an LS_RJT is sent with reason
1080 * "command already in progress".
1082 * XXX TBD: If the session was ready before, the PLOGI should result in
1083 * all outstanding exchanges being reset.
1085 switch (rdata->rp_state) {
1087 FC_RPORT_DBG(rdata, "Received PLOGI in INIT state\n");
1089 case RPORT_ST_PLOGI:
1090 FC_RPORT_DBG(rdata, "Received PLOGI in PLOGI state\n");
1091 if (rdata->ids.port_name < lport->wwpn) {
1092 mutex_unlock(&rdata->rp_mutex);
1093 rjt_data.reason = ELS_RJT_INPROG;
1094 rjt_data.explan = ELS_EXPL_NONE;
1099 case RPORT_ST_READY:
1101 case RPORT_ST_DELETE:
1103 FC_RPORT_DBG(rdata, "Received PLOGI in unexpected state %d\n",
1105 fc_frame_free(rx_fp);
1110 * Get session payload size from incoming PLOGI.
1112 rdata->maxframe_size = fc_plogi_get_maxframe(pl, lport->mfs);
1113 fc_frame_free(rx_fp);
1116 * Send LS_ACC. If this fails, the originator should retry.
1118 sp = lport->tt.seq_start_next(sp);
1121 fp = fc_frame_alloc(lport, sizeof(*pl));
1125 fc_plogi_fill(lport, fp, ELS_LS_ACC);
1126 f_ctl = FC_FC_EX_CTX | FC_FC_LAST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT;
1127 ep = fc_seq_exch(sp);
1128 fc_fill_fc_hdr(fp, FC_RCTL_ELS_REP, ep->did, ep->sid,
1129 FC_TYPE_ELS, f_ctl, 0);
1130 lport->tt.seq_send(lport, sp, fp);
1131 fc_rport_enter_prli(rdata);
1133 mutex_unlock(&rdata->rp_mutex);
1137 lport->tt.seq_els_rsp_send(sp, ELS_LS_RJT, &rjt_data);
1142 * fc_rport_recv_prli_req() - Handle incoming Process Login (PRLI) request
1143 * @rdata: private remote port data
1144 * @sp: current sequence in the PRLI exchange
1145 * @fp: PRLI request frame
1147 * Locking Note: The rport lock is exected to be held before calling
1150 static void fc_rport_recv_prli_req(struct fc_rport_priv *rdata,
1151 struct fc_seq *sp, struct fc_frame *rx_fp)
1153 struct fc_lport *lport = rdata->local_port;
1155 struct fc_frame *fp;
1156 struct fc_frame_header *fh;
1158 struct fc_els_prli prli;
1159 struct fc_els_spp spp;
1161 struct fc_els_spp *rspp; /* request service param page */
1162 struct fc_els_spp *spp; /* response spp */
1165 enum fc_els_rjt_reason reason = ELS_RJT_UNAB;
1166 enum fc_els_rjt_explan explan = ELS_EXPL_NONE;
1167 enum fc_els_spp_resp resp;
1168 struct fc_seq_els_data rjt_data;
1171 u32 roles = FC_RPORT_ROLE_UNKNOWN;
1174 fh = fc_frame_header_get(rx_fp);
1176 FC_RPORT_DBG(rdata, "Received PRLI request while in state %s\n",
1177 fc_rport_state(rdata));
1179 switch (rdata->rp_state) {
1182 case RPORT_ST_READY:
1183 reason = ELS_RJT_NONE;
1186 fc_frame_free(rx_fp);
1190 len = fr_len(rx_fp) - sizeof(*fh);
1191 pp = fc_frame_payload_get(rx_fp, sizeof(*pp));
1193 reason = ELS_RJT_PROT;
1194 explan = ELS_EXPL_INV_LEN;
1196 plen = ntohs(pp->prli.prli_len);
1197 if ((plen % 4) != 0 || plen > len) {
1198 reason = ELS_RJT_PROT;
1199 explan = ELS_EXPL_INV_LEN;
1200 } else if (plen < len) {
1203 plen = pp->prli.prli_spp_len;
1204 if ((plen % 4) != 0 || plen < sizeof(*spp) ||
1205 plen > len || len < sizeof(*pp)) {
1206 reason = ELS_RJT_PROT;
1207 explan = ELS_EXPL_INV_LEN;
1211 if (reason != ELS_RJT_NONE ||
1212 (fp = fc_frame_alloc(lport, len)) == NULL) {
1213 rjt_data.reason = reason;
1214 rjt_data.explan = explan;
1215 lport->tt.seq_els_rsp_send(sp, ELS_LS_RJT, &rjt_data);
1217 sp = lport->tt.seq_start_next(sp);
1219 pp = fc_frame_payload_get(fp, len);
1222 pp->prli.prli_cmd = ELS_LS_ACC;
1223 pp->prli.prli_spp_len = plen;
1224 pp->prli.prli_len = htons(len);
1225 len -= sizeof(struct fc_els_prli);
1227 /* reinitialize remote port roles */
1228 rdata->ids.roles = FC_RPORT_ROLE_UNKNOWN;
1231 * Go through all the service parameter pages and build
1232 * response. If plen indicates longer SPP than standard,
1233 * use that. The entire response has been pre-cleared above.
1236 while (len >= plen) {
1237 spp->spp_type = rspp->spp_type;
1238 spp->spp_type_ext = rspp->spp_type_ext;
1239 spp->spp_flags = rspp->spp_flags & FC_SPP_EST_IMG_PAIR;
1240 resp = FC_SPP_RESP_ACK;
1241 if (rspp->spp_flags & FC_SPP_RPA_VAL)
1242 resp = FC_SPP_RESP_NO_PA;
1243 switch (rspp->spp_type) {
1244 case 0: /* common to all FC-4 types */
1247 fcp_parm = ntohl(rspp->spp_params);
1248 if (fcp_parm * FCP_SPPF_RETRY)
1249 rdata->flags |= FC_RP_FLAGS_RETRY;
1250 rdata->supported_classes = FC_COS_CLASS3;
1251 if (fcp_parm & FCP_SPPF_INIT_FCN)
1252 roles |= FC_RPORT_ROLE_FCP_INITIATOR;
1253 if (fcp_parm & FCP_SPPF_TARG_FCN)
1254 roles |= FC_RPORT_ROLE_FCP_TARGET;
1255 rdata->ids.roles = roles;
1258 htonl(lport->service_params);
1261 resp = FC_SPP_RESP_INVL;
1264 spp->spp_flags |= resp;
1266 rspp = (struct fc_els_spp *)((char *)rspp + plen);
1267 spp = (struct fc_els_spp *)((char *)spp + plen);
1271 * Send LS_ACC. If this fails, the originator should retry.
1273 f_ctl = FC_FC_EX_CTX | FC_FC_LAST_SEQ;
1274 f_ctl |= FC_FC_END_SEQ | FC_FC_SEQ_INIT;
1275 ep = fc_seq_exch(sp);
1276 fc_fill_fc_hdr(fp, FC_RCTL_ELS_REP, ep->did, ep->sid,
1277 FC_TYPE_ELS, f_ctl, 0);
1278 lport->tt.seq_send(lport, sp, fp);
1281 * Get lock and re-check state.
1283 switch (rdata->rp_state) {
1285 fc_rport_enter_ready(rdata);
1287 case RPORT_ST_READY:
1293 fc_frame_free(rx_fp);
1297 * fc_rport_recv_prlo_req() - Handle incoming Process Logout (PRLO) request
1298 * @rdata: private remote port data
1299 * @sp: current sequence in the PRLO exchange
1300 * @fp: PRLO request frame
1302 * Locking Note: The rport lock is exected to be held before calling
1305 static void fc_rport_recv_prlo_req(struct fc_rport_priv *rdata,
1307 struct fc_frame *fp)
1309 struct fc_lport *lport = rdata->local_port;
1311 struct fc_frame_header *fh;
1312 struct fc_seq_els_data rjt_data;
1314 fh = fc_frame_header_get(fp);
1316 FC_RPORT_DBG(rdata, "Received PRLO request while in state %s\n",
1317 fc_rport_state(rdata));
1320 rjt_data.reason = ELS_RJT_UNAB;
1321 rjt_data.explan = ELS_EXPL_NONE;
1322 lport->tt.seq_els_rsp_send(sp, ELS_LS_RJT, &rjt_data);
1327 * fc_rport_recv_logo_req() - Handle incoming Logout (LOGO) request
1328 * @lport: local port.
1329 * @sp: current sequence in the LOGO exchange
1330 * @fp: LOGO request frame
1332 * Locking Note: The rport lock is exected to be held before calling
1335 static void fc_rport_recv_logo_req(struct fc_lport *lport,
1337 struct fc_frame *fp)
1339 struct fc_frame_header *fh;
1340 struct fc_rport_priv *rdata;
1343 fh = fc_frame_header_get(fp);
1344 sid = ntoh24(fh->fh_s_id);
1346 mutex_lock(&lport->disc.disc_mutex);
1347 rdata = lport->tt.rport_lookup(lport, sid);
1349 mutex_lock(&rdata->rp_mutex);
1350 FC_RPORT_DBG(rdata, "Received LOGO request while in state %s\n",
1351 fc_rport_state(rdata));
1352 fc_rport_enter_delete(rdata, RPORT_EV_LOGO);
1353 mutex_unlock(&rdata->rp_mutex);
1355 FC_RPORT_ID_DBG(lport, sid,
1356 "Received LOGO from non-logged-in port\n");
1357 mutex_unlock(&lport->disc.disc_mutex);
1358 lport->tt.seq_els_rsp_send(sp, ELS_LS_ACC, NULL);
1362 static void fc_rport_flush_queue(void)
1364 flush_workqueue(rport_event_queue);
1367 int fc_rport_init(struct fc_lport *lport)
1369 if (!lport->tt.rport_lookup)
1370 lport->tt.rport_lookup = fc_rport_lookup;
1372 if (!lport->tt.rport_create)
1373 lport->tt.rport_create = fc_rport_create;
1375 if (!lport->tt.rport_login)
1376 lport->tt.rport_login = fc_rport_login;
1378 if (!lport->tt.rport_logoff)
1379 lport->tt.rport_logoff = fc_rport_logoff;
1381 if (!lport->tt.rport_recv_req)
1382 lport->tt.rport_recv_req = fc_rport_recv_req;
1384 if (!lport->tt.rport_flush_queue)
1385 lport->tt.rport_flush_queue = fc_rport_flush_queue;
1387 if (!lport->tt.rport_destroy)
1388 lport->tt.rport_destroy = fc_rport_destroy;
1392 EXPORT_SYMBOL(fc_rport_init);
1394 int fc_setup_rport(void)
1396 rport_event_queue = create_singlethread_workqueue("fc_rport_eq");
1397 if (!rport_event_queue)
1401 EXPORT_SYMBOL(fc_setup_rport);
1403 void fc_destroy_rport(void)
1405 destroy_workqueue(rport_event_queue);
1407 EXPORT_SYMBOL(fc_destroy_rport);
1409 void fc_rport_terminate_io(struct fc_rport *rport)
1411 struct fc_rport_libfc_priv *rp = rport->dd_data;
1412 struct fc_lport *lport = rp->local_port;
1414 lport->tt.exch_mgr_reset(lport, 0, rport->port_id);
1415 lport->tt.exch_mgr_reset(lport, rport->port_id, 0);
1417 EXPORT_SYMBOL(fc_rport_terminate_io);