2 * Copyright(c) 2007 - 2008 Intel Corporation. All rights reserved.
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms and conditions of the GNU General Public License,
6 * version 2, as published by the Free Software Foundation.
8 * This program is distributed in the hope it will be useful, but WITHOUT
9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * You should have received a copy of the GNU General Public License along with
14 * this program; if not, write to the Free Software Foundation, Inc.,
15 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
17 * Maintained at www.Open-FCoE.org
23 * This file contains all processing regarding fc_rports. It contains the
24 * rport state machine and does all rport interaction with the transport class.
25 * There should be no other places in libfc that interact directly with the
26 * transport class in regards to adding and deleting rports.
28 * fc_rport's represent N_Port's within the fabric.
34 * The rport should never hold the rport mutex and then attempt to acquire
35 * either the lport or disc mutexes. The rport's mutex is considered lesser
36 * than both the lport's mutex and the disc mutex. Refer to fc_lport.c for
37 * more comments on the hierarchy.
39 * The locking strategy is similar to the lport's strategy. The lock protects
40 * the rport's states and is held and released by the entry points to the rport
41 * block. All _enter_* functions correspond to rport states and expect the rport
42 * mutex to be locked before calling them. This means that rports only handle
43 * one request or response at a time, since they're not critical for the I/O
44 * path this potential over-use of the mutex is acceptable.
47 #include <linux/kernel.h>
48 #include <linux/spinlock.h>
49 #include <linux/interrupt.h>
50 #include <linux/rcupdate.h>
51 #include <linux/timer.h>
52 #include <linux/workqueue.h>
53 #include <asm/unaligned.h>
55 #include <scsi/libfc.h>
56 #include <scsi/fc_encode.h>
58 struct workqueue_struct *rport_event_queue;
60 static void fc_rport_enter_plogi(struct fc_rport *);
61 static void fc_rport_enter_prli(struct fc_rport *);
62 static void fc_rport_enter_rtv(struct fc_rport *);
63 static void fc_rport_enter_ready(struct fc_rport *);
64 static void fc_rport_enter_logo(struct fc_rport *);
66 static void fc_rport_recv_plogi_req(struct fc_rport *,
67 struct fc_seq *, struct fc_frame *);
68 static void fc_rport_recv_prli_req(struct fc_rport *,
69 struct fc_seq *, struct fc_frame *);
70 static void fc_rport_recv_prlo_req(struct fc_rport *,
71 struct fc_seq *, struct fc_frame *);
72 static void fc_rport_recv_logo_req(struct fc_rport *,
73 struct fc_seq *, struct fc_frame *);
74 static void fc_rport_timeout(struct work_struct *);
75 static void fc_rport_error(struct fc_rport *, struct fc_frame *);
76 static void fc_rport_error_retry(struct fc_rport *, struct fc_frame *);
77 static void fc_rport_work(struct work_struct *);
/* Printable names for enum fc_rport_state, indexed by state; used by fc_rport_state(). */
79 static const char *fc_rport_state_names[] = {
80 [RPORT_ST_INIT] = "Init",
81 [RPORT_ST_PLOGI] = "PLOGI",
82 [RPORT_ST_PRLI] = "PRLI",
83 [RPORT_ST_RTV] = "RTV",
84 [RPORT_ST_READY] = "Ready",
85 [RPORT_ST_LOGO] = "LOGO",
86 [RPORT_ST_DELETE] = "Delete",
/*
 * Device release() callback for a rogue rport's embedded struct device.
 * Invoked when the last reference obtained via get_device() is dropped.
 */
89 static void fc_rport_rogue_destroy(struct device *dev)
91 struct fc_rport *rport = dev_to_rport(dev);
92 FC_RPORT_DBG(rport, "Destroying rogue rport\n");
/*
 * Allocate and initialize a "rogue" rport: the fc_rport and its
 * fc_rport_libfc_priv are carved from one kzalloc()'d buffer.  The rogue
 * rport carries the identifiers from @dp but is not yet registered with
 * the FC transport class (that happens later in fc_rport_work()).
 * Returns the new rport, or NULL on allocation failure (allocation check
 * not visible in this span -- confirm against full source).
 */
96 struct fc_rport *fc_rport_rogue_create(struct fc_disc_port *dp)
98 struct fc_rport *rport;
99 struct fc_rport_libfc_priv *rdata;
100 rport = kzalloc(sizeof(*rport) + sizeof(*rdata), GFP_KERNEL);
105 rdata = RPORT_TO_PRIV(rport);
107 rport->dd_data = rdata;
108 rport->port_id = dp->ids.port_id;
109 rport->port_name = dp->ids.port_name;
110 rport->node_name = dp->ids.node_name;
111 rport->roles = dp->ids.roles;
112 rport->maxframe_size = FC_MIN_MAX_PAYLOAD;
114 * Note: all this libfc rogue rport code will be removed for
115 * upstream so it's fine that this is really ugly and hacky right now.
117 device_initialize(&rport->dev);
118 rport->dev.release = fc_rport_rogue_destroy;
120 mutex_init(&rdata->rp_mutex);
121 rdata->local_port = dp->lp;
122 rdata->trans_state = FC_PORTSTATE_ROGUE;
123 rdata->rp_state = RPORT_ST_INIT;
124 rdata->event = RPORT_EV_NONE;
125 rdata->flags = FC_RP_FLAGS_REC_SUPPORTED;
127 rdata->e_d_tov = dp->lp->e_d_tov;
128 rdata->r_a_tov = dp->lp->r_a_tov;
129 INIT_DELAYED_WORK(&rdata->retry_work, fc_rport_timeout);
130 INIT_WORK(&rdata->event_work, fc_rport_work);
132 * For good measure, but not necessary as we should only
133 * add REAL rport to the lport list.
135 INIT_LIST_HEAD(&rdata->peers);
141 * fc_rport_state() - return a string for the state the rport is in
142 * @rport: The rport whose state we want to get a string for
 *
 * Returns a constant string from fc_rport_state_names[]; does not allocate.
144 static const char *fc_rport_state(struct fc_rport *rport)
147 struct fc_rport_libfc_priv *rdata = rport->dd_data;
149 cp = fc_rport_state_names[rdata->rp_state];
156 * fc_set_rport_loss_tmo() - Set the remote port loss timeout in seconds.
157 * @rport: Pointer to Fibre Channel remote port structure
158 * @timeout: timeout in seconds
 *
 * Pads the requested timeout by 5 seconds; the 30-second value is the
 * fallback assignment (selecting condition elided here -- presumably
 * used when @timeout is zero; confirm against full source).
160 void fc_set_rport_loss_tmo(struct fc_rport *rport, u32 timeout)
163 rport->dev_loss_tmo = timeout + 5;
165 rport->dev_loss_tmo = 30;
167 EXPORT_SYMBOL(fc_set_rport_loss_tmo);
170 * fc_plogi_get_maxframe() - Get max payload from the common service parameters
171 * @flp: FLOGI payload structure
172 * @maxval: upper limit, may be less than what is in the service parameters
 *
 * Considers both the common service parameter BB data field and the
 * class-3 receive data field size; each may lower the result, bounded
 * below by FC_SP_MIN_MAX_PAYLOAD and above by @maxval.
174 static unsigned int fc_plogi_get_maxframe(struct fc_els_flogi *flp,
180 * Get max payload from the common service parameters and the
181 * class 3 receive data field size.
183 mfs = ntohs(flp->fl_csp.sp_bb_data) & FC_SP_BB_DATA_MASK;
184 if (mfs >= FC_SP_MIN_MAX_PAYLOAD && mfs < maxval)
186 mfs = ntohs(flp->fl_cssp[3 - 1].cp_rdfs);
187 if (mfs >= FC_SP_MIN_MAX_PAYLOAD && mfs < maxval)
193 * fc_rport_state_enter() - Change the rport's state
194 * @rport: The rport whose state should change
195 * @new: The new state of the rport
197 * Locking Note: Called with the rport lock held
 *
 * The body of the if-on-state-change is elided in this span; presumably
 * it resets the retry counter when the state actually changes -- confirm.
199 static void fc_rport_state_enter(struct fc_rport *rport,
200 enum fc_rport_state new)
202 struct fc_rport_libfc_priv *rdata = rport->dd_data;
203 if (rdata->rp_state != new)
205 rdata->rp_state = new;
/*
 * Deferred event handler for an rport, run on rport_event_queue.
 *
 * RPORT_EV_CREATED: register a real rport with the FC transport class via
 * fc_remote_port_add(), copy the rogue rport's negotiated parameters into
 * the new rport's private data, mark it READY, invoke the event callback,
 * and drop the rogue rport's device reference.  On allocation failure the
 * event is downgraded to RPORT_EV_FAILED.
 *
 * RPORT_EV_FAILED/LOGO/STOP: invoke the event callback, cancel pending
 * retry work, and either drop the rogue device reference or (for a real
 * rport) delete it from the transport class and reset its exchanges in
 * both directions.
 */
208 static void fc_rport_work(struct work_struct *work)
211 struct fc_rport_libfc_priv *rdata =
212 container_of(work, struct fc_rport_libfc_priv, event_work);
213 enum fc_rport_event event;
214 enum fc_rport_trans_state trans_state;
215 struct fc_lport *lport = rdata->local_port;
216 struct fc_rport_operations *rport_ops;
217 struct fc_rport *rport = PRIV_TO_RPORT(rdata);
219 mutex_lock(&rdata->rp_mutex);
220 event = rdata->event;
221 rport_ops = rdata->ops;
223 if (event == RPORT_EV_CREATED) {
224 struct fc_rport *new_rport;
225 struct fc_rport_libfc_priv *new_rdata;
226 struct fc_rport_identifiers ids;
228 ids.port_id = rport->port_id;
229 ids.roles = rport->roles;
230 ids.port_name = rport->port_name;
231 ids.node_name = rport->node_name;
233 rdata->event = RPORT_EV_NONE;
234 mutex_unlock(&rdata->rp_mutex);
236 new_rport = fc_remote_port_add(lport->host, 0, &ids);
239 * Switch from the rogue rport to the rport
240 * returned by the FC class.
242 new_rport->maxframe_size = rport->maxframe_size;
244 new_rdata = new_rport->dd_data;
245 new_rdata->e_d_tov = rdata->e_d_tov;
246 new_rdata->r_a_tov = rdata->r_a_tov;
247 new_rdata->ops = rdata->ops;
248 new_rdata->local_port = rdata->local_port;
249 new_rdata->flags = FC_RP_FLAGS_REC_SUPPORTED;
250 new_rdata->trans_state = FC_PORTSTATE_REAL;
251 mutex_init(&new_rdata->rp_mutex);
252 INIT_DELAYED_WORK(&new_rdata->retry_work,
254 INIT_LIST_HEAD(&new_rdata->peers);
255 INIT_WORK(&new_rdata->event_work, fc_rport_work);
257 fc_rport_state_enter(new_rport, RPORT_ST_READY);
259 printk(KERN_WARNING "libfc: Failed to allocate "
260 " memory for rport (%6x)\n", ids.port_id);
261 event = RPORT_EV_FAILED;
263 if (rport->port_id != FC_FID_DIR_SERV)
264 if (rport_ops->event_callback)
265 rport_ops->event_callback(lport, rport,
267 put_device(&rport->dev);
269 rdata = new_rport->dd_data;
270 if (rport_ops->event_callback)
271 rport_ops->event_callback(lport, rport, event);
272 } else if ((event == RPORT_EV_FAILED) ||
273 (event == RPORT_EV_LOGO) ||
274 (event == RPORT_EV_STOP)) {
275 trans_state = rdata->trans_state;
276 mutex_unlock(&rdata->rp_mutex);
277 if (rport_ops->event_callback)
278 rport_ops->event_callback(lport, rport, event);
279 cancel_delayed_work_sync(&rdata->retry_work);
280 if (trans_state == FC_PORTSTATE_ROGUE)
281 put_device(&rport->dev);
283 port_id = rport->port_id;
284 fc_remote_port_delete(rport);
285 lport->tt.exch_mgr_reset(lport, 0, port_id);
286 lport->tt.exch_mgr_reset(lport, port_id, 0);
289 mutex_unlock(&rdata->rp_mutex);
293 * fc_rport_login() - Start the remote port login state machine
294 * @rport: Fibre Channel remote port
296 * Locking Note: Called without the rport lock held. This
297 * function will hold the rport lock, call an _enter_*
298 * function and then unlock the rport.
 *
 * Kicks off the login sequence by sending PLOGI; returns 0 (return
 * statement elided in this span -- confirm).
300 int fc_rport_login(struct fc_rport *rport)
302 struct fc_rport_libfc_priv *rdata = rport->dd_data;
304 mutex_lock(&rdata->rp_mutex);
306 FC_RPORT_DBG(rport, "Login to port\n");
308 fc_rport_enter_plogi(rport);
310 mutex_unlock(&rdata->rp_mutex);
316 * fc_rport_enter_delete() - schedule a remote port to be deleted.
317 * @rport: Fibre Channel remote port
318 * @event: event to report as the reason for deletion
320 * Locking Note: Called with the rport lock held.
322 * Allow state change into DELETE only once.
324 * Call queue_work only if there's no event already pending.
325 * Set the new event so that the old pending event will not occur.
326 * Since we have the mutex, even if fc_rport_work() is already started,
327 * it'll see the new event.
329 static void fc_rport_enter_delete(struct fc_rport *rport,
330 enum fc_rport_event event)
332 struct fc_rport_libfc_priv *rdata = rport->dd_data;
334 if (rdata->rp_state == RPORT_ST_DELETE)
337 FC_RPORT_DBG(rport, "Delete port\n");
339 fc_rport_state_enter(rport, RPORT_ST_DELETE);
 /* Queue first, then overwrite the event while still holding the mutex. */
341 if (rdata->event == RPORT_EV_NONE)
342 queue_work(rport_event_queue, &rdata->event_work);
343 rdata->event = event;
347 * fc_rport_logoff() - Logoff and remove an rport
348 * @rport: Fibre Channel remote port to be removed
350 * Locking Note: Called without the rport lock held. This
351 * function will hold the rport lock, call an _enter_*
352 * function and then unlock the rport.
 *
 * Sends LOGO and schedules deletion with RPORT_EV_STOP.  A port already
 * in the DELETE state is left alone.
354 int fc_rport_logoff(struct fc_rport *rport)
356 struct fc_rport_libfc_priv *rdata = rport->dd_data;
358 mutex_lock(&rdata->rp_mutex);
360 FC_RPORT_DBG(rport, "Remove port\n");
362 if (rdata->rp_state == RPORT_ST_DELETE) {
363 FC_RPORT_DBG(rport, "Port in Delete state, not removing\n");
364 mutex_unlock(&rdata->rp_mutex);
368 fc_rport_enter_logo(rport);
371 * Change the state to Delete so that we discard
374 fc_rport_enter_delete(rport, RPORT_EV_STOP);
375 mutex_unlock(&rdata->rp_mutex);
382 * fc_rport_enter_ready() - The rport is ready
383 * @rport: Fibre Channel remote port that is ready
385 * Locking Note: The rport lock is expected to be held before calling
 *
 * Marks the rport READY and queues RPORT_EV_CREATED for fc_rport_work(),
 * which will register the real rport with the transport class.
388 static void fc_rport_enter_ready(struct fc_rport *rport)
390 struct fc_rport_libfc_priv *rdata = rport->dd_data;
392 fc_rport_state_enter(rport, RPORT_ST_READY);
394 FC_RPORT_DBG(rport, "Port is Ready\n");
 /* Queue only if no event pending; then set the event (mutex is held). */
396 if (rdata->event == RPORT_EV_NONE)
397 queue_work(rport_event_queue, &rdata->event_work);
398 rdata->event = RPORT_EV_CREATED;
402 * fc_rport_timeout() - Handler for the retry_work timer.
403 * @work: The work struct of the fc_rport_libfc_priv
405 * Locking Note: Called without the rport lock held. This
406 * function will hold the rport lock, call an _enter_*
407 * function and then unlock the rport.
 *
 * Re-issues the request for whichever login phase the rport is in;
 * nothing is retried for INIT/READY/DELETE (case labels partly elided).
409 static void fc_rport_timeout(struct work_struct *work)
411 struct fc_rport_libfc_priv *rdata =
412 container_of(work, struct fc_rport_libfc_priv, retry_work.work);
413 struct fc_rport *rport = PRIV_TO_RPORT(rdata);
415 mutex_lock(&rdata->rp_mutex);
417 switch (rdata->rp_state) {
419 fc_rport_enter_plogi(rport);
422 fc_rport_enter_prli(rport);
425 fc_rport_enter_rtv(rport);
428 fc_rport_enter_logo(rport);
432 case RPORT_ST_DELETE:
436 mutex_unlock(&rdata->rp_mutex);
440 * fc_rport_error() - Error handler, called once retries have been exhausted
441 * @rport: The fc_rport object
442 * @fp: The frame pointer
444 * Locking Note: The rport lock is expected to be held before
445 * calling this routine
 *
 * @fp is an ERR_PTR-encoded error, not a real frame (see PTR_ERR use).
 * Failing login states schedule deletion with RPORT_EV_FAILED; a READY
 * rport (case label elided) is re-entered into READY.
447 static void fc_rport_error(struct fc_rport *rport, struct fc_frame *fp)
449 struct fc_rport_libfc_priv *rdata = rport->dd_data;
451 FC_RPORT_DBG(rport, "Error %ld in state %s, retries %d\n",
452 PTR_ERR(fp), fc_rport_state(rport), rdata->retries);
454 switch (rdata->rp_state) {
458 fc_rport_enter_delete(rport, RPORT_EV_FAILED);
461 fc_rport_enter_ready(rport);
463 case RPORT_ST_DELETE:
471 * fc_rport_error_retry() - Error handler when retries are desired
472 * @rport: The fc_rport object
473 * @fp: The frame pointer
475 * If the error was an exchange timeout retry immediately,
476 * otherwise wait for E_D_TOV.
478 * Locking Note: The rport lock is expected to be held before
479 * calling this routine
481 static void fc_rport_error_retry(struct fc_rport *rport, struct fc_frame *fp)
483 struct fc_rport_libfc_priv *rdata = rport->dd_data;
484 unsigned long delay = FC_DEF_E_D_TOV;
486 /* make sure this isn't an FC_EX_CLOSED error, never retry those */
487 if (PTR_ERR(fp) == -FC_EX_CLOSED)
488 return fc_rport_error(rport, fp);
 /* Retry while under the lport's per-rport retry budget. */
490 if (rdata->retries < rdata->local_port->max_rport_retry_count) {
491 FC_RPORT_DBG(rport, "Error %ld in state %s, retrying\n",
492 PTR_ERR(fp), fc_rport_state(rport));
494 /* no additional delay on exchange timeouts */
495 if (PTR_ERR(fp) == -FC_EX_TIMEOUT)
497 schedule_delayed_work(&rdata->retry_work, delay);
 /* Retries exhausted: fall through to the terminal error handler. */
501 return fc_rport_error(rport, fp);
505 * fc_rport_plogi_recv_resp() - Handle incoming ELS PLOGI response
506 * @sp: current sequence in the PLOGI exchange
507 * @fp: response frame
508 * @rp_arg: Fibre Channel remote port
510 * Locking Note: This function will be called without the rport lock
511 * held, but it will lock, call an _enter_* function or fc_rport_error
512 * and then unlock the rport.
 *
 * On LS_ACC: records the peer's WWPN/WWNN, E_D_TOV, max concurrent
 * sequences and max frame size, then moves to PRLI (or straight to
 * READY for well-known addresses).  Otherwise retries.  Drops the
 * device reference taken when the PLOGI was sent.
514 static void fc_rport_plogi_resp(struct fc_seq *sp, struct fc_frame *fp,
517 struct fc_rport *rport = rp_arg;
518 struct fc_rport_libfc_priv *rdata = rport->dd_data;
519 struct fc_lport *lport = rdata->local_port;
520 struct fc_els_flogi *plp = NULL;
526 mutex_lock(&rdata->rp_mutex);
528 FC_RPORT_DBG(rport, "Received a PLOGI response\n");
530 if (rdata->rp_state != RPORT_ST_PLOGI) {
531 FC_RPORT_DBG(rport, "Received a PLOGI response, but in state "
532 "%s\n", fc_rport_state(rport));
539 fc_rport_error_retry(rport, fp);
543 op = fc_frame_payload_op(fp);
544 if (op == ELS_LS_ACC &&
545 (plp = fc_frame_payload_get(fp, sizeof(*plp))) != NULL) {
546 rport->port_name = get_unaligned_be64(&plp->fl_wwpn);
547 rport->node_name = get_unaligned_be64(&plp->fl_wwnn);
549 tov = ntohl(plp->fl_csp.sp_e_d_tov);
550 if (ntohs(plp->fl_csp.sp_features) & FC_SP_FT_EDTR)
 /* Only ever raise our E_D_TOV to the peer's value. */
552 if (tov > rdata->e_d_tov)
553 rdata->e_d_tov = tov;
554 csp_seq = ntohs(plp->fl_csp.sp_tot_seq);
555 cssp_seq = ntohs(plp->fl_cssp[3 - 1].cp_con_seq);
556 if (cssp_seq < csp_seq)
558 rdata->max_seq = csp_seq;
559 rport->maxframe_size =
560 fc_plogi_get_maxframe(plp, lport->mfs);
563 * If the rport is one of the well known addresses
564 * we skip PRLI and RTV and go straight to READY.
566 if (rport->port_id >= FC_FID_DOM_MGR)
567 fc_rport_enter_ready(rport);
569 fc_rport_enter_prli(rport);
571 fc_rport_error_retry(rport, fp);
576 mutex_unlock(&rdata->rp_mutex);
577 put_device(&rport->dev);
581 * fc_rport_enter_plogi() - Send Port Login (PLOGI) request to peer
582 * @rport: Fibre Channel remote port to send PLOGI to
584 * Locking Note: The rport lock is expected to be held before calling
 *
 * Takes a device reference for the duration of the exchange; it is
 * released in fc_rport_plogi_resp().
587 static void fc_rport_enter_plogi(struct fc_rport *rport)
589 struct fc_rport_libfc_priv *rdata = rport->dd_data;
590 struct fc_lport *lport = rdata->local_port;
593 FC_RPORT_DBG(rport, "Port entered PLOGI state from %s state\n",
594 fc_rport_state(rport));
596 fc_rport_state_enter(rport, RPORT_ST_PLOGI);
 /* Reset to the minimum until the PLOGI response tells us better. */
598 rport->maxframe_size = FC_MIN_MAX_PAYLOAD;
599 fp = fc_frame_alloc(lport, sizeof(struct fc_els_flogi));
601 fc_rport_error_retry(rport, fp);
604 rdata->e_d_tov = lport->e_d_tov;
606 if (!lport->tt.elsct_send(lport, rport, fp, ELS_PLOGI,
607 fc_rport_plogi_resp, rport, lport->e_d_tov))
608 fc_rport_error_retry(rport, fp);
610 get_device(&rport->dev);
614 * fc_rport_prli_resp() - Process Login (PRLI) response handler
615 * @sp: current sequence in the PRLI exchange
616 * @fp: response frame
617 * @rp_arg: Fibre Channel remote port
619 * Locking Note: This function will be called without the rport lock
620 * held, but it will lock, call an _enter_* function or fc_rport_error
621 * and then unlock the rport.
 *
 * On LS_ACC: decodes the FCP service parameter page to set the retry
 * flag and initiator/target roles, then moves to RTV.  A bad response
 * schedules deletion.  Drops the reference taken by fc_rport_enter_prli().
623 static void fc_rport_prli_resp(struct fc_seq *sp, struct fc_frame *fp,
626 struct fc_rport *rport = rp_arg;
627 struct fc_rport_libfc_priv *rdata = rport->dd_data;
629 struct fc_els_prli prli;
630 struct fc_els_spp spp;
632 u32 roles = FC_RPORT_ROLE_UNKNOWN;
636 mutex_lock(&rdata->rp_mutex);
638 FC_RPORT_DBG(rport, "Received a PRLI response\n");
640 if (rdata->rp_state != RPORT_ST_PRLI) {
641 FC_RPORT_DBG(rport, "Received a PRLI response, but in state "
642 "%s\n", fc_rport_state(rport));
649 fc_rport_error_retry(rport, fp);
653 op = fc_frame_payload_op(fp);
654 if (op == ELS_LS_ACC) {
655 pp = fc_frame_payload_get(fp, sizeof(*pp));
656 if (pp && pp->prli.prli_spp_len >= sizeof(pp->spp)) {
657 fcp_parm = ntohl(pp->spp.spp_params);
658 if (fcp_parm & FCP_SPPF_RETRY)
659 rdata->flags |= FC_RP_FLAGS_RETRY;
662 rport->supported_classes = FC_COS_CLASS3;
663 if (fcp_parm & FCP_SPPF_INIT_FCN)
664 roles |= FC_RPORT_ROLE_FCP_INITIATOR;
665 if (fcp_parm & FCP_SPPF_TARG_FCN)
666 roles |= FC_RPORT_ROLE_FCP_TARGET;
668 rport->roles = roles;
669 fc_rport_enter_rtv(rport);
672 FC_RPORT_DBG(rport, "Bad ELS response for PRLI command\n");
673 fc_rport_enter_delete(rport, RPORT_EV_FAILED);
679 mutex_unlock(&rdata->rp_mutex);
680 put_device(&rport->dev);
684 * fc_rport_logo_resp() - Logout (LOGO) response handler
685 * @sp: current sequence in the LOGO exchange
686 * @fp: response frame
687 * @rp_arg: Fibre Channel remote port
689 * Locking Note: This function will be called without the rport lock
690 * held, but it will lock, call an _enter_* function or fc_rport_error
691 * and then unlock the rport.
 *
 * NOTE(review): on LS_ACC this enters the RTV state, which is surprising
 * after a logout; on a bad response it schedules deletion.  Confirm the
 * LS_ACC transition is intentional.
693 static void fc_rport_logo_resp(struct fc_seq *sp, struct fc_frame *fp,
696 struct fc_rport *rport = rp_arg;
697 struct fc_rport_libfc_priv *rdata = rport->dd_data;
700 mutex_lock(&rdata->rp_mutex);
702 FC_RPORT_DBG(rport, "Received a LOGO response\n");
704 if (rdata->rp_state != RPORT_ST_LOGO) {
705 FC_RPORT_DBG(rport, "Received a LOGO response, but in state "
706 "%s\n", fc_rport_state(rport));
713 fc_rport_error_retry(rport, fp);
717 op = fc_frame_payload_op(fp);
718 if (op == ELS_LS_ACC) {
719 fc_rport_enter_rtv(rport);
721 FC_RPORT_DBG(rport, "Bad ELS response for LOGO command\n");
722 fc_rport_enter_delete(rport, RPORT_EV_LOGO);
728 mutex_unlock(&rdata->rp_mutex);
729 put_device(&rport->dev);
733 * fc_rport_enter_prli() - Send Process Login (PRLI) request to peer
734 * @rport: Fibre Channel remote port to send PRLI to
736 * Locking Note: The rport lock is expected to be held before calling
 *
 * Takes a device reference for the exchange; released in
 * fc_rport_prli_resp().
739 static void fc_rport_enter_prli(struct fc_rport *rport)
741 struct fc_rport_libfc_priv *rdata = rport->dd_data;
742 struct fc_lport *lport = rdata->local_port;
744 struct fc_els_prli prli;
745 struct fc_els_spp spp;
749 FC_RPORT_DBG(rport, "Port entered PRLI state from %s state\n",
750 fc_rport_state(rport));
752 fc_rport_state_enter(rport, RPORT_ST_PRLI);
754 fp = fc_frame_alloc(lport, sizeof(*pp));
756 fc_rport_error_retry(rport, fp);
760 if (!lport->tt.elsct_send(lport, rport, fp, ELS_PRLI,
761 fc_rport_prli_resp, rport, lport->e_d_tov))
762 fc_rport_error_retry(rport, fp);
764 get_device(&rport->dev);
768 * fc_rport_els_rtv_resp() - Request Timeout Value response handler
769 * @sp: current sequence in the RTV exchange
770 * @fp: response frame
771 * @rp_arg: Fibre Channel remote port
773 * Many targets don't seem to support this.
775 * Locking Note: This function will be called without the rport lock
776 * held, but it will lock, call an _enter_* function or fc_rport_error
777 * and then unlock the rport.
 *
 * On LS_ACC: adopts the peer's R_A_TOV and E_D_TOV.  The rport becomes
 * READY regardless of the outcome.  Drops the reference taken by
 * fc_rport_enter_rtv().
779 static void fc_rport_rtv_resp(struct fc_seq *sp, struct fc_frame *fp,
782 struct fc_rport *rport = rp_arg;
783 struct fc_rport_libfc_priv *rdata = rport->dd_data;
786 mutex_lock(&rdata->rp_mutex);
788 FC_RPORT_DBG(rport, "Received a RTV response\n");
790 if (rdata->rp_state != RPORT_ST_RTV) {
791 FC_RPORT_DBG(rport, "Received a RTV response, but in state "
792 "%s\n", fc_rport_state(rport));
 /* Note: RTV failure is terminal for this exchange but not for login. */
799 fc_rport_error(rport, fp);
803 op = fc_frame_payload_op(fp);
804 if (op == ELS_LS_ACC) {
805 struct fc_els_rtv_acc *rtv;
809 rtv = fc_frame_payload_get(fp, sizeof(*rtv));
811 toq = ntohl(rtv->rtv_toq);
812 tov = ntohl(rtv->rtv_r_a_tov);
815 rdata->r_a_tov = tov;
816 tov = ntohl(rtv->rtv_e_d_tov);
817 if (toq & FC_ELS_RTV_EDRES)
821 rdata->e_d_tov = tov;
825 fc_rport_enter_ready(rport);
830 mutex_unlock(&rdata->rp_mutex);
831 put_device(&rport->dev);
835 * fc_rport_enter_rtv() - Send Request Timeout Value (RTV) request to peer
836 * @rport: Fibre Channel remote port to send RTV to
838 * Locking Note: The rport lock is expected to be held before calling
 *
 * Takes a device reference for the exchange; released in
 * fc_rport_rtv_resp().
841 static void fc_rport_enter_rtv(struct fc_rport *rport)
844 struct fc_rport_libfc_priv *rdata = rport->dd_data;
845 struct fc_lport *lport = rdata->local_port;
847 FC_RPORT_DBG(rport, "Port entered RTV state from %s state\n",
848 fc_rport_state(rport));
850 fc_rport_state_enter(rport, RPORT_ST_RTV);
852 fp = fc_frame_alloc(lport, sizeof(struct fc_els_rtv));
854 fc_rport_error_retry(rport, fp);
858 if (!lport->tt.elsct_send(lport, rport, fp, ELS_RTV,
859 fc_rport_rtv_resp, rport, lport->e_d_tov))
860 fc_rport_error_retry(rport, fp);
862 get_device(&rport->dev);
866 * fc_rport_enter_logo() - Send Logout (LOGO) request to peer
867 * @rport: Fibre Channel remote port to send LOGO to
869 * Locking Note: The rport lock is expected to be held before calling
 *
 * Takes a device reference for the exchange; released in
 * fc_rport_logo_resp().
872 static void fc_rport_enter_logo(struct fc_rport *rport)
874 struct fc_rport_libfc_priv *rdata = rport->dd_data;
875 struct fc_lport *lport = rdata->local_port;
878 FC_RPORT_DBG(rport, "Port entered LOGO state from %s state\n",
879 fc_rport_state(rport));
881 fc_rport_state_enter(rport, RPORT_ST_LOGO);
883 fp = fc_frame_alloc(lport, sizeof(struct fc_els_logo));
885 fc_rport_error_retry(rport, fp);
889 if (!lport->tt.elsct_send(lport, rport, fp, ELS_LOGO,
890 fc_rport_logo_resp, rport, lport->e_d_tov))
891 fc_rport_error_retry(rport, fp);
893 get_device(&rport->dev);
898 * fc_rport_recv_req() - Receive a request from a rport
899 * @sp: current sequence in the PLOGI exchange
900 * @fp: response frame
901 * @rp_arg: Fibre Channel remote port
903 * Locking Note: Called without the rport lock held. This
904 * function will hold the rport lock, call an _enter_*
905 * function and then unlock the rport.
 *
 * Dispatches inbound ELS requests (PLOGI/PRLI/PRLO/LOGO handled by
 * dedicated helpers; RRQ and REC answered directly; everything else
 * rejected with ELS_RJT_UNSUP).
907 void fc_rport_recv_req(struct fc_seq *sp, struct fc_frame *fp,
908 struct fc_rport *rport)
910 struct fc_rport_libfc_priv *rdata = rport->dd_data;
911 struct fc_lport *lport = rdata->local_port;
913 struct fc_frame_header *fh;
914 struct fc_seq_els_data els_data;
917 mutex_lock(&rdata->rp_mutex);
920 els_data.explan = ELS_EXPL_NONE;
921 els_data.reason = ELS_RJT_NONE;
923 fh = fc_frame_header_get(fp);
925 if (fh->fh_r_ctl == FC_RCTL_ELS_REQ && fh->fh_type == FC_TYPE_ELS) {
926 op = fc_frame_payload_op(fp);
929 fc_rport_recv_plogi_req(rport, sp, fp);
932 fc_rport_recv_prli_req(rport, sp, fp);
935 fc_rport_recv_prlo_req(rport, sp, fp);
938 fc_rport_recv_logo_req(rport, sp, fp);
942 lport->tt.seq_els_rsp_send(sp, ELS_RRQ, &els_data);
946 lport->tt.seq_els_rsp_send(sp, ELS_REC, &els_data);
949 els_data.reason = ELS_RJT_UNSUP;
950 lport->tt.seq_els_rsp_send(sp, ELS_LS_RJT, &els_data);
955 mutex_unlock(&rdata->rp_mutex);
959 * fc_rport_recv_plogi_req() - Handle incoming Port Login (PLOGI) request
960 * @rport: Fibre Channel remote port that initiated PLOGI
961 * @sp: current sequence in the PLOGI exchange
962 * @fp: PLOGI request frame
964 * Locking Note: The rport lock is expected to be held before calling
967 static void fc_rport_recv_plogi_req(struct fc_rport *rport,
968 struct fc_seq *sp, struct fc_frame *rx_fp)
970 struct fc_rport_libfc_priv *rdata = rport->dd_data;
971 struct fc_lport *lport = rdata->local_port;
972 struct fc_frame *fp = rx_fp;
974 struct fc_frame_header *fh;
975 struct fc_els_flogi *pl;
976 struct fc_seq_els_data rjt_data;
980 enum fc_els_rjt_reason reject = 0;
984 fh = fc_frame_header_get(fp);
986 FC_RPORT_DBG(rport, "Received PLOGI request while in state %s\n",
987 fc_rport_state(rport));
989 sid = ntoh24(fh->fh_s_id);
990 pl = fc_frame_payload_get(fp, sizeof(*pl));
992 FC_RPORT_DBG(rport, "Received PLOGI too short\n");
994 /* XXX TBD: send reject? */
998 wwpn = get_unaligned_be64(&pl->fl_wwpn);
999 wwnn = get_unaligned_be64(&pl->fl_wwnn);
1002 * If the session was just created, possibly due to the incoming PLOGI,
1003 * set the state appropriately and accept the PLOGI.
1005 * If we had also sent a PLOGI, and if the received PLOGI is from a
1006 * higher WWPN, we accept it, otherwise an LS_RJT is sent with reason
1007 * "command already in progress".
1009 * XXX TBD: If the session was ready before, the PLOGI should result in
1010 * all outstanding exchanges being reset.
1012 switch (rdata->rp_state) {
1014 FC_RPORT_DBG(rport, "Received PLOGI, wwpn %llx state INIT "
1015 "- reject\n", (unsigned long long)wwpn);
1016 reject = ELS_RJT_UNSUP;
1018 case RPORT_ST_PLOGI:
1019 FC_RPORT_DBG(rport, "Received PLOGI in PLOGI state %d\n",
 /* PLOGI crossing: lower WWPN loses; it must wait for our PLOGI. */
1021 if (wwpn < lport->wwpn)
1022 reject = ELS_RJT_INPROG;
1025 case RPORT_ST_READY:
1026 FC_RPORT_DBG(rport, "Received PLOGI in logged-in state %d "
1027 "- ignored for now\n", rdata->rp_state);
1028 /* XXX TBD - should reset */
1030 case RPORT_ST_DELETE:
1032 FC_RPORT_DBG(rport, "Received PLOGI in unexpected "
1033 "state %d\n", rdata->rp_state);
1040 rjt_data.reason = reject;
1041 rjt_data.explan = ELS_EXPL_NONE;
1042 lport->tt.seq_els_rsp_send(sp, ELS_LS_RJT, &rjt_data);
1045 fp = fc_frame_alloc(lport, sizeof(*pl));
1048 rjt_data.reason = ELS_RJT_UNAB;
1049 rjt_data.explan = ELS_EXPL_NONE;
1050 lport->tt.seq_els_rsp_send(sp, ELS_LS_RJT, &rjt_data);
1053 sp = lport->tt.seq_start_next(sp);
1055 fc_rport_set_name(rport, wwpn, wwnn);
1058 * Get session payload size from incoming PLOGI.
1060 rport->maxframe_size =
1061 fc_plogi_get_maxframe(pl, lport->mfs);
1062 fc_frame_free(rx_fp);
1063 fc_plogi_fill(lport, fp, ELS_LS_ACC);
1066 * Send LS_ACC. If this fails,
1067 * the originator should retry.
1069 f_ctl = FC_FC_EX_CTX | FC_FC_LAST_SEQ;
1070 f_ctl |= FC_FC_END_SEQ | FC_FC_SEQ_INIT;
1071 ep = fc_seq_exch(sp);
1072 fc_fill_fc_hdr(fp, FC_RCTL_ELS_REP, ep->did, ep->sid,
1073 FC_TYPE_ELS, f_ctl, 0);
1074 lport->tt.seq_send(lport, sp, fp);
1075 if (rdata->rp_state == RPORT_ST_PLOGI)
1076 fc_rport_enter_prli(rport);
1082 * fc_rport_recv_prli_req() - Handle incoming Process Login (PRLI) request
1083 * @rport: Fibre Channel remote port that initiated PRLI
1084 * @sp: current sequence in the PRLI exchange
1085 * @fp: PRLI request frame
1087 * Locking Note: The rport lock is expected to be held before calling
 *
 * Validates the PRLI payload/SPP lengths, builds an LS_ACC carrying one
 * response service-parameter page per request page (FCP pages set the
 * retry flag and initiator/target roles), sends it, and re-checks the
 * rport state to decide whether to enter READY.
1090 static void fc_rport_recv_prli_req(struct fc_rport *rport,
1091 struct fc_seq *sp, struct fc_frame *rx_fp)
1093 struct fc_rport_libfc_priv *rdata = rport->dd_data;
1094 struct fc_lport *lport = rdata->local_port;
1096 struct fc_frame *fp;
1097 struct fc_frame_header *fh;
1099 struct fc_els_prli prli;
1100 struct fc_els_spp spp;
1102 struct fc_els_spp *rspp; /* request service param page */
1103 struct fc_els_spp *spp; /* response spp */
1106 enum fc_els_rjt_reason reason = ELS_RJT_UNAB;
1107 enum fc_els_rjt_explan explan = ELS_EXPL_NONE;
1108 enum fc_els_spp_resp resp;
1109 struct fc_seq_els_data rjt_data;
1112 u32 roles = FC_RPORT_ROLE_UNKNOWN;
1115 fh = fc_frame_header_get(rx_fp);
1117 FC_RPORT_DBG(rport, "Received PRLI request while in state %s\n",
1118 fc_rport_state(rport));
1120 switch (rdata->rp_state) {
1122 case RPORT_ST_READY:
1123 reason = ELS_RJT_NONE;
1126 fc_frame_free(rx_fp);
 /* Validate overall payload length and the per-page SPP length. */
1130 len = fr_len(rx_fp) - sizeof(*fh);
1131 pp = fc_frame_payload_get(rx_fp, sizeof(*pp));
1133 reason = ELS_RJT_PROT;
1134 explan = ELS_EXPL_INV_LEN;
1136 plen = ntohs(pp->prli.prli_len);
1137 if ((plen % 4) != 0 || plen > len) {
1138 reason = ELS_RJT_PROT;
1139 explan = ELS_EXPL_INV_LEN;
1140 } else if (plen < len) {
1143 plen = pp->prli.prli_spp_len;
1144 if ((plen % 4) != 0 || plen < sizeof(*spp) ||
1145 plen > len || len < sizeof(*pp)) {
1146 reason = ELS_RJT_PROT;
1147 explan = ELS_EXPL_INV_LEN;
1151 if (reason != ELS_RJT_NONE ||
1152 (fp = fc_frame_alloc(lport, len)) == NULL) {
1153 rjt_data.reason = reason;
1154 rjt_data.explan = explan;
1155 lport->tt.seq_els_rsp_send(sp, ELS_LS_RJT, &rjt_data);
1157 sp = lport->tt.seq_start_next(sp);
1159 pp = fc_frame_payload_get(fp, len);
1162 pp->prli.prli_cmd = ELS_LS_ACC;
1163 pp->prli.prli_spp_len = plen;
1164 pp->prli.prli_len = htons(len);
1165 len -= sizeof(struct fc_els_prli);
1168 * Go through all the service parameter pages and build
1169 * response. If plen indicates longer SPP than standard,
1170 * use that. The entire response has been pre-cleared above.
1173 while (len >= plen) {
1174 spp->spp_type = rspp->spp_type;
1175 spp->spp_type_ext = rspp->spp_type_ext;
1176 spp->spp_flags = rspp->spp_flags & FC_SPP_EST_IMG_PAIR;
1177 resp = FC_SPP_RESP_ACK;
1178 if (rspp->spp_flags & FC_SPP_RPA_VAL)
1179 resp = FC_SPP_RESP_NO_PA;
1180 switch (rspp->spp_type) {
1181 case 0: /* common to all FC-4 types */
1184 fcp_parm = ntohl(rspp->spp_params);
 /* FIX: was 'fcp_parm * FCP_SPPF_RETRY' (multiplication), which is
  * true for any nonzero fcp_parm; a bitwise flag test is intended,
  * matching the identical logic in fc_rport_prli_resp(). */
1185 if (fcp_parm & FCP_SPPF_RETRY)
1186 rdata->flags |= FC_RP_FLAGS_RETRY;
1187 rport->supported_classes = FC_COS_CLASS3;
1188 if (fcp_parm & FCP_SPPF_INIT_FCN)
1189 roles |= FC_RPORT_ROLE_FCP_INITIATOR;
1190 if (fcp_parm & FCP_SPPF_TARG_FCN)
1191 roles |= FC_RPORT_ROLE_FCP_TARGET;
1192 rport->roles = roles;
1195 htonl(lport->service_params);
1198 resp = FC_SPP_RESP_INVL;
1201 spp->spp_flags |= resp;
 /* Advance by the (possibly non-standard) page length. */
1203 rspp = (struct fc_els_spp *)((char *)rspp + plen);
1204 spp = (struct fc_els_spp *)((char *)spp + plen);
1208 * Send LS_ACC. If this fails, the originator should retry.
1210 f_ctl = FC_FC_EX_CTX | FC_FC_LAST_SEQ;
1211 f_ctl |= FC_FC_END_SEQ | FC_FC_SEQ_INIT;
1212 ep = fc_seq_exch(sp);
1213 fc_fill_fc_hdr(fp, FC_RCTL_ELS_REP, ep->did, ep->sid,
1214 FC_TYPE_ELS, f_ctl, 0);
1215 lport->tt.seq_send(lport, sp, fp);
1218 * Get lock and re-check state.
1220 switch (rdata->rp_state) {
1222 fc_rport_enter_ready(rport);
1224 case RPORT_ST_READY:
1230 fc_frame_free(rx_fp);
1234 * fc_rport_recv_prlo_req() - Handle incoming Process Logout (PRLO) request
1235 * @rport: Fibre Channel remote port that initiated PRLO
1236 * @sp: current sequence in the PRLO exchange
1237 * @fp: PRLO request frame
1239 * Locking Note: The rport lock is expected to be held before calling
 *
 * PRLO is not supported here: anything not already being deleted gets
 * an LS_RJT with reason "unable to perform".
1242 static void fc_rport_recv_prlo_req(struct fc_rport *rport, struct fc_seq *sp,
1243 struct fc_frame *fp)
1245 struct fc_rport_libfc_priv *rdata = rport->dd_data;
1246 struct fc_lport *lport = rdata->local_port;
1248 struct fc_frame_header *fh;
1249 struct fc_seq_els_data rjt_data;
1251 fh = fc_frame_header_get(fp);
1253 FC_RPORT_DBG(rport, "Received PRLO request while in state %s\n",
1254 fc_rport_state(rport));
1256 if (rdata->rp_state == RPORT_ST_DELETE) {
1262 rjt_data.reason = ELS_RJT_UNAB;
1263 rjt_data.explan = ELS_EXPL_NONE;
1264 lport->tt.seq_els_rsp_send(sp, ELS_LS_RJT, &rjt_data);
1269 * fc_rport_recv_logo_req() - Handle incoming Logout (LOGO) request
1270 * @rport: Fibre Channel remote port that initiated LOGO
1271 * @sp: current sequence in the LOGO exchange
1272 * @fp: LOGO request frame
1274 * Locking Note: The rport lock is expected to be held before calling
 *
 * Moves the rport to DELETE with RPORT_EV_LOGO, queues the deferred
 * event work, and acknowledges the LOGO with LS_ACC.
1277 static void fc_rport_recv_logo_req(struct fc_rport *rport, struct fc_seq *sp,
1278 struct fc_frame *fp)
1280 struct fc_frame_header *fh;
1281 struct fc_rport_libfc_priv *rdata = rport->dd_data;
1282 struct fc_lport *lport = rdata->local_port;
1284 fh = fc_frame_header_get(fp);
1286 FC_RPORT_DBG(rport, "Received LOGO request while in state %s\n",
1287 fc_rport_state(rport));
1289 if (rdata->rp_state == RPORT_ST_DELETE) {
1294 rdata->event = RPORT_EV_LOGO;
1295 fc_rport_state_enter(rport, RPORT_ST_DELETE);
1296 queue_work(rport_event_queue, &rdata->event_work);
1298 lport->tt.seq_els_rsp_send(sp, ELS_LS_ACC, NULL);
/* Wait for all queued rport event work (fc_rport_work) to finish. */
1302 static void fc_rport_flush_queue(void)
1304 flush_workqueue(rport_event_queue);
/*
 * Install default rport template operations on @lport, leaving any
 * entries the driver has already provided untouched.
 */
1307 int fc_rport_init(struct fc_lport *lport)
1309 if (!lport->tt.rport_create)
1310 lport->tt.rport_create = fc_rport_rogue_create;
1312 if (!lport->tt.rport_login)
1313 lport->tt.rport_login = fc_rport_login;
1315 if (!lport->tt.rport_logoff)
1316 lport->tt.rport_logoff = fc_rport_logoff;
1318 if (!lport->tt.rport_recv_req)
1319 lport->tt.rport_recv_req = fc_rport_recv_req;
1321 if (!lport->tt.rport_flush_queue)
1322 lport->tt.rport_flush_queue = fc_rport_flush_queue;
1326 EXPORT_SYMBOL(fc_rport_init);
/* Create the single-threaded workqueue used for all rport event work. */
1328 int fc_setup_rport(void)
1330 rport_event_queue = create_singlethread_workqueue("fc_rport_eq");
1331 if (!rport_event_queue)
1335 EXPORT_SYMBOL(fc_setup_rport);
/* Tear down the rport event workqueue created by fc_setup_rport(). */
1337 void fc_destroy_rport(void)
1339 destroy_workqueue(rport_event_queue);
1341 EXPORT_SYMBOL(fc_destroy_rport);
/*
 * Abort all outstanding I/O for @rport by resetting its exchanges in
 * both directions (as source and as destination ID).
 */
1343 void fc_rport_terminate_io(struct fc_rport *rport)
1345 struct fc_rport_libfc_priv *rdata = rport->dd_data;
1346 struct fc_lport *lport = rdata->local_port;
1348 lport->tt.exch_mgr_reset(lport, 0, rport->port_id);
1349 lport->tt.exch_mgr_reset(lport, rport->port_id, 0);
1351 EXPORT_SYMBOL(fc_rport_terminate_io);