4 * Implementation of FSF commands.
6 * Copyright IBM Corporation 2002, 2009
9 #define KMSG_COMPONENT "zfcp"
10 #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
12 #include <linux/blktrace_api.h>
/*
 * Timer callback fired when an FSF request does not complete in time.
 * Recovers by reopening the adapter; ZFCP_STATUS_COMMON_ERP_FAILED is
 * passed to the reopen call (presumably as the status mask to clear so
 * ERP may run again — confirm against zfcp_erp_adapter_reopen()).
 */
17 static void zfcp_fsf_request_timeout_handler(unsigned long data)
/* timer->data carries the adapter pointer (legacy pre-timer_setup style) */
19 struct zfcp_adapter *adapter = (struct zfcp_adapter *) data;
20 zfcp_erp_adapter_reopen(adapter, ZFCP_STATUS_COMMON_ERP_FAILED,
/*
 * Arm the per-request timer: on expiry zfcp_fsf_request_timeout_handler()
 * runs with the owning adapter as its argument.
 * @timeout is in jiffies, relative to now.
 */
24 static void zfcp_fsf_start_timer(struct zfcp_fsf_req *fsf_req,
25 unsigned long timeout)
27 fsf_req->timer.function = zfcp_fsf_request_timeout_handler;
28 fsf_req->timer.data = (unsigned long) fsf_req->adapter;
29 fsf_req->timer.expires = jiffies + timeout;
30 add_timer(&fsf_req->timer);
/*
 * Arm the request timer for an ERP-issued request with a fixed 30 second
 * deadline; on expiry zfcp_erp_timeout_handler() runs with the erp_action.
 * Only valid for requests that belong to an ERP action (BUG otherwise).
 */
33 static void zfcp_fsf_start_erp_timer(struct zfcp_fsf_req *fsf_req)
35 BUG_ON(!fsf_req->erp_action);
36 fsf_req->timer.function = zfcp_erp_timeout_handler;
37 fsf_req->timer.data = (unsigned long) fsf_req->erp_action;
38 fsf_req->timer.expires = jiffies + 30 * HZ;
39 add_timer(&fsf_req->timer);
42 /* association between FSF command and FSF QTCB type:
   lookup table indexed by FSF_QTCB_* command code, yielding the QTCB
   type word placed into qtcb->prefix.qtcb_type by zfcp_fsf_req_create() */
43 static u32 fsf_qtcb_type[] = {
44 [FSF_QTCB_FCP_CMND] = FSF_IO_COMMAND,
45 [FSF_QTCB_ABORT_FCP_CMND] = FSF_SUPPORT_COMMAND,
46 [FSF_QTCB_OPEN_PORT_WITH_DID] = FSF_SUPPORT_COMMAND,
47 [FSF_QTCB_OPEN_LUN] = FSF_SUPPORT_COMMAND,
48 [FSF_QTCB_CLOSE_LUN] = FSF_SUPPORT_COMMAND,
49 [FSF_QTCB_CLOSE_PORT] = FSF_SUPPORT_COMMAND,
50 [FSF_QTCB_CLOSE_PHYSICAL_PORT] = FSF_SUPPORT_COMMAND,
51 [FSF_QTCB_SEND_ELS] = FSF_SUPPORT_COMMAND,
52 [FSF_QTCB_SEND_GENERIC] = FSF_SUPPORT_COMMAND,
53 [FSF_QTCB_EXCHANGE_CONFIG_DATA] = FSF_CONFIG_COMMAND,
54 [FSF_QTCB_EXCHANGE_PORT_DATA] = FSF_PORT_COMMAND,
55 [FSF_QTCB_DOWNLOAD_CONTROL_FILE] = FSF_SUPPORT_COMMAND,
56 [FSF_QTCB_UPLOAD_CONTROL_FILE] = FSF_SUPPORT_COMMAND
/*
 * Decode an access-control-table (ACT) error word and warn about the
 * offending rule.  The high halfword selects the rule-type subtable,
 * the low halfword the rule number within it.
 */
59 static void zfcp_act_eval_err(struct zfcp_adapter *adapter, u32 table)
61 u16 subtable = table >> 16;
62 u16 rule = table & 0xffff;
63 const char *act_type[] = { "unknown", "OS", "WWPN", "DID", "LUN" };
/* subtable 0 ("unknown") and out-of-range values are deliberately ignored */
65 if (subtable && subtable < ARRAY_SIZE(act_type))
66 dev_warn(&adapter->ccw_device->dev,
67 "Access denied according to ACT rule type %s, "
68 "rule %d\n", act_type[subtable], rule);
/*
 * Common handling for FSF_ACCESS_DENIED on a remote port: log the port,
 * decode both ACT error halfwords from the status qualifier, flag the
 * port access-denied for ERP and mark the request failed.
 */
71 static void zfcp_fsf_access_denied_port(struct zfcp_fsf_req *req,
72 struct zfcp_port *port)
74 struct fsf_qtcb_header *header = &req->qtcb->header;
75 dev_warn(&req->adapter->ccw_device->dev,
76 "Access denied to port 0x%016Lx\n",
77 (unsigned long long)port->wwpn);
78 zfcp_act_eval_err(req->adapter, header->fsf_status_qual.halfword[0]);
79 zfcp_act_eval_err(req->adapter, header->fsf_status_qual.halfword[1]);
80 zfcp_erp_port_access_denied(port, "fspad_1", req);
81 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
/*
 * Common handling for FSF_ACCESS_DENIED on a LUN (unit): log unit and
 * port, decode both ACT error halfwords, flag the unit access-denied
 * for ERP and mark the request failed.
 */
84 static void zfcp_fsf_access_denied_unit(struct zfcp_fsf_req *req,
85 struct zfcp_unit *unit)
87 struct fsf_qtcb_header *header = &req->qtcb->header;
88 dev_warn(&req->adapter->ccw_device->dev,
89 "Access denied to unit 0x%016Lx on port 0x%016Lx\n",
90 (unsigned long long)unit->fcp_lun,
91 (unsigned long long)unit->port->wwpn);
92 zfcp_act_eval_err(req->adapter, header->fsf_status_qual.halfword[0]);
93 zfcp_act_eval_err(req->adapter, header->fsf_status_qual.halfword[1]);
94 zfcp_erp_unit_access_denied(unit, "fsuad_1", req);
95 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
/*
 * Handle FSF_SERVICE_CLASS_NOT_SUPPORTED: an unsupported FC class is
 * fatal for the device, so shut the whole adapter down and fail the
 * request.
 */
98 static void zfcp_fsf_class_not_supp(struct zfcp_fsf_req *req)
100 dev_err(&req->adapter->ccw_device->dev, "FCP device not "
101 "operational because of an unsupported FC class\n");
102 zfcp_erp_adapter_shutdown(req->adapter, 0, "fscns_1", req);
103 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
107 * zfcp_fsf_req_free - free memory used by fsf request
108 * @req: pointer to struct zfcp_fsf_req to free
 *
 * Requests allocated from a mempool are returned to it (the QTCB to
 * the adapter's qtcb_pool); otherwise the QTCB goes back to the global
 * qtcb cache.  The request's QTCB pointer may legitimately be NULL
 * (unsolicited status requests carry none).
110 void zfcp_fsf_req_free(struct zfcp_fsf_req *req)
112 if (likely(req->pool)) {
113 if (likely(req->qtcb))
114 mempool_free(req->qtcb, req->adapter->pool.qtcb_pool);
115 mempool_free(req, req->pool);
119 if (likely(req->qtcb))
120 kmem_cache_free(zfcp_data.qtcb_cache, req->qtcb);
/*
 * Unsolicited "port closed" notification: locate the affected port by
 * destination ID (d_id) under the port-list read lock and trigger a
 * port reopen through ERP.
 */
124 static void zfcp_fsf_status_read_port_closed(struct zfcp_fsf_req *req)
127 struct fsf_status_read_buffer *sr_buf = req->data;
128 struct zfcp_adapter *adapter = req->adapter;
129 struct zfcp_port *port;
/* only the 24-bit FC destination ID is significant */
130 int d_id = sr_buf->d_id & ZFCP_DID_MASK;
132 read_lock_irqsave(&adapter->port_list_lock, flags);
133 list_for_each_entry(port, &adapter->port_list, list)
134 if (port->d_id == d_id) {
135 zfcp_erp_port_reopen(port, 0, "fssrpc1", req);
138 read_unlock_irqrestore(&adapter->port_list_lock, flags);
/*
 * Evaluate link-down information delivered by the adapter: mark the
 * adapter's link as unplugged, block the SCSI rports, emit a
 * human-readable warning matching the reported error code, and finally
 * fail the adapter in ERP under the given trace @id.
 * If the link was already flagged unplugged the function returns early
 * (nothing new to do).
 * NOTE(review): this excerpt appears truncated — break statements and a
 * NULL check on @link_down are not visible here; do not infer
 * fall-through behavior from this listing.
 */
141 static void zfcp_fsf_link_down_info_eval(struct zfcp_fsf_req *req, char *id,
142 struct fsf_link_down_info *link_down)
144 struct zfcp_adapter *adapter = req->adapter;
146 if (atomic_read(&adapter->status) & ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED)
149 atomic_set_mask(ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED, &adapter->status);
/* stop the fc transport rports until the link returns */
151 zfcp_scsi_schedule_rports_block(adapter);
156 switch (link_down->error_code) {
157 case FSF_PSQ_LINK_NO_LIGHT:
158 dev_warn(&req->adapter->ccw_device->dev,
159 "There is no light signal from the local "
160 "fibre channel cable\n");
162 case FSF_PSQ_LINK_WRAP_PLUG:
163 dev_warn(&req->adapter->ccw_device->dev,
164 "There is a wrap plug instead of a fibre "
167 case FSF_PSQ_LINK_NO_FCP:
168 dev_warn(&req->adapter->ccw_device->dev,
169 "The adjacent fibre channel node does not "
172 case FSF_PSQ_LINK_FIRMWARE_UPDATE:
173 dev_warn(&req->adapter->ccw_device->dev,
174 "The FCP device is suspended because of a "
175 "firmware update\n");
177 case FSF_PSQ_LINK_INVALID_WWPN:
178 dev_warn(&req->adapter->ccw_device->dev,
179 "The FCP device detected a WWPN that is "
180 "duplicate or not valid\n");
182 case FSF_PSQ_LINK_NO_NPIV_SUPPORT:
183 dev_warn(&req->adapter->ccw_device->dev,
184 "The fibre channel fabric does not support NPIV\n");
186 case FSF_PSQ_LINK_NO_FCP_RESOURCES:
187 dev_warn(&req->adapter->ccw_device->dev,
188 "The FCP adapter cannot support more NPIV ports\n");
190 case FSF_PSQ_LINK_NO_FABRIC_RESOURCES:
191 dev_warn(&req->adapter->ccw_device->dev,
192 "The adjacent switch cannot support "
193 "more NPIV ports\n");
195 case FSF_PSQ_LINK_FABRIC_LOGIN_UNABLE:
196 dev_warn(&req->adapter->ccw_device->dev,
197 "The FCP adapter could not log in to the "
198 "fibre channel fabric\n");
200 case FSF_PSQ_LINK_WWPN_ASSIGNMENT_CORRUPTED:
201 dev_warn(&req->adapter->ccw_device->dev,
202 "The WWPN assignment file on the FCP adapter "
203 "has been damaged\n");
205 case FSF_PSQ_LINK_MODE_TABLE_CURRUPTED:
206 dev_warn(&req->adapter->ccw_device->dev,
207 "The mode table on the FCP adapter "
208 "has been damaged\n");
210 case FSF_PSQ_LINK_NO_WWPN_ASSIGNMENT:
211 dev_warn(&req->adapter->ccw_device->dev,
212 "All NPIV ports on the FCP adapter have "
/* default: generic link-down message for unrecognized error codes */
216 dev_warn(&req->adapter->ccw_device->dev,
217 "The link between the FCP adapter and "
218 "the FC fabric is down\n");
221 zfcp_erp_adapter_failed(adapter, id, req);
/*
 * Unsolicited "link down" notification: dispatch on the status subtype
 * and forward the embedded link-down payload (or NULL for a firmware
 * update, which carries no payload) to zfcp_fsf_link_down_info_eval()
 * with a subtype-specific trace id.
 */
224 static void zfcp_fsf_status_read_link_down(struct zfcp_fsf_req *req)
226 struct fsf_status_read_buffer *sr_buf = req->data;
227 struct fsf_link_down_info *ldi =
228 (struct fsf_link_down_info *) &sr_buf->payload;
230 switch (sr_buf->status_subtype) {
231 case FSF_STATUS_READ_SUB_NO_PHYSICAL_LINK:
232 zfcp_fsf_link_down_info_eval(req, "fssrld1", ldi);
234 case FSF_STATUS_READ_SUB_FDISC_FAILED:
235 zfcp_fsf_link_down_info_eval(req, "fssrld2", ldi);
237 case FSF_STATUS_READ_SUB_FIRMWARE_UPDATE:
238 zfcp_fsf_link_down_info_eval(req, "fssrld3", NULL);
/*
 * Completion handler for unsolicited status read requests.  Dismissed
 * requests are traced and freed immediately.  Otherwise the status
 * buffer is traced and dispatched by status_type (port closed, incoming
 * ELS, bit-error threshold, link down/up, lost notifications, CFDC /
 * feature updates).  Afterwards the buffer and request are released and
 * stat_work is queued so a replacement status read gets posted.
 * NOTE(review): break statements between cases are not visible in this
 * truncated excerpt.
 */
242 static void zfcp_fsf_status_read_handler(struct zfcp_fsf_req *req)
244 struct zfcp_adapter *adapter = req->adapter;
245 struct fsf_status_read_buffer *sr_buf = req->data;
247 if (req->status & ZFCP_STATUS_FSFREQ_DISMISSED) {
248 zfcp_dbf_hba_fsf_unsol("dism", adapter->dbf, sr_buf);
249 mempool_free(sr_buf, adapter->pool.status_read_data);
250 zfcp_fsf_req_free(req);
254 zfcp_dbf_hba_fsf_unsol("read", adapter->dbf, sr_buf);
256 switch (sr_buf->status_type) {
257 case FSF_STATUS_READ_PORT_CLOSED:
258 zfcp_fsf_status_read_port_closed(req);
260 case FSF_STATUS_READ_INCOMING_ELS:
261 zfcp_fc_incoming_els(req);
263 case FSF_STATUS_READ_SENSE_DATA_AVAIL:
265 case FSF_STATUS_READ_BIT_ERROR_THRESHOLD:
266 dev_warn(&adapter->ccw_device->dev,
267 "The error threshold for checksum statistics "
268 "has been exceeded\n");
269 zfcp_dbf_hba_berr(adapter->dbf, req);
271 case FSF_STATUS_READ_LINK_DOWN:
272 zfcp_fsf_status_read_link_down(req);
274 case FSF_STATUS_READ_LINK_UP:
275 dev_info(&adapter->ccw_device->dev,
276 "The local link has been restored\n");
277 /* All ports should be marked as ready to run again */
278 zfcp_erp_modify_adapter_status(adapter, "fssrh_1", NULL,
279 ZFCP_STATUS_COMMON_RUNNING,
281 zfcp_erp_adapter_reopen(adapter,
282 ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED |
283 ZFCP_STATUS_COMMON_ERP_FAILED,
286 case FSF_STATUS_READ_NOTIFICATION_LOST:
287 if (sr_buf->status_subtype & FSF_STATUS_READ_SUB_ACT_UPDATED)
288 zfcp_erp_adapter_access_changed(adapter, "fssrh_3",
/* missed ELS notifications: rescan the fabric for ports */
290 if (sr_buf->status_subtype & FSF_STATUS_READ_SUB_INCOMING_ELS)
291 queue_work(adapter->work_queue, &adapter->scan_work);
293 case FSF_STATUS_READ_CFDC_UPDATED:
294 zfcp_erp_adapter_access_changed(adapter, "fssrh_4", req);
296 case FSF_STATUS_READ_FEATURE_UPDATE_ALERT:
297 adapter->adapter_features = sr_buf->payload.word[0];
301 mempool_free(sr_buf, adapter->pool.status_read_data);
302 zfcp_fsf_req_free(req);
/* account the consumed buffer and schedule posting of a new one */
304 atomic_inc(&adapter->stat_miss);
305 queue_work(adapter->work_queue, &adapter->stat_work);
/*
 * Evaluate the FSF status qualifier of a request that reported
 * FSF_ADAPTER_STATUS_AVAILABLE.  Some qualifiers return without
 * flagging an error; aborted commands get FSFREQ_ABORTED; an
 * unrecoverable problem shuts the adapter down.  All paths that fall
 * through set FSFREQ_ERROR.
 */
308 static void zfcp_fsf_fsfstatus_qual_eval(struct zfcp_fsf_req *req)
310 switch (req->qtcb->header.fsf_status_qual.word[0]) {
311 case FSF_SQ_FCP_RSP_AVAILABLE:
312 case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE:
313 case FSF_SQ_NO_RETRY_POSSIBLE:
314 case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED:
316 case FSF_SQ_COMMAND_ABORTED:
317 req->status |= ZFCP_STATUS_FSFREQ_ABORTED;
319 case FSF_SQ_NO_RECOM:
320 dev_err(&req->adapter->ccw_device->dev,
321 "The FCP adapter reported a problem "
322 "that cannot be recovered\n");
323 zfcp_erp_adapter_shutdown(req->adapter, 0, "fsfsqe1", req);
326 /* all non-returning qualifiers above end up flagging FSFREQ_ERROR */
327 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
/*
 * Evaluate the generic FSF status of a completed request (skipped when
 * the request already failed): an unknown command is fatal and shuts
 * the adapter down; FSF_ADAPTER_STATUS_AVAILABLE defers to the status
 * qualifier evaluation.
 */
330 static void zfcp_fsf_fsfstatus_eval(struct zfcp_fsf_req *req)
332 if (unlikely(req->status & ZFCP_STATUS_FSFREQ_ERROR))
335 switch (req->qtcb->header.fsf_status) {
336 case FSF_UNKNOWN_COMMAND:
337 dev_err(&req->adapter->ccw_device->dev,
338 "The FCP adapter does not recognize the command 0x%x\n",
339 req->qtcb->header.fsf_command);
340 zfcp_erp_adapter_shutdown(req->adapter, 0, "fsfse_1", req);
341 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
343 case FSF_ADAPTER_STATUS_AVAILABLE:
344 zfcp_fsf_fsfstatus_qual_eval(req);
/*
 * Evaluate the QTCB protocol status of a completed request (runs before
 * the FSF status evaluation).  Dismissed requests are flagged
 * ERROR|RETRY and left alone.  Protocol-level failures are mapped to
 * adapter recovery actions: version/type/duplicate-id errors shut the
 * adapter down, sequence errors trigger a reopen with retry, link-down
 * info is evaluated, and a re-established queue reopens the adapter
 * with all ports runnable again.  An unrecognized protocol status is
 * fatal.  The trailing statement flags FSFREQ_ERROR for the error
 * paths that reach it.
 * NOTE(review): break statements between cases are not visible in this
 * truncated excerpt.
 */
349 static void zfcp_fsf_protstatus_eval(struct zfcp_fsf_req *req)
351 struct zfcp_adapter *adapter = req->adapter;
352 struct fsf_qtcb *qtcb = req->qtcb;
353 union fsf_prot_status_qual *psq = &qtcb->prefix.prot_status_qual;
355 zfcp_dbf_hba_fsf_response(req);
357 if (req->status & ZFCP_STATUS_FSFREQ_DISMISSED) {
358 req->status |= ZFCP_STATUS_FSFREQ_ERROR |
359 ZFCP_STATUS_FSFREQ_RETRY; /* only for SCSI cmnds. */
363 switch (qtcb->prefix.prot_status) {
365 case FSF_PROT_FSF_STATUS_PRESENTED:
367 case FSF_PROT_QTCB_VERSION_ERROR:
368 dev_err(&adapter->ccw_device->dev,
369 "QTCB version 0x%x not supported by FCP adapter "
370 "(0x%x to 0x%x)\n", FSF_QTCB_CURRENT_VERSION,
371 psq->word[0], psq->word[1]);
372 zfcp_erp_adapter_shutdown(adapter, 0, "fspse_1", req);
374 case FSF_PROT_ERROR_STATE:
375 case FSF_PROT_SEQ_NUMB_ERROR:
376 zfcp_erp_adapter_reopen(adapter, 0, "fspse_2", req);
377 req->status |= ZFCP_STATUS_FSFREQ_RETRY;
379 case FSF_PROT_UNSUPP_QTCB_TYPE:
380 dev_err(&adapter->ccw_device->dev,
381 "The QTCB type is not supported by the FCP adapter\n");
382 zfcp_erp_adapter_shutdown(adapter, 0, "fspse_3", req);
384 case FSF_PROT_HOST_CONNECTION_INITIALIZING:
385 atomic_set_mask(ZFCP_STATUS_ADAPTER_HOST_CON_INIT,
388 case FSF_PROT_DUPLICATE_REQUEST_ID:
389 dev_err(&adapter->ccw_device->dev,
390 "0x%Lx is an ambiguous request identifier\n",
391 (unsigned long long)qtcb->bottom.support.req_handle);
392 zfcp_erp_adapter_shutdown(adapter, 0, "fspse_4", req);
394 case FSF_PROT_LINK_DOWN:
395 zfcp_fsf_link_down_info_eval(req, "fspse_5",
396 &psq->link_down_info);
397 /* FIXME: reopening adapter now? better wait for link up */
398 zfcp_erp_adapter_reopen(adapter, 0, "fspse_6", req);
400 case FSF_PROT_REEST_QUEUE:
401 /* All ports should be marked as ready to run again */
402 zfcp_erp_modify_adapter_status(adapter, "fspse_7", NULL,
403 ZFCP_STATUS_COMMON_RUNNING,
405 zfcp_erp_adapter_reopen(adapter,
406 ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED |
407 ZFCP_STATUS_COMMON_ERP_FAILED,
/* default: unknown protocol status is a firmware/driver mismatch */
411 dev_err(&adapter->ccw_device->dev,
412 "0x%x is not a valid transfer protocol status\n",
413 qtcb->prefix.prot_status);
414 zfcp_erp_adapter_shutdown(adapter, 0, "fspse_9", req);
416 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
420 * zfcp_fsf_req_complete - process completion of a FSF request
421 * @req: The FSF request that has been completed.
 *
423 * When a request has been completed either from the FCP adapter,
424 * or it has been dismissed due to a queue shutdown, this function
425 * is called to process the completion status and trigger further
426 * events related to the FSF request.
 *
 * Unsolicited status requests take a dedicated path; for all others the
 * request timer is stopped, protocol and FSF status are evaluated, the
 * owning ERP action (if any) is notified, and the request is either
 * freed (CLEANUP flag) or its completion is signalled to the waiter.
428 static void zfcp_fsf_req_complete(struct zfcp_fsf_req *req)
430 if (unlikely(req->fsf_command == FSF_QTCB_UNSOLICITED_STATUS)) {
431 zfcp_fsf_status_read_handler(req);
435 del_timer(&req->timer);
436 zfcp_fsf_protstatus_eval(req);
437 zfcp_fsf_fsfstatus_eval(req);
441 zfcp_erp_notify(req->erp_action, 0);
443 if (likely(req->status & ZFCP_STATUS_FSFREQ_CLEANUP))
444 zfcp_fsf_req_free(req);
/* otherwise a waiter owns the request and is woken here */
446 complete(&req->completion);
450 * zfcp_fsf_req_dismiss_all - dismiss all fsf requests
451 * @adapter: pointer to struct zfcp_adapter
 *
453 * Never ever call this without shutting down the adapter first.
454 * Otherwise the adapter would continue using and corrupting s390 storage.
455 * Included BUG_ON() call to ensure this is done.
456 * ERP is supposed to be the only user of this function.
 *
 * Moves every pending request off the per-adapter hash buckets under
 * the request-list lock, then completes each one with the DISMISSED
 * flag set outside the lock.
458 void zfcp_fsf_req_dismiss_all(struct zfcp_adapter *adapter)
460 struct zfcp_fsf_req *req, *tmp;
462 LIST_HEAD(remove_queue);
/* the adapter's QDIO queues must already be down */
465 BUG_ON(atomic_read(&adapter->status) & ZFCP_STATUS_ADAPTER_QDIOUP);
466 spin_lock_irqsave(&adapter->req_list_lock, flags);
467 for (i = 0; i < REQUEST_LIST_SIZE; i++)
468 list_splice_init(&adapter->req_list[i], &remove_queue);
469 spin_unlock_irqrestore(&adapter->req_list_lock, flags);
471 list_for_each_entry_safe(req, tmp, &remove_queue, list) {
472 list_del(&req->list);
473 req->status |= ZFCP_STATUS_FSFREQ_DISMISSED;
474 zfcp_fsf_req_complete(req);
/*
 * Evaluate the config bottom of an exchange-config-data response:
 * copy it to the caller's buffer (when req->data is set), publish
 * node/port identity, speed and supported classes to the fc_host
 * attributes, cache adapter type and timer granularity, and derive the
 * fc_host port type from the reported topology.  An unsupported
 * (arbitrated loop) topology shuts the adapter down.
 * Return value semantics are not fully visible in this excerpt
 * (presumably 0 on success, nonzero on the shutdown path — confirm).
 */
478 static int zfcp_fsf_exchange_config_evaluate(struct zfcp_fsf_req *req)
480 struct fsf_qtcb_bottom_config *bottom;
481 struct zfcp_adapter *adapter = req->adapter;
482 struct Scsi_Host *shost = adapter->scsi_host;
484 bottom = &req->qtcb->bottom.config;
487 memcpy(req->data, bottom, sizeof(*bottom));
489 fc_host_node_name(shost) = bottom->nport_serv_param.wwnn;
490 fc_host_port_name(shost) = bottom->nport_serv_param.wwpn;
491 fc_host_port_id(shost) = bottom->s_id & ZFCP_DID_MASK;
492 fc_host_speed(shost) = bottom->fc_link_speed;
493 fc_host_supported_classes(shost) = FC_COS_CLASS2 | FC_COS_CLASS3;
495 adapter->hydra_version = bottom->adapter_type;
496 adapter->timer_ticks = bottom->timer_interval;
/* -1 marks "not yet known"; default to the current port name */
498 if (fc_host_permanent_port_name(shost) == -1)
499 fc_host_permanent_port_name(shost) = fc_host_port_name(shost);
501 switch (bottom->fc_topology) {
/* point-to-point: remember the peer's identity */
503 adapter->peer_d_id = bottom->peer_d_id & ZFCP_DID_MASK;
504 adapter->peer_wwpn = bottom->plogi_payload.wwpn;
505 adapter->peer_wwnn = bottom->plogi_payload.wwnn;
506 fc_host_port_type(shost) = FC_PORTTYPE_PTP;
508 case FSF_TOPO_FABRIC:
509 fc_host_port_type(shost) = FC_PORTTYPE_NPORT;
512 fc_host_port_type(shost) = FC_PORTTYPE_NLPORT;
515 dev_err(&adapter->ccw_device->dev,
516 "Unknown or unsupported arbitrated loop "
517 "fibre channel topology detected\n");
518 zfcp_erp_adapter_shutdown(adapter, 0, "fsece_1", req);
/*
 * Completion handler for exchange-config-data.  Caches LIC version,
 * adapter/connection features and resets peer identity, then acts on
 * the FSF status: a good response is evaluated (and the reported
 * maximum QTCB size sanity-checked); an incomplete exchange clears the
 * fc_host identity and evaluates the link-down info.  Afterwards the
 * HBA-API serial number is published (EBCDIC converted to ASCII) and
 * the adapter is shut down if its supported QTCB version window does
 * not include ours.
 */
525 static void zfcp_fsf_exchange_config_data_handler(struct zfcp_fsf_req *req)
527 struct zfcp_adapter *adapter = req->adapter;
528 struct fsf_qtcb *qtcb = req->qtcb;
529 struct fsf_qtcb_bottom_config *bottom = &qtcb->bottom.config;
530 struct Scsi_Host *shost = adapter->scsi_host;
532 if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
535 adapter->fsf_lic_version = bottom->lic_version;
536 adapter->adapter_features = bottom->adapter_features;
537 adapter->connection_features = bottom->connection_features;
538 adapter->peer_wwpn = 0;
539 adapter->peer_wwnn = 0;
540 adapter->peer_d_id = 0;
542 switch (qtcb->header.fsf_status) {
544 if (zfcp_fsf_exchange_config_evaluate(req))
/* adapter must be able to hold our full QTCB */
547 if (bottom->max_qtcb_size < sizeof(struct fsf_qtcb)) {
548 dev_err(&adapter->ccw_device->dev,
549 "FCP adapter maximum QTCB size (%d bytes) "
551 bottom->max_qtcb_size);
552 zfcp_erp_adapter_shutdown(adapter, 0, "fsecdh1", req);
555 atomic_set_mask(ZFCP_STATUS_ADAPTER_XCONFIG_OK,
558 case FSF_EXCHANGE_CONFIG_DATA_INCOMPLETE:
559 fc_host_node_name(shost) = 0;
560 fc_host_port_name(shost) = 0;
561 fc_host_port_id(shost) = 0;
562 fc_host_speed(shost) = FC_PORTSPEED_UNKNOWN;
563 fc_host_port_type(shost) = FC_PORTTYPE_UNKNOWN;
564 adapter->hydra_version = 0;
566 atomic_set_mask(ZFCP_STATUS_ADAPTER_XCONFIG_OK,
569 zfcp_fsf_link_down_info_eval(req, "fsecdh2",
570 &qtcb->header.fsf_status_qual.link_down_info);
/* default: unexpected FSF status is fatal for the adapter */
573 zfcp_erp_adapter_shutdown(adapter, 0, "fsecdh3", req);
577 if (adapter->adapter_features & FSF_FEATURE_HBAAPI_MANAGEMENT) {
578 adapter->hardware_version = bottom->hardware_version;
579 memcpy(fc_host_serial_number(shost), bottom->serial_number,
580 min(FC_SERIAL_NUMBER_SIZE, 17));
/* serial number arrives in EBCDIC; convert in place to ASCII */
581 EBCASC(fc_host_serial_number(shost),
582 min(FC_SERIAL_NUMBER_SIZE, 17));
585 if (FSF_QTCB_CURRENT_VERSION < bottom->low_qtcb_version) {
586 dev_err(&adapter->ccw_device->dev,
587 "The FCP adapter only supports newer "
588 "control block versions\n");
589 zfcp_erp_adapter_shutdown(adapter, 0, "fsecdh4", req);
592 if (FSF_QTCB_CURRENT_VERSION > bottom->high_qtcb_version) {
593 dev_err(&adapter->ccw_device->dev,
594 "The FCP adapter only supports older "
595 "control block versions\n");
596 zfcp_erp_adapter_shutdown(adapter, 0, "fsecdh5", req);
/*
 * Evaluate the port bottom of an exchange-port-data response: copy it
 * to the caller's buffer (when req->data is set) and publish permanent
 * port name / port type (NPIV vs physical), maximum frame size and
 * supported speeds to the fc_host attributes.
 */
600 static void zfcp_fsf_exchange_port_evaluate(struct zfcp_fsf_req *req)
602 struct zfcp_adapter *adapter = req->adapter;
603 struct fsf_qtcb_bottom_port *bottom = &req->qtcb->bottom.port;
604 struct Scsi_Host *shost = adapter->scsi_host;
607 memcpy(req->data, bottom, sizeof(*bottom));
/* NPIV: the physical (permanent) WWPN differs from the virtual one */
609 if (adapter->connection_features & FSF_FEATURE_NPIV_MODE) {
610 fc_host_permanent_port_name(shost) = bottom->wwpn;
611 fc_host_port_type(shost) = FC_PORTTYPE_NPIV;
613 fc_host_permanent_port_name(shost) = fc_host_port_name(shost);
614 fc_host_maxframe_size(shost) = bottom->maximum_frame_size;
615 fc_host_supported_speeds(shost) = bottom->supported_speed;
/*
 * Completion handler for exchange-port-data: on success evaluate the
 * port data; on an incomplete exchange still evaluate what arrived,
 * then process the attached link-down information.  Failed requests
 * are skipped entirely.
 */
618 static void zfcp_fsf_exchange_port_data_handler(struct zfcp_fsf_req *req)
620 struct fsf_qtcb *qtcb = req->qtcb;
622 if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
625 switch (qtcb->header.fsf_status) {
627 zfcp_fsf_exchange_port_evaluate(req);
629 case FSF_EXCHANGE_CONFIG_DATA_INCOMPLETE:
630 zfcp_fsf_exchange_port_evaluate(req);
631 zfcp_fsf_link_down_info_eval(req, "fsepdh1",
632 &qtcb->header.fsf_status_qual.link_down_info);
/*
 * Wait-condition helper for zfcp_fsf_req_sbal_get(): take the request
 * queue lock and test whether free SBALs are available.  When the count
 * is non-zero the lock is presumably kept held for the caller (the
 * unlock only runs on the empty path — confirm against the full source,
 * the return statements are not visible in this excerpt).
 */
637 static int zfcp_fsf_sbal_check(struct zfcp_qdio *qdio)
639 struct zfcp_qdio_queue *req_q = &qdio->req_q;
641 spin_lock_bh(&qdio->req_q_lock);
642 if (atomic_read(&req_q->count))
644 spin_unlock_bh(&qdio->req_q_lock);
/*
 * Wait (interruptibly, up to 5 seconds) for a free SBAL on the request
 * queue.  Called with req_q_lock held; the lock is dropped around the
 * wait and re-taken before returning.  On timeout the outbound queue is
 * assumed to hang and adapter recovery is triggered.
 */
648 static int zfcp_fsf_req_sbal_get(struct zfcp_qdio *qdio)
650 struct zfcp_adapter *adapter = qdio->adapter;
653 spin_unlock_bh(&qdio->req_q_lock);
654 ret = wait_event_interruptible_timeout(qdio->req_q_wq,
655 zfcp_fsf_sbal_check(qdio), 5 * HZ);
659 atomic_inc(&qdio->req_q_full);
660 /* assume hanging outbound queue, try queue recovery */
661 zfcp_erp_adapter_reopen(adapter, 0, "fsrsg_1", NULL);
664 spin_lock_bh(&qdio->req_q_lock);
/*
 * Allocate a zeroed struct zfcp_fsf_req, from @pool when one is given,
 * otherwise with kmalloc.  GFP_ATOMIC because callers hold the request
 * queue lock.
 */
668 static struct zfcp_fsf_req *zfcp_fsf_alloc(mempool_t *pool)
670 struct zfcp_fsf_req *req;
673 req = mempool_alloc(pool, GFP_ATOMIC);
675 req = kmalloc(sizeof(*req), GFP_ATOMIC);
680 memset(req, 0, sizeof(*req));
/*
 * Allocate a zeroed QTCB, from @pool when one is given, otherwise from
 * the global qtcb cache.  GFP_ATOMIC for the same reason as
 * zfcp_fsf_alloc().
 */
685 static struct fsf_qtcb *zfcp_qtcb_alloc(mempool_t *pool)
687 struct fsf_qtcb *qtcb;
690 qtcb = mempool_alloc(pool, GFP_ATOMIC);
692 qtcb = kmem_cache_alloc(zfcp_data.qtcb_cache, GFP_ATOMIC);
697 memset(qtcb, 0, sizeof(*qtcb));
/*
 * Allocate and initialize an FSF request for command @fsf_cmd, drawing
 * the request (and, unless NULL, the QTCB) from @pool.  Sets up the
 * request id, timer, completion, and the first SBAL entry; for all
 * commands except unsolicited status a QTCB is attached, filled with
 * sequence number, command and type (via fsf_qtcb_type[]), and mapped
 * into the second SBAL entry.  Fails with ERR_PTR(-ENOMEM) on
 * allocation failure and ERR_PTR(-EIO) when QDIO is no longer up.
 * Called with the request queue lock held.
 */
701 static struct zfcp_fsf_req *zfcp_fsf_req_create(struct zfcp_qdio *qdio,
702 u32 fsf_cmd, mempool_t *pool)
704 struct qdio_buffer_element *sbale;
705 struct zfcp_qdio_queue *req_q = &qdio->req_q;
706 struct zfcp_adapter *adapter = qdio->adapter;
707 struct zfcp_fsf_req *req = zfcp_fsf_alloc(pool);
710 return ERR_PTR(-ENOMEM);
/* req_no 0 is reserved; skip it when the counter wraps */
712 if (adapter->req_no == 0)
715 INIT_LIST_HEAD(&req->list);
716 init_timer(&req->timer);
717 init_completion(&req->completion);
719 req->adapter = adapter;
720 req->fsf_command = fsf_cmd;
721 req->req_id = adapter->req_no;
722 req->queue_req.sbal_number = 1;
723 req->queue_req.sbal_first = req_q->first;
724 req->queue_req.sbal_last = req_q->first;
725 req->queue_req.sbale_curr = 1;
727 sbale = zfcp_qdio_sbale_req(qdio, &req->queue_req);
/* the request id doubles as the tag returned by the hardware */
728 sbale[0].addr = (void *) req->req_id;
729 sbale[0].flags |= SBAL_FLAGS0_COMMAND;
731 if (likely(fsf_cmd != FSF_QTCB_UNSOLICITED_STATUS)) {
733 req->qtcb = zfcp_qtcb_alloc(adapter->pool.qtcb_pool);
735 req->qtcb = zfcp_qtcb_alloc(NULL);
737 if (unlikely(!req->qtcb)) {
738 zfcp_fsf_req_free(req);
739 return ERR_PTR(-ENOMEM);
742 req->qtcb->prefix.req_seq_no = adapter->fsf_req_seq_no;
743 req->qtcb->prefix.req_id = req->req_id;
744 req->qtcb->prefix.ulp_info = 26;
745 req->qtcb->prefix.qtcb_type = fsf_qtcb_type[req->fsf_command];
746 req->qtcb->prefix.qtcb_version = FSF_QTCB_CURRENT_VERSION;
747 req->qtcb->header.req_handle = req->req_id;
748 req->qtcb->header.fsf_command = req->fsf_command;
749 req->seq_no = adapter->fsf_req_seq_no;
750 req->qtcb->prefix.req_seq_no = adapter->fsf_req_seq_no;
751 sbale[1].addr = (void *) req->qtcb;
752 sbale[1].length = sizeof(struct fsf_qtcb);
755 if (!(atomic_read(&adapter->status) & ZFCP_STATUS_ADAPTER_QDIOUP)) {
756 zfcp_fsf_req_free(req);
757 return ERR_PTR(-EIO);
/*
 * Hand a fully-built FSF request to the hardware.  The request is first
 * inserted into the adapter's request hash (so the interrupt handler
 * can find it), then pushed out via QDIO.  On a QDIO send failure the
 * timer is stopped, the request is removed from the hash again (it is
 * looked up first because completion may already have unlinked it), and
 * adapter recovery is triggered.  On success the adapter-wide FSF
 * sequence number is advanced (except for unsolicited status).
 */
763 static int zfcp_fsf_req_send(struct zfcp_fsf_req *req)
765 struct zfcp_adapter *adapter = req->adapter;
766 struct zfcp_qdio *qdio = adapter->qdio;
769 int with_qtcb = (req->qtcb != NULL);
771 /* put allocated FSF request into hash table */
772 spin_lock_irqsave(&adapter->req_list_lock, flags);
773 idx = zfcp_reqlist_hash(req->req_id);
774 list_add_tail(&req->list, &adapter->req_list[idx]);
775 spin_unlock_irqrestore(&adapter->req_list_lock, flags);
/* record outbound queue usage and issue timestamp for tracing/latency */
777 req->queue_req.qdio_outb_usage = atomic_read(&qdio->req_q.count);
778 req->issued = get_clock();
779 if (zfcp_qdio_send(qdio, &req->queue_req)) {
780 del_timer(&req->timer);
781 spin_lock_irqsave(&adapter->req_list_lock, flags);
782 /* lookup request again, list might have changed */
783 if (zfcp_reqlist_find_safe(adapter, req))
784 zfcp_reqlist_remove(adapter, req);
785 spin_unlock_irqrestore(&adapter->req_list_lock, flags);
786 zfcp_erp_adapter_reopen(adapter, 0, "fsrs__1", req);
790 /* Don't increase for unsolicited status */
792 adapter->fsf_req_seq_no++;
799 * zfcp_fsf_status_read - send status read request
800 * @qdio: pointer to struct zfcp_qdio owning the request queue
802 * Returns: 0 on success, ERROR otherwise
 *
 * Posts one unsolicited status read: builds a QTCB-less request from
 * the status_read_req pool, attaches a status buffer from the
 * status_read_data mempool via the third SBAL entry, and sends it.
 * Resources are rolled back in reverse order on any failure, with a
 * trace record written.  Called with the request queue available;
 * takes req_q_lock itself.
804 int zfcp_fsf_status_read(struct zfcp_qdio *qdio)
806 struct zfcp_adapter *adapter = qdio->adapter;
807 struct zfcp_fsf_req *req;
808 struct fsf_status_read_buffer *sr_buf;
809 struct qdio_buffer_element *sbale;
812 spin_lock_bh(&qdio->req_q_lock);
813 if (zfcp_fsf_req_sbal_get(qdio))
816 req = zfcp_fsf_req_create(qdio, FSF_QTCB_UNSOLICITED_STATUS,
817 adapter->pool.status_read_req);
819 retval = PTR_ERR(req);
823 sbale = zfcp_qdio_sbale_req(qdio, &req->queue_req);
824 sbale[2].flags |= SBAL_FLAGS_LAST_ENTRY;
825 req->queue_req.sbale_curr = 2;
827 sr_buf = mempool_alloc(adapter->pool.status_read_data, GFP_ATOMIC);
832 memset(sr_buf, 0, sizeof(*sr_buf));
834 sbale = zfcp_qdio_sbale_curr(qdio, &req->queue_req);
835 sbale->addr = (void *) sr_buf;
836 sbale->length = sizeof(*sr_buf);
838 retval = zfcp_fsf_req_send(req);
840 goto failed_req_send;
/* error unwinding: free buffer, then request, then trace the failure */
845 mempool_free(sr_buf, adapter->pool.status_read_data);
847 zfcp_fsf_req_free(req);
848 zfcp_dbf_hba_fsf_unsol("fail", adapter->dbf, NULL);
850 spin_unlock_bh(&qdio->req_q_lock);
/*
 * Completion handler for an abort-FCP-command request.  Maps the FSF
 * status onto recovery actions against the unit's adapter/port/unit:
 * stale handles trigger reopen (the word[0]==word[1] test distinguishes
 * a really-invalid handle from a race with a concurrent reopen), boxed
 * conditions set ERROR|RETRY, and a successful abort is recorded as
 * ABORTSUCCEEDED (ABORTNOTNEEDED when the command no longer existed).
 * NOTE(review): break statements between cases are not visible in this
 * truncated excerpt.
 */
854 static void zfcp_fsf_abort_fcp_command_handler(struct zfcp_fsf_req *req)
856 struct zfcp_unit *unit = req->data;
857 union fsf_status_qual *fsq = &req->qtcb->header.fsf_status_qual;
859 if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
862 switch (req->qtcb->header.fsf_status) {
863 case FSF_PORT_HANDLE_NOT_VALID:
864 if (fsq->word[0] == fsq->word[1]) {
865 zfcp_erp_adapter_reopen(unit->port->adapter, 0,
867 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
870 case FSF_LUN_HANDLE_NOT_VALID:
871 if (fsq->word[0] == fsq->word[1]) {
872 zfcp_erp_port_reopen(unit->port, 0, "fsafch2", req);
873 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
876 case FSF_FCP_COMMAND_DOES_NOT_EXIST:
877 req->status |= ZFCP_STATUS_FSFREQ_ABORTNOTNEEDED;
880 zfcp_erp_port_boxed(unit->port, "fsafch3", req);
881 req->status |= ZFCP_STATUS_FSFREQ_ERROR |
882 ZFCP_STATUS_FSFREQ_RETRY;
885 zfcp_erp_unit_boxed(unit, "fsafch4", req);
886 req->status |= ZFCP_STATUS_FSFREQ_ERROR |
887 ZFCP_STATUS_FSFREQ_RETRY;
889 case FSF_ADAPTER_STATUS_AVAILABLE:
890 switch (fsq->word[0]) {
891 case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE:
892 zfcp_fc_test_link(unit->port);
894 case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED:
895 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
900 req->status |= ZFCP_STATUS_FSFREQ_ABORTSUCCEEDED;
906 * zfcp_fsf_abort_fcp_command - abort running SCSI command
907 * @old_req_id: request id of the FCP command to be aborted
908 * @unit: pointer to struct zfcp_unit the command was issued against
909 * Returns: pointer to struct zfcp_fsf_req, NULL on failure
 *
 * Builds an FSF_QTCB_ABORT_FCP_CMND request from the scsi_abort pool,
 * referencing the victim command by its request id, and sends it with
 * the SCSI error-recovery timeout armed.  Bails out when the unit is
 * not in the UNBLOCKED state.
912 struct zfcp_fsf_req *zfcp_fsf_abort_fcp_command(unsigned long old_req_id,
913 struct zfcp_unit *unit)
915 struct qdio_buffer_element *sbale;
916 struct zfcp_fsf_req *req = NULL;
917 struct zfcp_qdio *qdio = unit->port->adapter->qdio;
919 spin_lock_bh(&qdio->req_q_lock);
920 if (zfcp_fsf_req_sbal_get(qdio))
922 req = zfcp_fsf_req_create(qdio, FSF_QTCB_ABORT_FCP_CMND,
923 qdio->adapter->pool.scsi_abort);
929 if (unlikely(!(atomic_read(&unit->status) &
930 ZFCP_STATUS_COMMON_UNBLOCKED)))
933 sbale = zfcp_qdio_sbale_req(qdio, &req->queue_req);
934 sbale[0].flags |= SBAL_FLAGS0_TYPE_READ;
935 sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
938 req->handler = zfcp_fsf_abort_fcp_command_handler;
939 req->qtcb->header.lun_handle = unit->handle;
940 req->qtcb->header.port_handle = unit->port->handle;
/* identify the command to abort by the id it was sent under */
941 req->qtcb->bottom.support.req_handle = (u64) old_req_id;
943 zfcp_fsf_start_timer(req, ZFCP_SCSI_ER_TIMEOUT);
944 if (!zfcp_fsf_req_send(req))
948 zfcp_fsf_req_free(req);
951 spin_unlock_bh(&qdio->req_q_lock);
/*
 * Completion handler for a Generic Service (CT) request.  send_ct->status
 * starts at -EINVAL and is presumably cleared on the good path (the
 * assignment is not visible in this truncated excerpt).  FSF errors are
 * mapped as usual: class-not-supported shuts the adapter down, a stale
 * port handle reopens the adapter, size/reject/SBAL problems flag
 * FSFREQ_ERROR.  The caller's completion callback, if any, is always
 * invoked at the end.
 */
955 static void zfcp_fsf_send_ct_handler(struct zfcp_fsf_req *req)
957 struct zfcp_adapter *adapter = req->adapter;
958 struct zfcp_send_ct *send_ct = req->data;
959 struct fsf_qtcb_header *header = &req->qtcb->header;
961 send_ct->status = -EINVAL;
963 if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
966 switch (header->fsf_status) {
968 zfcp_dbf_san_ct_response(req);
971 case FSF_SERVICE_CLASS_NOT_SUPPORTED:
972 zfcp_fsf_class_not_supp(req);
974 case FSF_ADAPTER_STATUS_AVAILABLE:
975 switch (header->fsf_status_qual.word[0]){
976 case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE:
977 case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED:
978 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
982 case FSF_ACCESS_DENIED:
985 req->status |= ZFCP_STATUS_FSFREQ_ERROR |
986 ZFCP_STATUS_FSFREQ_RETRY;
988 case FSF_PORT_HANDLE_NOT_VALID:
989 zfcp_erp_adapter_reopen(adapter, 0, "fsscth1", req);
991 case FSF_GENERIC_COMMAND_REJECTED:
992 case FSF_PAYLOAD_SIZE_MISMATCH:
993 case FSF_REQUEST_SIZE_TOO_LARGE:
994 case FSF_RESPONSE_SIZE_TOO_LARGE:
995 case FSF_SBAL_MISMATCH:
996 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
/* always notify the submitter, success or failure */
1001 if (send_ct->handler)
1002 send_ct->handler(send_ct->handler_data);
/*
 * Map a CT/ELS request that fits into a single unchained SBAL:
 * entry 2 carries the request buffer, entry 3 the response buffer
 * (entries 0/1 are the command word and QTCB).
 */
1005 static void zfcp_fsf_setup_ct_els_unchained(struct qdio_buffer_element *sbale,
1006 struct scatterlist *sg_req,
1007 struct scatterlist *sg_resp)
1009 sbale[0].flags |= SBAL_FLAGS0_TYPE_WRITE_READ;
1010 sbale[2].addr = sg_virt(sg_req);
1011 sbale[2].length = sg_req->length;
1012 sbale[3].addr = sg_virt(sg_resp);
1013 sbale[3].length = sg_resp->length;
1014 sbale[3].flags |= SBAL_FLAGS_LAST_ENTRY;
/* True if the scatterlist is a single entry no larger than one page,
 * i.e. it fits into one SBAL element without chaining. */
1017 static int zfcp_fsf_one_sbal(struct scatterlist *sg)
1019 return sg_is_last(sg) && sg->length <= PAGE_SIZE;
/*
 * Map the request/response scatterlists of a CT or ELS command onto
 * SBALs.  Without the chained-SBALs adapter feature both lists must fit
 * a single unchained SBAL each.  With the feature, a single SBAL is
 * still preferred when it suffices; otherwise the lists are converted
 * into chained SBALs and the resulting byte counts recorded in the
 * QTCB support bottom.  Return-value details are not fully visible in
 * this truncated excerpt (presumably 0 on success, negative on error).
 */
1022 static int zfcp_fsf_setup_ct_els_sbals(struct zfcp_fsf_req *req,
1023 struct scatterlist *sg_req,
1024 struct scatterlist *sg_resp,
1027 struct zfcp_adapter *adapter = req->adapter;
1028 struct qdio_buffer_element *sbale = zfcp_qdio_sbale_req(adapter->qdio,
1030 u32 feat = adapter->adapter_features;
1033 if (!(feat & FSF_FEATURE_ELS_CT_CHAINED_SBALS)) {
1034 if (!zfcp_fsf_one_sbal(sg_req) || !zfcp_fsf_one_sbal(sg_resp))
1037 zfcp_fsf_setup_ct_els_unchained(sbale, sg_req, sg_resp);
1041 /* use single, unchained SBAL if it can hold the request */
1042 if (zfcp_fsf_one_sbal(sg_req) && zfcp_fsf_one_sbal(sg_resp)) {
1043 zfcp_fsf_setup_ct_els_unchained(sbale, sg_req, sg_resp);
1047 bytes = zfcp_qdio_sbals_from_sg(adapter->qdio, &req->queue_req,
1048 SBAL_FLAGS0_TYPE_WRITE_READ,
1052 req->qtcb->bottom.support.req_buf_length = bytes;
1053 req->queue_req.sbale_curr = ZFCP_LAST_SBALE_PER_SBAL;
1055 bytes = zfcp_qdio_sbals_from_sg(adapter->qdio, &req->queue_req,
1056 SBAL_FLAGS0_TYPE_WRITE_READ,
1057 sg_resp, max_sbals);
1058 req->qtcb->bottom.support.resp_buf_length = bytes;
/*
 * Shared setup for CT/GS and ELS requests: map the buffers onto SBALs,
 * then fill in class-3 service, the FC timeout (two R_A_TOV) and arm
 * the request timer with a slightly longer deadline so the adapter's
 * own timeout fires first.
 */
1065 static int zfcp_fsf_setup_ct_els(struct zfcp_fsf_req *req,
1066 struct scatterlist *sg_req,
1067 struct scatterlist *sg_resp,
1072 ret = zfcp_fsf_setup_ct_els_sbals(req, sg_req, sg_resp, max_sbals);
1076 /* common settings for ct/gs and els requests */
1077 req->qtcb->bottom.support.service_class = FSF_CLASS_3;
1078 req->qtcb->bottom.support.timeout = 2 * R_A_TOV;
1079 zfcp_fsf_start_timer(req, (2 * R_A_TOV + 10) * HZ);
1085 * zfcp_fsf_send_ct - initiate a Generic Service request (FC-GS)
1086 * @ct: pointer to struct zfcp_send_ct with data for request
1087 * @pool: if non-null this mempool is used to allocate struct zfcp_fsf_req
 *
 * Sends the CT payload to the well-known address port recorded in
 * @ct->wka_port via FSF_QTCB_SEND_GENERIC.  The request is marked for
 * automatic cleanup and is freed on any failure path.
1089 int zfcp_fsf_send_ct(struct zfcp_send_ct *ct, mempool_t *pool)
1091 struct zfcp_wka_port *wka_port = ct->wka_port;
1092 struct zfcp_qdio *qdio = wka_port->adapter->qdio;
1093 struct zfcp_fsf_req *req;
1096 spin_lock_bh(&qdio->req_q_lock);
1097 if (zfcp_fsf_req_sbal_get(qdio))
1100 req = zfcp_fsf_req_create(qdio, FSF_QTCB_SEND_GENERIC, pool);
1107 req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
1108 ret = zfcp_fsf_setup_ct_els(req, ct->req, ct->resp,
1109 FSF_MAX_SBALS_PER_REQ);
1113 req->handler = zfcp_fsf_send_ct_handler;
1114 req->qtcb->header.port_handle = wka_port->handle;
1117 zfcp_dbf_san_ct_request(req);
1119 ret = zfcp_fsf_req_send(req);
/* failure path: release the request before dropping the lock */
1126 zfcp_fsf_req_free(req);
1128 spin_unlock_bh(&qdio->req_q_lock);
/*
 * Completion handler for an ELS request.  send_els->status starts at
 * -EINVAL and is set to 0 on a good response.  Link-test qualifiers may
 * trigger a port link test (skipped for ADISC, which IS the link test);
 * retryable qualifiers, rejects and size mismatches flag FSFREQ_ERROR;
 * access-denied is delegated to the common port handler.  The caller's
 * completion callback, if any, is always invoked at the end.
 * NOTE(review): break statements between cases are not visible in this
 * truncated excerpt.
 */
1132 static void zfcp_fsf_send_els_handler(struct zfcp_fsf_req *req)
1134 struct zfcp_send_els *send_els = req->data;
1135 struct zfcp_port *port = send_els->port;
1136 struct fsf_qtcb_header *header = &req->qtcb->header;
1138 send_els->status = -EINVAL;
1140 if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
1141 goto skip_fsfstatus;
1143 switch (header->fsf_status) {
1145 zfcp_dbf_san_els_response(req);
1146 send_els->status = 0;
1148 case FSF_SERVICE_CLASS_NOT_SUPPORTED:
1149 zfcp_fsf_class_not_supp(req);
1151 case FSF_ADAPTER_STATUS_AVAILABLE:
1152 switch (header->fsf_status_qual.word[0]){
1153 case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE:
1154 if (port && (send_els->ls_code != ZFCP_LS_ADISC))
1155 zfcp_fc_test_link(port);
1157 case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED:
1158 case FSF_SQ_RETRY_IF_POSSIBLE:
1159 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1163 case FSF_ELS_COMMAND_REJECTED:
1164 case FSF_PAYLOAD_SIZE_MISMATCH:
1165 case FSF_REQUEST_SIZE_TOO_LARGE:
1166 case FSF_RESPONSE_SIZE_TOO_LARGE:
1168 case FSF_ACCESS_DENIED:
1170 zfcp_fsf_access_denied_port(req, port);
1172 case FSF_SBAL_MISMATCH:
1173 /* should never occur, avoided in zfcp_fsf_send_els */
1176 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1180 if (send_els->handler)
1181 send_els->handler(send_els->handler_data);
1185 * zfcp_fsf_send_els - initiate an ELS command (FC-FS)
1186 * @els: pointer to struct zfcp_send_els with data for the command
1188 int zfcp_fsf_send_els(struct zfcp_send_els *els)
1190 struct zfcp_fsf_req *req;
1191 struct zfcp_qdio *qdio = els->adapter->qdio;
1194 spin_lock_bh(&qdio->req_q_lock);
1195 if (zfcp_fsf_req_sbal_get(qdio))
1198 req = zfcp_fsf_req_create(qdio, FSF_QTCB_SEND_ELS, NULL);
1205 req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
/* ELS payloads are small: allow at most 2 SBALs for req/resp buffers
 * (compare FSF_MAX_SBALS_PER_REQ used by zfcp_fsf_send_ct). */
1206 ret = zfcp_fsf_setup_ct_els(req, els->req, els->resp, 2);
/* ELS frames are addressed by destination ID, not by a port handle. */
1211 req->qtcb->bottom.support.d_id = els->d_id;
1212 req->handler = zfcp_fsf_send_els_handler;
1215 zfcp_dbf_san_els_request(req);
1217 ret = zfcp_fsf_req_send(req);
1224 zfcp_fsf_req_free(req);
1226 spin_unlock_bh(&qdio->req_q_lock);
/* Submit an "exchange configuration data" FSF command on behalf of an
 * adapter ERP action; the result is processed asynchronously by
 * zfcp_fsf_exchange_config_data_handler. Returns 0 on success. */
1230 int zfcp_fsf_exchange_config_data(struct zfcp_erp_action *erp_action)
1232 struct qdio_buffer_element *sbale;
1233 struct zfcp_fsf_req *req;
1234 struct zfcp_qdio *qdio = erp_action->adapter->qdio;
1237 spin_lock_bh(&qdio->req_q_lock);
1238 if (zfcp_fsf_req_sbal_get(qdio))
/* Allocate from the ERP mempool so recovery does not depend on
 * general memory availability. */
1241 req = zfcp_fsf_req_create(qdio, FSF_QTCB_EXCHANGE_CONFIG_DATA,
1242 qdio->adapter->pool.erp_req)
1245 retval = PTR_ERR(req);
1249 req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
/* Inbound-only transfer: mark the SBAL as read type, one entry. */
1250 sbale = zfcp_qdio_sbale_req(qdio, &req->queue_req);
1251 sbale[0].flags |= SBAL_FLAGS0_TYPE_READ;
1252 sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
/* Advertise the optional FSF features this driver supports. */
1254 req->qtcb->bottom.config.feature_selection =
1256 FSF_FEATURE_LUN_SHARING |
1257 FSF_FEATURE_NOTIFICATION_LOST |
1258 FSF_FEATURE_UPDATE_ALERT;
1259 req->erp_action = erp_action;
1260 req->handler = zfcp_fsf_exchange_config_data_handler;
1261 erp_action->fsf_req = req;
/* ERP requests run under the fixed ERP timeout (zfcp_fsf_start_erp_timer). */
1263 zfcp_fsf_start_erp_timer(req);
1264 retval = zfcp_fsf_req_send(req);
/* On send failure: free the request and detach it from the ERP action. */
1266 zfcp_fsf_req_free(req);
1267 erp_action->fsf_req = NULL;
1270 spin_unlock_bh(&qdio->req_q_lock);
/* Synchronous variant of exchange-config-data: builds and sends the
 * request, then blocks until it completes. @data, if used, receives the
 * returned config bottom (copy happens in lines not visible here --
 * confirm against the full source). */
1274 int zfcp_fsf_exchange_config_data_sync(struct zfcp_qdio *qdio,
1275 struct fsf_qtcb_bottom_config *data)
1277 struct qdio_buffer_element *sbale;
1278 struct zfcp_fsf_req *req = NULL;
1281 spin_lock_bh(&qdio->req_q_lock);
1282 if (zfcp_fsf_req_sbal_get(qdio))
/* No mempool: this path is not part of ERP. */
1285 req = zfcp_fsf_req_create(qdio, FSF_QTCB_EXCHANGE_CONFIG_DATA, NULL);
1288 retval = PTR_ERR(req);
1292 sbale = zfcp_qdio_sbale_req(qdio, &req->queue_req);
1293 sbale[0].flags |= SBAL_FLAGS0_TYPE_READ;
1294 sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
1295 req->handler = zfcp_fsf_exchange_config_data_handler;
1297 req->qtcb->bottom.config.feature_selection =
1299 FSF_FEATURE_LUN_SHARING |
1300 FSF_FEATURE_NOTIFICATION_LOST |
1301 FSF_FEATURE_UPDATE_ALERT;
1306 zfcp_fsf_start_timer(req, ZFCP_FSF_REQUEST_TIMEOUT);
1307 retval = zfcp_fsf_req_send(req);
/* Release the queue lock before sleeping on the completion. */
1308 spin_unlock_bh(&qdio->req_q_lock);
1310 wait_for_completion(&req->completion);
1312 zfcp_fsf_req_free(req);
1316 spin_unlock_bh(&qdio->req_q_lock);
1321 * zfcp_fsf_exchange_port_data - request information about local port
1322 * @erp_action: ERP action for the adapter for which port data is requested
1323 * Returns: 0 on success, error otherwise
1325 int zfcp_fsf_exchange_port_data(struct zfcp_erp_action *erp_action)
1327 struct zfcp_qdio *qdio = erp_action->adapter->qdio;
1328 struct qdio_buffer_element *sbale;
1329 struct zfcp_fsf_req *req;
/* The command is only available if the adapter advertises
 * HBA-API management support. */
1332 if (!(qdio->adapter->adapter_features & FSF_FEATURE_HBAAPI_MANAGEMENT))
1335 spin_lock_bh(&qdio->req_q_lock);
1336 if (zfcp_fsf_req_sbal_get(qdio))
/* Allocate from the ERP mempool; this runs as part of recovery. */
1339 req = zfcp_fsf_req_create(qdio, FSF_QTCB_EXCHANGE_PORT_DATA,
1340 qdio->adapter->pool.erp_req);
1343 retval = PTR_ERR(req);
1347 req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
/* Inbound-only transfer: single read-type SBAL entry. */
1348 sbale = zfcp_qdio_sbale_req(qdio, &req->queue_req);
1349 sbale[0].flags |= SBAL_FLAGS0_TYPE_READ;
1350 sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
1352 req->handler = zfcp_fsf_exchange_port_data_handler;
1353 req->erp_action = erp_action;
1354 erp_action->fsf_req = req;
1356 zfcp_fsf_start_erp_timer(req);
1357 retval = zfcp_fsf_req_send(req);
/* On send failure: free the request and detach it from the ERP action. */
1359 zfcp_fsf_req_free(req);
1360 erp_action->fsf_req = NULL;
1363 spin_unlock_bh(&qdio->req_q_lock);
1368 * zfcp_fsf_exchange_port_data_sync - request information about local port
1369 * @qdio: pointer to struct zfcp_qdio
1370 * @data: pointer to struct fsf_qtcb_bottom_port
1371 * Returns: 0 on success, error otherwise
1373 int zfcp_fsf_exchange_port_data_sync(struct zfcp_qdio *qdio,
1374 struct fsf_qtcb_bottom_port *data)
1376 struct qdio_buffer_element *sbale;
1377 struct zfcp_fsf_req *req = NULL;
/* Same feature gate as the async variant. */
1380 if (!(qdio->adapter->adapter_features & FSF_FEATURE_HBAAPI_MANAGEMENT))
1383 spin_lock_bh(&qdio->req_q_lock);
1384 if (zfcp_fsf_req_sbal_get(qdio))
1387 req = zfcp_fsf_req_create(qdio, FSF_QTCB_EXCHANGE_PORT_DATA, NULL);
1390 retval = PTR_ERR(req);
1397 sbale = zfcp_qdio_sbale_req(qdio, &req->queue_req);
1398 sbale[0].flags |= SBAL_FLAGS0_TYPE_READ;
1399 sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
1401 req->handler = zfcp_fsf_exchange_port_data_handler;
1402 zfcp_fsf_start_timer(req, ZFCP_FSF_REQUEST_TIMEOUT);
1403 retval = zfcp_fsf_req_send(req);
/* Drop the queue lock before blocking on the request's completion. */
1404 spin_unlock_bh(&qdio->req_q_lock);
1407 wait_for_completion(&req->completion);
1409 zfcp_fsf_req_free(req);
1414 spin_unlock_bh(&qdio->req_q_lock);
/* Completion handler for an "open port" request: on success record the
 * port handle and update the port status bits, and sanity-check the
 * PLOGI payload returned by the channel; on failure trigger the
 * appropriate recovery. Always drops the port reference taken by
 * zfcp_fsf_open_port. */
1418 static void zfcp_fsf_open_port_handler(struct zfcp_fsf_req *req)
1420 struct zfcp_port *port = req->data;
1421 struct fsf_qtcb_header *header = &req->qtcb->header;
1422 struct fsf_plogi *plogi;
1424 if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
1427 switch (header->fsf_status) {
1428 case FSF_PORT_ALREADY_OPEN:
1430 case FSF_ACCESS_DENIED:
1431 zfcp_fsf_access_denied_port(req, port);
1433 case FSF_MAXIMUM_NUMBER_OF_PORTS_EXCEEDED:
1434 dev_warn(&req->adapter->ccw_device->dev,
1435 "Not enough FCP adapter resources to open "
1436 "remote port 0x%016Lx\n",
1437 (unsigned long long)port->wwpn);
/* Out of adapter resources: give up on this port (ERP "failed"). */
1438 zfcp_erp_port_failed(port, "fsoph_1", req);
1439 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1441 case FSF_ADAPTER_STATUS_AVAILABLE:
1442 switch (header->fsf_status_qual.word[0]) {
1443 case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE:
1444 case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED:
1445 case FSF_SQ_NO_RETRY_POSSIBLE:
1446 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
/* Success: remember the handle the channel assigned to this port. */
1451 port->handle = header->port_handle;
1452 atomic_set_mask(ZFCP_STATUS_COMMON_OPEN |
1453 ZFCP_STATUS_PORT_PHYS_OPEN, &port->status);
1454 atomic_clear_mask(ZFCP_STATUS_COMMON_ACCESS_DENIED |
1455 ZFCP_STATUS_COMMON_ACCESS_BOXED,
1457 /* check whether D_ID has changed during open */
1459 * FIXME: This check is not airtight, as the FCP channel does
1460 * not monitor closures of target port connections caused on
1461 * the remote side. Thus, they might miss out on invalidating
1462 * locally cached WWPNs (and other N_Port parameters) of gone
1463 * target ports. So, our heroic attempt to make things safe
1464 * could be undermined by 'open port' response data tagged with
1465 * obsolete WWPNs. Another reason to monitor potential
1466 * connection closures ourself at least (by interpreting
1467 * incoming ELS' and unsolicited status). It just crosses my
1468 * mind that one should be able to cross-check by means of
1469 * another GID_PN straight after a port has been opened.
1470 * Alternately, an ADISC/PDISC ELS should suffice, as well.
1472 plogi = (struct fsf_plogi *) req->qtcb->bottom.support.els;
/* Only evaluate the PLOGI payload if it is at least minimum-sized. */
1473 if (req->qtcb->bottom.support.els1_length >=
1474 FSF_PLOGI_MIN_LEN) {
1475 if (plogi->serv_param.wwpn != port->wwpn) {
1477 dev_warn(&port->adapter->ccw_device->dev,
1478 "A port opened with WWPN 0x%016Lx "
1479 "returned data that identifies it as "
1481 (unsigned long long) port->wwpn,
1482 (unsigned long long)
1483 plogi->serv_param.wwpn);
1485 port->wwnn = plogi->serv_param.wwnn;
1486 zfcp_fc_plogi_evaluate(port, plogi);
1490 case FSF_UNKNOWN_OP_SUBTYPE:
1491 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
/* Drop the reference taken by zfcp_fsf_open_port. */
1496 put_device(&port->sysfs_device);
1500 * zfcp_fsf_open_port - create and send open port request
1501 * @erp_action: pointer to struct zfcp_erp_action
1502 * Returns: 0 on success, error otherwise
1504 int zfcp_fsf_open_port(struct zfcp_erp_action *erp_action)
1506 struct qdio_buffer_element *sbale;
1507 struct zfcp_qdio *qdio = erp_action->adapter->qdio;
1508 struct zfcp_port *port = erp_action->port;
1509 struct zfcp_fsf_req *req;
1512 spin_lock_bh(&qdio->req_q_lock);
1513 if (zfcp_fsf_req_sbal_get(qdio))
/* Allocate from the ERP mempool; this runs as part of recovery. */
1516 req = zfcp_fsf_req_create(qdio, FSF_QTCB_OPEN_PORT_WITH_DID,
1517 qdio->adapter->pool.erp_req);
1520 retval = PTR_ERR(req);
1524 req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
1525 sbale = zfcp_qdio_sbale_req(qdio, &req->queue_req);
1526 sbale[0].flags |= SBAL_FLAGS0_TYPE_READ;
1527 sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
1529 req->handler = zfcp_fsf_open_port_handler;
/* Open is addressed by destination ID; the handle comes back in the
 * response (see zfcp_fsf_open_port_handler). */
1530 req->qtcb->bottom.support.d_id = port->d_id;
1532 req->erp_action = erp_action;
1533 erp_action->fsf_req = req;
/* Pin the port for the lifetime of the request; the completion
 * handler does the matching put_device. */
1534 get_device(&port->sysfs_device);
1536 zfcp_fsf_start_erp_timer(req);
1537 retval = zfcp_fsf_req_send(req);
/* Send failed: undo allocation, ERP linkage and the port reference. */
1539 zfcp_fsf_req_free(req);
1540 erp_action->fsf_req = NULL;
1541 put_device(&port->sysfs_device);
1544 spin_unlock_bh(&qdio->req_q_lock);
/* Completion handler for a "close port" request: reopen the adapter if
 * the handle was stale, otherwise clear the port's OPEN status. */
1548 static void zfcp_fsf_close_port_handler(struct zfcp_fsf_req *req)
1550 struct zfcp_port *port = req->data;
1552 if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
1555 switch (req->qtcb->header.fsf_status) {
1556 case FSF_PORT_HANDLE_NOT_VALID:
/* Channel no longer knows our handle: recover the whole adapter. */
1557 zfcp_erp_adapter_reopen(port->adapter, 0, "fscph_1", req);
1558 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1560 case FSF_ADAPTER_STATUS_AVAILABLE:
1563 zfcp_erp_modify_port_status(port, "fscph_2", req,
1564 ZFCP_STATUS_COMMON_OPEN,
1571 * zfcp_fsf_close_port - create and send close port request
1572 * @erp_action: pointer to struct zfcp_erp_action
1573 * Returns: 0 on success, error otherwise
1575 int zfcp_fsf_close_port(struct zfcp_erp_action *erp_action)
1577 struct qdio_buffer_element *sbale;
1578 struct zfcp_qdio *qdio = erp_action->adapter->qdio;
1579 struct zfcp_fsf_req *req;
1582 spin_lock_bh(&qdio->req_q_lock);
1583 if (zfcp_fsf_req_sbal_get(qdio))
/* Allocate from the ERP mempool; this runs as part of recovery. */
1586 req = zfcp_fsf_req_create(qdio, FSF_QTCB_CLOSE_PORT,
1587 qdio->adapter->pool.erp_req);
1590 retval = PTR_ERR(req);
1594 req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
1595 sbale = zfcp_qdio_sbale_req(qdio, &req->queue_req);
1596 sbale[0].flags |= SBAL_FLAGS0_TYPE_READ;
1597 sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
1599 req->handler = zfcp_fsf_close_port_handler;
1600 req->data = erp_action->port;
1601 req->erp_action = erp_action;
/* Close is addressed by the handle obtained at open time. */
1602 req->qtcb->header.port_handle = erp_action->port->handle;
1603 erp_action->fsf_req = req;
1605 zfcp_fsf_start_erp_timer(req);
1606 retval = zfcp_fsf_req_send(req);
/* On send failure: free the request and detach it from the ERP action. */
1608 zfcp_fsf_req_free(req);
1609 erp_action->fsf_req = NULL;
1612 spin_unlock_bh(&qdio->req_q_lock);
/* Completion handler for an "open well-known-address port" request:
 * record the handle and ONLINE/OFFLINE state, then wake any waiter
 * blocked on the WKA port's completion queue. */
1616 static void zfcp_fsf_open_wka_port_handler(struct zfcp_fsf_req *req)
1618 struct zfcp_wka_port *wka_port = req->data;
1619 struct fsf_qtcb_header *header = &req->qtcb->header;
1621 if (req->status & ZFCP_STATUS_FSFREQ_ERROR) {
1622 wka_port->status = ZFCP_WKA_PORT_OFFLINE;
1626 switch (header->fsf_status) {
1627 case FSF_MAXIMUM_NUMBER_OF_PORTS_EXCEEDED:
1628 dev_warn(&req->adapter->ccw_device->dev,
1629 "Opening WKA port 0x%x failed\n", wka_port->d_id);
1631 case FSF_ADAPTER_STATUS_AVAILABLE:
1632 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1634 case FSF_ACCESS_DENIED:
1635 wka_port->status = ZFCP_WKA_PORT_OFFLINE;
/* Success: keep the channel-assigned handle for later traffic. */
1638 wka_port->handle = header->port_handle;
1640 case FSF_PORT_ALREADY_OPEN:
1641 wka_port->status = ZFCP_WKA_PORT_ONLINE;
/* Unblock whoever initiated the open and is waiting for the result. */
1644 wake_up(&wka_port->completion_wq);
1648 * zfcp_fsf_open_wka_port - create and send open wka-port request
1649 * @wka_port: pointer to struct zfcp_wka_port
1650 * Returns: 0 on success, error otherwise
1652 int zfcp_fsf_open_wka_port(struct zfcp_wka_port *wka_port)
1654 struct qdio_buffer_element *sbale;
1655 struct zfcp_qdio *qdio = wka_port->adapter->qdio;
1656 struct zfcp_fsf_req *req;
1659 spin_lock_bh(&qdio->req_q_lock);
1660 if (zfcp_fsf_req_sbal_get(qdio))
1663 req = zfcp_fsf_req_create(qdio, FSF_QTCB_OPEN_PORT_WITH_DID,
1664 qdio->adapter->pool.erp_req);
1666 if (unlikely(IS_ERR(req))) {
1667 retval = PTR_ERR(req);
1671 req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
1672 sbale = zfcp_qdio_sbale_req(qdio, &req->queue_req);
1673 sbale[0].flags |= SBAL_FLAGS0_TYPE_READ;
1674 sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
1676 req->handler = zfcp_fsf_open_wka_port_handler;
/* WKA ports (e.g. directory server) are addressed by well-known D_ID. */
1677 req->qtcb->bottom.support.d_id = wka_port->d_id;
1678 req->data = wka_port;
/* Not an ERP request: uses the generic request timeout. */
1680 zfcp_fsf_start_timer(req, ZFCP_FSF_REQUEST_TIMEOUT);
1681 retval = zfcp_fsf_req_send(req);
1683 zfcp_fsf_req_free(req);
1685 spin_unlock_bh(&qdio->req_q_lock);
/* Completion handler for a "close WKA port" request: recover the adapter
 * on a stale handle, mark the WKA port offline, and wake any waiter. */
1689 static void zfcp_fsf_close_wka_port_handler(struct zfcp_fsf_req *req)
1691 struct zfcp_wka_port *wka_port = req->data;
1693 if (req->qtcb->header.fsf_status == FSF_PORT_HANDLE_NOT_VALID) {
1694 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1695 zfcp_erp_adapter_reopen(wka_port->adapter, 0, "fscwph1", req);
/* The port is considered offline regardless of the outcome. */
1698 wka_port->status = ZFCP_WKA_PORT_OFFLINE;
1699 wake_up(&wka_port->completion_wq);
1703 * zfcp_fsf_close_wka_port - create and send close wka port request
1704 * @wka_port: pointer to struct zfcp_wka_port to close
1705 * Returns: 0 on success, error otherwise
1707 int zfcp_fsf_close_wka_port(struct zfcp_wka_port *wka_port)
1709 struct qdio_buffer_element *sbale;
1710 struct zfcp_qdio *qdio = wka_port->adapter->qdio;
1711 struct zfcp_fsf_req *req;
1714 spin_lock_bh(&qdio->req_q_lock);
1715 if (zfcp_fsf_req_sbal_get(qdio))
1718 req = zfcp_fsf_req_create(qdio, FSF_QTCB_CLOSE_PORT,
1719 qdio->adapter->pool.erp_req);
1721 if (unlikely(IS_ERR(req))) {
1722 retval = PTR_ERR(req);
1726 req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
1727 sbale = zfcp_qdio_sbale_req(qdio, &req->queue_req);
1728 sbale[0].flags |= SBAL_FLAGS0_TYPE_READ;
1729 sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
1731 req->handler = zfcp_fsf_close_wka_port_handler;
1732 req->data = wka_port;
/* Close is addressed by the handle recorded when the port was opened. */
1733 req->qtcb->header.port_handle = wka_port->handle;
/* Not an ERP request: uses the generic request timeout. */
1735 zfcp_fsf_start_timer(req, ZFCP_FSF_REQUEST_TIMEOUT);
1736 retval = zfcp_fsf_req_send(req);
1738 zfcp_fsf_req_free(req);
1740 spin_unlock_bh(&qdio->req_q_lock);
/* Completion handler for a "close physical port" request: clear the
 * physical-open state of the port and the OPEN state of all its units,
 * and trigger recovery on stale handles or boxed ports. */
1744 static void zfcp_fsf_close_physical_port_handler(struct zfcp_fsf_req *req)
1746 struct zfcp_port *port = req->data;
1747 struct fsf_qtcb_header *header = &req->qtcb->header;
1748 struct zfcp_unit *unit;
1750 if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
1753 switch (header->fsf_status) {
1754 case FSF_PORT_HANDLE_NOT_VALID:
1755 zfcp_erp_adapter_reopen(port->adapter, 0, "fscpph1", req);
1756 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1758 case FSF_ACCESS_DENIED:
1759 zfcp_fsf_access_denied_port(req, port);
1761 case FSF_PORT_BOXED:
1762 /* can't use generic zfcp_erp_modify_port_status because
1763 * ZFCP_STATUS_COMMON_OPEN must not be reset for the port */
1764 atomic_clear_mask(ZFCP_STATUS_PORT_PHYS_OPEN, &port->status);
/* Units only stay open while their port is physically open, so
 * clear OPEN on every unit of this port (list walked under lock). */
1765 read_lock(&port->unit_list_lock);
1766 list_for_each_entry(unit, &port->unit_list, list)
1767 atomic_clear_mask(ZFCP_STATUS_COMMON_OPEN,
1769 read_unlock(&port->unit_list_lock);
1770 zfcp_erp_port_boxed(port, "fscpph2", req);
1771 req->status |= ZFCP_STATUS_FSFREQ_ERROR |
1772 ZFCP_STATUS_FSFREQ_RETRY;
1775 case FSF_ADAPTER_STATUS_AVAILABLE:
1776 switch (header->fsf_status_qual.word[0]) {
1777 case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE:
1779 case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED:
1780 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
/* Success path: same selective status update as the boxed case. */
1785 /* can't use generic zfcp_erp_modify_port_status because
1786 * ZFCP_STATUS_COMMON_OPEN must not be reset for the port
1788 atomic_clear_mask(ZFCP_STATUS_PORT_PHYS_OPEN, &port->status);
1789 read_lock(&port->unit_list_lock);
1790 list_for_each_entry(unit, &port->unit_list, list)
1791 atomic_clear_mask(ZFCP_STATUS_COMMON_OPEN,
1793 read_unlock(&port->unit_list_lock);
1799 * zfcp_fsf_close_physical_port - close physical port
1800 * @erp_action: pointer to struct zfcp_erp_action
1801 * Returns: 0 on success
1803 int zfcp_fsf_close_physical_port(struct zfcp_erp_action *erp_action)
1805 struct qdio_buffer_element *sbale;
1806 struct zfcp_qdio *qdio = erp_action->adapter->qdio;
1807 struct zfcp_fsf_req *req;
1810 spin_lock_bh(&qdio->req_q_lock);
1811 if (zfcp_fsf_req_sbal_get(qdio))
/* Allocate from the ERP mempool; this runs as part of recovery. */
1814 req = zfcp_fsf_req_create(qdio, FSF_QTCB_CLOSE_PHYSICAL_PORT,
1815 qdio->adapter->pool.erp_req);
1818 retval = PTR_ERR(req);
1822 req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
1823 sbale = zfcp_qdio_sbale_req(qdio, &req->queue_req);
1824 sbale[0].flags |= SBAL_FLAGS0_TYPE_READ;
1825 sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
1827 req->data = erp_action->port;
1828 req->qtcb->header.port_handle = erp_action->port->handle;
1829 req->erp_action = erp_action;
1830 req->handler = zfcp_fsf_close_physical_port_handler;
1831 erp_action->fsf_req = req;
1833 zfcp_fsf_start_erp_timer(req);
1834 retval = zfcp_fsf_req_send(req);
/* On send failure: free the request and detach it from the ERP action. */
1836 zfcp_fsf_req_free(req);
1837 erp_action->fsf_req = NULL;
1840 spin_unlock_bh(&qdio->req_q_lock);
/* Completion handler for an "open LUN" request: on success record the
 * LUN handle, mark the unit open, and -- outside NPIV mode -- derive the
 * unit's shared/read-only status from the reported LUN access info;
 * on failure trigger the appropriate recovery and warn the admin. */
1844 static void zfcp_fsf_open_unit_handler(struct zfcp_fsf_req *req)
1846 struct zfcp_adapter *adapter = req->adapter;
1847 struct zfcp_unit *unit = req->data;
1848 struct fsf_qtcb_header *header = &req->qtcb->header;
1849 struct fsf_qtcb_bottom_support *bottom = &req->qtcb->bottom.support;
1850 struct fsf_queue_designator *queue_designator =
1851 &header->fsf_status_qual.fsf_queue_designator;
1852 int exclusive, readwrite;
1854 if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
/* Start from a clean slate; the cases below set bits as needed. */
1857 atomic_clear_mask(ZFCP_STATUS_COMMON_ACCESS_DENIED |
1858 ZFCP_STATUS_COMMON_ACCESS_BOXED |
1859 ZFCP_STATUS_UNIT_SHARED |
1860 ZFCP_STATUS_UNIT_READONLY,
1863 switch (header->fsf_status) {
1865 case FSF_PORT_HANDLE_NOT_VALID:
1866 zfcp_erp_adapter_reopen(unit->port->adapter, 0, "fsouh_1", req);
1868 case FSF_LUN_ALREADY_OPEN:
1870 case FSF_ACCESS_DENIED:
1871 zfcp_fsf_access_denied_unit(req, unit);
1872 atomic_clear_mask(ZFCP_STATUS_UNIT_SHARED, &unit->status);
1873 atomic_clear_mask(ZFCP_STATUS_UNIT_READONLY, &unit->status);
1875 case FSF_PORT_BOXED:
1876 zfcp_erp_port_boxed(unit->port, "fsouh_2", req);
1877 req->status |= ZFCP_STATUS_FSFREQ_ERROR |
1878 ZFCP_STATUS_FSFREQ_RETRY;
1880 case FSF_LUN_SHARING_VIOLATION:
/* qual.word[0] set: another system image holds the LUN; tell which. */
1881 if (header->fsf_status_qual.word[0])
1882 dev_warn(&adapter->ccw_device->dev,
1883 "LUN 0x%Lx on port 0x%Lx is already in "
1884 "use by CSS%d, MIF Image ID %x\n",
1885 (unsigned long long)unit->fcp_lun,
1886 (unsigned long long)unit->port->wwpn,
1887 queue_designator->cssid,
1888 queue_designator->hla);
1890 zfcp_act_eval_err(adapter,
1891 header->fsf_status_qual.word[2]);
1892 zfcp_erp_unit_access_denied(unit, "fsouh_3", req);
1893 atomic_clear_mask(ZFCP_STATUS_UNIT_SHARED, &unit->status);
1894 atomic_clear_mask(ZFCP_STATUS_UNIT_READONLY, &unit->status);
1895 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1897 case FSF_MAXIMUM_NUMBER_OF_LUNS_EXCEEDED:
1898 dev_warn(&adapter->ccw_device->dev,
1899 "No handle is available for LUN "
1900 "0x%016Lx on port 0x%016Lx\n",
1901 (unsigned long long)unit->fcp_lun,
1902 (unsigned long long)unit->port->wwpn);
/* Out of LUN handles: give up on this unit (ERP "failed"). */
1903 zfcp_erp_unit_failed(unit, "fsouh_4", req);
1905 case FSF_INVALID_COMMAND_OPTION:
1906 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1908 case FSF_ADAPTER_STATUS_AVAILABLE:
1909 switch (header->fsf_status_qual.word[0]) {
1910 case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE:
1911 zfcp_fc_test_link(unit->port);
1913 case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED:
1914 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
/* Success: remember the handle and flag the unit open. */
1920 unit->handle = header->lun_handle;
1921 atomic_set_mask(ZFCP_STATUS_COMMON_OPEN, &unit->status);
/* LUN access info is only meaningful without NPIV, with the
 * LUN-sharing feature, and on a non-privileged subchannel. */
1923 if (!(adapter->connection_features & FSF_FEATURE_NPIV_MODE) &&
1924 (adapter->adapter_features & FSF_FEATURE_LUN_SHARING) &&
1925 !zfcp_ccw_priv_sch(adapter)) {
1926 exclusive = (bottom->lun_access_info &
1927 FSF_UNIT_ACCESS_EXCLUSIVE);
1928 readwrite = (bottom->lun_access_info &
1929 FSF_UNIT_ACCESS_OUTBOUND_TRANSFER);
1932 atomic_set_mask(ZFCP_STATUS_UNIT_SHARED,
1936 atomic_set_mask(ZFCP_STATUS_UNIT_READONLY,
1938 dev_info(&adapter->ccw_device->dev,
1939 "SCSI device at LUN 0x%016Lx on port "
1940 "0x%016Lx opened read-only\n",
1941 (unsigned long long)unit->fcp_lun,
1942 (unsigned long long)unit->port->wwpn);
/* Exclusive + read-only and shared + read-write are both
 * unsupported combinations: fail and shut down the unit. */
1945 if (exclusive && !readwrite) {
1946 dev_err(&adapter->ccw_device->dev,
1947 "Exclusive read-only access not "
1948 "supported (unit 0x%016Lx, "
1950 (unsigned long long)unit->fcp_lun,
1951 (unsigned long long)unit->port->wwpn);
1952 zfcp_erp_unit_failed(unit, "fsouh_5", req);
1953 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1954 zfcp_erp_unit_shutdown(unit, 0, "fsouh_6", req);
1955 } else if (!exclusive && readwrite) {
1956 dev_err(&adapter->ccw_device->dev,
1957 "Shared read-write access not "
1958 "supported (unit 0x%016Lx, port "
1960 (unsigned long long)unit->fcp_lun,
1961 (unsigned long long)unit->port->wwpn);
1962 zfcp_erp_unit_failed(unit, "fsouh_7", req);
1963 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
1964 zfcp_erp_unit_shutdown(unit, 0, "fsouh_8", req);
1972 * zfcp_fsf_open_unit - open unit
1973 * @erp_action: pointer to struct zfcp_erp_action
1974 * Returns: 0 on success, error otherwise
1976 int zfcp_fsf_open_unit(struct zfcp_erp_action *erp_action)
1978 struct qdio_buffer_element *sbale;
1979 struct zfcp_adapter *adapter = erp_action->adapter;
1980 struct zfcp_qdio *qdio = adapter->qdio;
1981 struct zfcp_fsf_req *req;
1984 spin_lock_bh(&qdio->req_q_lock);
1985 if (zfcp_fsf_req_sbal_get(qdio))
/* Allocate from the ERP mempool; this runs as part of recovery. */
1988 req = zfcp_fsf_req_create(qdio, FSF_QTCB_OPEN_LUN,
1989 adapter->pool.erp_req);
1992 retval = PTR_ERR(req);
1996 req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
1997 sbale = zfcp_qdio_sbale_req(qdio, &req->queue_req);
1998 sbale[0].flags |= SBAL_FLAGS0_TYPE_READ;
1999 sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
/* Address the LUN via its port handle plus FCP LUN. */
2001 req->qtcb->header.port_handle = erp_action->port->handle;
2002 req->qtcb->bottom.support.fcp_lun = erp_action->unit->fcp_lun;
2003 req->handler = zfcp_fsf_open_unit_handler;
2004 req->data = erp_action->unit;
2005 req->erp_action = erp_action;
2006 erp_action->fsf_req = req;
/* Without NPIV, ask the channel not to "box" the LUN on conflicts. */
2008 if (!(adapter->connection_features & FSF_FEATURE_NPIV_MODE))
2009 req->qtcb->bottom.support.option = FSF_OPEN_LUN_SUPPRESS_BOXING;
2011 zfcp_fsf_start_erp_timer(req);
2012 retval = zfcp_fsf_req_send(req);
/* On send failure: free the request and detach it from the ERP action. */
2014 zfcp_fsf_req_free(req);
2015 erp_action->fsf_req = NULL;
2018 spin_unlock_bh(&qdio->req_q_lock);
/* Completion handler for a "close LUN" request: trigger recovery for
 * stale handles or boxed ports, otherwise clear the unit's OPEN bit. */
2022 static void zfcp_fsf_close_unit_handler(struct zfcp_fsf_req *req)
2024 struct zfcp_unit *unit = req->data;
2026 if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
2029 switch (req->qtcb->header.fsf_status) {
2030 case FSF_PORT_HANDLE_NOT_VALID:
/* Stale port handle: the whole adapter needs to be recovered. */
2031 zfcp_erp_adapter_reopen(unit->port->adapter, 0, "fscuh_1", req);
2032 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
2034 case FSF_LUN_HANDLE_NOT_VALID:
/* Stale LUN handle: reopening the port is sufficient. */
2035 zfcp_erp_port_reopen(unit->port, 0, "fscuh_2", req);
2036 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
2038 case FSF_PORT_BOXED:
2039 zfcp_erp_port_boxed(unit->port, "fscuh_3", req);
2040 req->status |= ZFCP_STATUS_FSFREQ_ERROR |
2041 ZFCP_STATUS_FSFREQ_RETRY;
2043 case FSF_ADAPTER_STATUS_AVAILABLE:
2044 switch (req->qtcb->header.fsf_status_qual.word[0]) {
2045 case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE:
2046 zfcp_fc_test_link(unit->port);
2048 case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED:
2049 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
/* Success: the unit is no longer open. */
2054 atomic_clear_mask(ZFCP_STATUS_COMMON_OPEN, &unit->status);
2060 * zfcp_fsf_close_unit - close zfcp unit
2061 * @erp_action: pointer to struct zfcp_erp_action for the unit to close
2062 * Returns: 0 on success, error otherwise
2064 int zfcp_fsf_close_unit(struct zfcp_erp_action *erp_action)
2066 struct qdio_buffer_element *sbale;
2067 struct zfcp_qdio *qdio = erp_action->adapter->qdio;
2068 struct zfcp_fsf_req *req;
2071 spin_lock_bh(&qdio->req_q_lock);
2072 if (zfcp_fsf_req_sbal_get(qdio))
/* Allocate from the ERP mempool; this runs as part of recovery. */
2075 req = zfcp_fsf_req_create(qdio, FSF_QTCB_CLOSE_LUN,
2076 qdio->adapter->pool.erp_req);
2079 retval = PTR_ERR(req);
2083 req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
2084 sbale = zfcp_qdio_sbale_req(qdio, &req->queue_req);
2085 sbale[0].flags |= SBAL_FLAGS0_TYPE_READ;
2086 sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
/* Address the LUN via its port handle plus the open LUN handle. */
2088 req->qtcb->header.port_handle = erp_action->port->handle;
2089 req->qtcb->header.lun_handle = erp_action->unit->handle;
2090 req->handler = zfcp_fsf_close_unit_handler;
2091 req->data = erp_action->unit;
2092 req->erp_action = erp_action;
2093 erp_action->fsf_req = req;
2095 zfcp_fsf_start_erp_timer(req);
2096 retval = zfcp_fsf_req_send(req);
/* On send failure: free the request and detach it from the ERP action. */
2098 zfcp_fsf_req_free(req);
2099 erp_action->fsf_req = NULL;
2102 spin_unlock_bh(&qdio->req_q_lock);
/* Fold one latency sample into a latency record: accumulate the sum and
 * track the running minimum and maximum. */
2106 static void zfcp_fsf_update_lat(struct fsf_latency_record *lat_rec, u32 lat)
2108 lat_rec->sum += lat;
2109 lat_rec->min = min(lat_rec->min, lat);
2110 lat_rec->max = max(lat_rec->max, lat);
/* Collect per-request trace data for a completed SCSI command: fill a
 * zfcp_blk_drv_data record (flags, SBAL usage, channel/fabric latency),
 * update the unit's per-direction latency statistics when the adapter
 * provides measurement data, and hand the record to blktrace. */
2113 static void zfcp_fsf_req_trace(struct zfcp_fsf_req *req, struct scsi_cmnd *scsi)
2115 struct fsf_qual_latency_info *lat_in;
2116 struct latency_cont *lat = NULL;
2117 struct zfcp_unit *unit = req->unit;
2118 struct zfcp_blk_drv_data blktrc;
/* Factor for converting adapter latency units into time; set up at
 * adapter init -- presumably ticks per latency unit, confirm there. */
2119 int ticks = req->adapter->timer_ticks;
2121 lat_in = &req->qtcb->prefix.prot_status_qual.latency_info;
2124 blktrc.magic = ZFCP_BLK_DRV_DATA_MAGIC;
2125 if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
2126 blktrc.flags |= ZFCP_BLK_REQ_ERROR;
2127 blktrc.inb_usage = req->queue_req.qdio_inb_usage;
2128 blktrc.outb_usage = req->queue_req.qdio_outb_usage;
/* Latency fields are only valid if the adapter measures them. */
2130 if (req->adapter->adapter_features & FSF_FEATURE_MEASUREMENT_DATA) {
2131 blktrc.flags |= ZFCP_BLK_LAT_VALID;
2132 blktrc.channel_lat = lat_in->channel_lat * ticks;
2133 blktrc.fabric_lat = lat_in->fabric_lat * ticks;
/* Pick the statistics bucket matching the data direction. */
2135 switch (req->qtcb->bottom.io.data_direction) {
2136 case FSF_DATADIR_READ:
2137 lat = &unit->latencies.read;
2139 case FSF_DATADIR_WRITE:
2140 lat = &unit->latencies.write;
2142 case FSF_DATADIR_CMND:
2143 lat = &unit->latencies.cmd;
2148 spin_lock(&unit->latencies.lock);
2149 zfcp_fsf_update_lat(&lat->channel, lat_in->channel_lat);
2150 zfcp_fsf_update_lat(&lat->fabric, lat_in->fabric_lat);
2152 spin_unlock(&unit->latencies.lock);
2156 blk_add_driver_data(scsi->request->q, scsi->request, &blktrc,
/* Completion handler for an FCP command carrying a SCSI command: set the
 * SCSI result (from the FCP response, or a host byte for aborted/failed
 * requests), record trace data, and complete the command via scsi_done.
 * The adapter's abort_lock is held across the whole path -- see the
 * original comment at the end for why. */
2160 static void zfcp_fsf_send_fcp_command_task_handler(struct zfcp_fsf_req *req)
2162 struct scsi_cmnd *scpnt;
2163 struct fcp_resp_with_ext *fcp_rsp;
2164 unsigned long flags;
2166 read_lock_irqsave(&req->adapter->abort_lock, flags);
/* No SCSI command attached (already completed/aborted): nothing to do. */
2169 if (unlikely(!scpnt)) {
2170 read_unlock_irqrestore(&req->adapter->abort_lock, flags);
2174 if (unlikely(req->status & ZFCP_STATUS_FSFREQ_ABORTED)) {
2175 set_host_byte(scpnt, DID_SOFT_ERROR);
2176 goto skip_fsfstatus;
2179 if (unlikely(req->status & ZFCP_STATUS_FSFREQ_ERROR)) {
2180 set_host_byte(scpnt, DID_ERROR);
2181 goto skip_fsfstatus;
/* Normal completion: derive the SCSI result from the FCP response. */
2184 fcp_rsp = (struct fcp_resp_with_ext *) &req->qtcb->bottom.io.fcp_rsp;
2185 zfcp_fc_eval_fcp_rsp(fcp_rsp, scpnt);
2187 zfcp_fsf_req_trace(req, scpnt);
2190 if (scpnt->result != 0)
2191 zfcp_dbf_scsi_result("erro", 3, req->adapter->dbf, scpnt, req);
2192 else if (scpnt->retries > 0)
2193 zfcp_dbf_scsi_result("retr", 4, req->adapter->dbf, scpnt, req);
2195 zfcp_dbf_scsi_result("norm", 6, req->adapter->dbf, scpnt, req);
/* Sever the request<->command link before completing the command. */
2197 scpnt->host_scribble = NULL;
2198 (scpnt->scsi_done) (scpnt);
2200 * We must hold this lock until scsi_done has been called.
2201 * Otherwise we may call scsi_done after abort regarding this
2202 * command has completed.
2203 * Note: scsi_done must not block!
2205 read_unlock_irqrestore(&req->adapter->abort_lock, flags);
/* Completion handler for an FCP task-management command: inspect the
 * FCP response info and flag the request as TMF-failed unless the
 * response reports FCP_TMF_CMPL and no request-level error occurred. */
2208 static void zfcp_fsf_send_fcp_ctm_handler(struct zfcp_fsf_req *req)
2210 struct fcp_resp_with_ext *fcp_rsp;
2211 struct fcp_resp_rsp_info *rsp_info;
2213 fcp_rsp = (struct fcp_resp_with_ext *) &req->qtcb->bottom.io.fcp_rsp;
/* The RSP_INFO block immediately follows the fixed response part. */
2214 rsp_info = (struct fcp_resp_rsp_info *) &fcp_rsp[1];
2216 if ((rsp_info->rsp_code != FCP_TMF_CMPL) ||
2217 (req->status & ZFCP_STATUS_FSFREQ_ERROR))
2218 req->status |= ZFCP_STATUS_FSFREQ_TMFUNCFAILED;
/* Common completion handler for FCP commands: evaluate the FSF status,
 * trigger recovery (adapter/port reopen, shutdown on protocol errors)
 * as needed, then dispatch to the task-management or SCSI-command
 * specific handler. Drops the unit reference taken at submission. */
2222 static void zfcp_fsf_send_fcp_command_handler(struct zfcp_fsf_req *req)
2224 struct zfcp_unit *unit;
2225 struct fsf_qtcb_header *header = &req->qtcb->header;
2227 if (unlikely(req->status & ZFCP_STATUS_FSFREQ_TASK_MANAGEMENT))
2232 if (unlikely(req->status & ZFCP_STATUS_FSFREQ_ERROR))
2233 goto skip_fsfstatus;
2235 switch (header->fsf_status) {
2236 case FSF_HANDLE_MISMATCH:
2237 case FSF_PORT_HANDLE_NOT_VALID:
2238 zfcp_erp_adapter_reopen(unit->port->adapter, 0, "fssfch1", req);
2239 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
2241 case FSF_FCPLUN_NOT_VALID:
2242 case FSF_LUN_HANDLE_NOT_VALID:
2243 zfcp_erp_port_reopen(unit->port, 0, "fssfch2", req);
2244 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
2246 case FSF_SERVICE_CLASS_NOT_SUPPORTED:
2247 zfcp_fsf_class_not_supp(req);
2249 case FSF_ACCESS_DENIED:
2250 zfcp_fsf_access_denied_unit(req, unit);
/* Driver-side protocol errors: the channel closed the unit/port,
 * so shut down the adapter to force a clean restart. */
2252 case FSF_DIRECTION_INDICATOR_NOT_VALID:
2253 dev_err(&req->adapter->ccw_device->dev,
2254 "Incorrect direction %d, unit 0x%016Lx on port "
2255 "0x%016Lx closed\n",
2256 req->qtcb->bottom.io.data_direction,
2257 (unsigned long long)unit->fcp_lun,
2258 (unsigned long long)unit->port->wwpn);
2259 zfcp_erp_adapter_shutdown(unit->port->adapter, 0, "fssfch3",
2261 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
2263 case FSF_CMND_LENGTH_NOT_VALID:
2264 dev_err(&req->adapter->ccw_device->dev,
2265 "Incorrect CDB length %d, unit 0x%016Lx on "
2266 "port 0x%016Lx closed\n",
2267 req->qtcb->bottom.io.fcp_cmnd_length,
2268 (unsigned long long)unit->fcp_lun,
2269 (unsigned long long)unit->port->wwpn);
2270 zfcp_erp_adapter_shutdown(unit->port->adapter, 0, "fssfch4",
2272 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
2274 case FSF_PORT_BOXED:
2275 zfcp_erp_port_boxed(unit->port, "fssfch5", req);
2276 req->status |= ZFCP_STATUS_FSFREQ_ERROR |
2277 ZFCP_STATUS_FSFREQ_RETRY;
2280 zfcp_erp_unit_boxed(unit, "fssfch6", req);
2281 req->status |= ZFCP_STATUS_FSFREQ_ERROR |
2282 ZFCP_STATUS_FSFREQ_RETRY;
2284 case FSF_ADAPTER_STATUS_AVAILABLE:
2285 if (header->fsf_status_qual.word[0] ==
2286 FSF_SQ_INVOKE_LINK_TEST_PROCEDURE)
2287 zfcp_fc_test_link(unit->port);
2288 req->status |= ZFCP_STATUS_FSFREQ_ERROR;
/* Dispatch to the TMF- or SCSI-command-specific completion path. */
2292 if (req->status & ZFCP_STATUS_FSFREQ_TASK_MANAGEMENT)
2293 zfcp_fsf_send_fcp_ctm_handler(req);
2295 zfcp_fsf_send_fcp_command_task_handler(req);
/* Drop the unit reference taken by the submit path. */
2297 put_device(&unit->sysfs_device);
2302 * zfcp_fsf_send_fcp_command_task - initiate an FCP command (for a SCSI command)
2303 * @unit: unit where command is sent to
2304 * @scsi_cmnd: scsi command to be sent
2306 int zfcp_fsf_send_fcp_command_task(struct zfcp_unit *unit,
2307 struct scsi_cmnd *scsi_cmnd)
2309 struct zfcp_fsf_req *req;
2310 struct fcp_cmnd *fcp_cmnd;
2311 unsigned int sbtype = SBAL_FLAGS0_TYPE_READ;
2312 int real_bytes, retval = -EIO;
2313 struct zfcp_adapter *adapter = unit->port->adapter;
2314 struct zfcp_qdio *qdio = adapter->qdio;
/* Refuse new I/O while the unit is blocked (e.g. under recovery). */
2316 if (unlikely(!(atomic_read(&unit->status) &
2317 ZFCP_STATUS_COMMON_UNBLOCKED)))
/* Fast path: plain spin_lock here, unlike the _bh variant used by
 * the slow-path submitters -- NOTE(review): confirm the calling
 * context already accounts for softirq safety. */
2320 spin_lock(&qdio->req_q_lock);
/* Do not wait for queue space on the I/O fast path; count the miss. */
2321 if (atomic_read(&qdio->req_q.count) <= 0) {
2322 atomic_inc(&qdio->req_q_full);
2326 req = zfcp_fsf_req_create(qdio, FSF_QTCB_FCP_CMND,
2327 adapter->pool.scsi_req);
2330 retval = PTR_ERR(req);
2334 req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
/* Pin the unit; the completion handler does the matching put_device. */
2335 get_device(&unit->sysfs_device);
2337 req->data = scsi_cmnd;
2338 req->handler = zfcp_fsf_send_fcp_command_handler;
2339 req->qtcb->header.lun_handle = unit->handle;
2340 req->qtcb->header.port_handle = unit->port->handle;
2341 req->qtcb->bottom.io.service_class = FSF_CLASS_3;
2342 req->qtcb->bottom.io.fcp_cmnd_length = FCP_CMND_LEN;
/* Stash the request id so an abort can find this request later. */
2344 scsi_cmnd->host_scribble = (unsigned char *) req->req_id;
2347 * set depending on data direction:
2348 * data direction bits in SBALE (SB Type)
2349 * data direction bits in QTCB
2351 switch (scsi_cmnd->sc_data_direction) {
2353 req->qtcb->bottom.io.data_direction = FSF_DATADIR_CMND;
2355 case DMA_FROM_DEVICE:
2356 req->qtcb->bottom.io.data_direction = FSF_DATADIR_READ;
2359 req->qtcb->bottom.io.data_direction = FSF_DATADIR_WRITE;
2360 sbtype = SBAL_FLAGS0_TYPE_WRITE;
/* Bidirectional transfers are not supported by this path. */
2362 case DMA_BIDIRECTIONAL:
2363 goto failed_scsi_cmnd;
2366 fcp_cmnd = (struct fcp_cmnd *) &req->qtcb->bottom.io.fcp_cmnd;
2367 zfcp_fc_scsi_to_fcp(fcp_cmnd, scsi_cmnd);
/* Map the command's scatter-gather list onto the request's SBALs. */
2369 real_bytes = zfcp_qdio_sbals_from_sg(qdio, &req->queue_req, sbtype,
2370 scsi_sglist(scsi_cmnd),
2371 FSF_MAX_SBALS_PER_REQ);
2372 if (unlikely(real_bytes < 0)) {
/* SG list exceeds the per-request SBAL limit: this should not
 * happen for a well-formed command, so take the unit down. */
2373 if (req->queue_req.sbal_number >= FSF_MAX_SBALS_PER_REQ) {
2374 dev_err(&adapter->ccw_device->dev,
2375 "Oversize data package, unit 0x%016Lx "
2376 "on port 0x%016Lx closed\n",
2377 (unsigned long long)unit->fcp_lun,
2378 (unsigned long long)unit->port->wwpn);
2379 zfcp_erp_unit_shutdown(unit, 0, "fssfct1", req);
2382 goto failed_scsi_cmnd;
2385 retval = zfcp_fsf_req_send(req);
2386 if (unlikely(retval))
2387 goto failed_scsi_cmnd;
/* Failure unwind: drop the unit reference, free the request, and
 * clear the abort linkage set up above. */
2392 put_device(&unit->sysfs_device);
2393 zfcp_fsf_req_free(req);
2394 scsi_cmnd->host_scribble = NULL;
2396 spin_unlock(&qdio->req_q_lock);
2401 * zfcp_fsf_send_fcp_ctm - send SCSI task management command
2402 * @unit: pointer to struct zfcp_unit
2403 * @tm_flags: unsigned byte for task management flags
2404 * Returns: on success pointer to struct fsf_req, NULL otherwise
2406 struct zfcp_fsf_req *zfcp_fsf_send_fcp_ctm(struct zfcp_unit *unit, u8 tm_flags)
2408 struct qdio_buffer_element *sbale;
2409 struct zfcp_fsf_req *req = NULL;
2410 struct fcp_cmnd *fcp_cmnd;
2411 struct zfcp_qdio *qdio = unit->port->adapter->qdio;
/* no task management while the unit is blocked by error recovery */
2413 if (unlikely(!(atomic_read(&unit->status) &
2414 ZFCP_STATUS_COMMON_UNBLOCKED)))
2417 spin_lock_bh(&qdio->req_q_lock);
2418 if (zfcp_fsf_req_sbal_get(qdio))
2421 req = zfcp_fsf_req_create(qdio, FSF_QTCB_FCP_CMND,
2422 qdio->adapter->pool.scsi_req);
/* mark as task management so the shared completion handler can
 * distinguish it from regular FCP commands */
2429 req->status |= ZFCP_STATUS_FSFREQ_TASK_MANAGEMENT;
2431 req->handler = zfcp_fsf_send_fcp_command_handler;
2432 req->qtcb->header.lun_handle = unit->handle;
2433 req->qtcb->header.port_handle = unit->port->handle;
/* a TM function carries no data payload, only the FCP_CMND IU */
2434 req->qtcb->bottom.io.data_direction = FSF_DATADIR_CMND;
2435 req->qtcb->bottom.io.service_class = FSF_CLASS_3;
2436 req->qtcb->bottom.io.fcp_cmnd_length = FCP_CMND_LEN;
/* single SBAL: first entry typed for write, second closes the chain */
2438 sbale = zfcp_qdio_sbale_req(qdio, &req->queue_req);
2439 sbale[0].flags |= SBAL_FLAGS0_TYPE_WRITE;
2440 sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
2442 fcp_cmnd = (struct fcp_cmnd *) &req->qtcb->bottom.io.fcp_cmnd;
2443 zfcp_fc_fcp_tm(fcp_cmnd, unit->device, tm_flags);
/* use the SCSI error-recovery timeout for TM requests */
2445 zfcp_fsf_start_timer(req, ZFCP_SCSI_ER_TIMEOUT);
2446 if (!zfcp_fsf_req_send(req))
/* send failed: release the request; the caller gets NULL back */
2449 zfcp_fsf_req_free(req);
2452 spin_unlock_bh(&qdio->req_q_lock);
2456 static void zfcp_fsf_control_file_handler(struct zfcp_fsf_req *req)
2461 * zfcp_fsf_control_file - control file upload/download
2462 * @adapter: pointer to struct zfcp_adapter
2463 * @fsf_cfdc: pointer to struct zfcp_fsf_cfdc
2464 * Returns: on success pointer to struct zfcp_fsf_req, ERR_PTR otherwise
2466 struct zfcp_fsf_req *zfcp_fsf_control_file(struct zfcp_adapter *adapter,
2467 struct zfcp_fsf_cfdc *fsf_cfdc)
2469 struct qdio_buffer_element *sbale;
2470 struct zfcp_qdio *qdio = adapter->qdio;
2471 struct zfcp_fsf_req *req = NULL;
2472 struct fsf_qtcb_bottom_support *bottom;
2473 int direction, retval = -EIO, bytes;
/* CFDC requests require the adapter to advertise the feature */
2475 if (!(adapter->adapter_features & FSF_FEATURE_CFDC))
2476 return ERR_PTR(-EOPNOTSUPP);
/* download transfers data to the adapter, upload reads it back */
2478 switch (fsf_cfdc->command) {
2479 case FSF_QTCB_DOWNLOAD_CONTROL_FILE:
2480 direction = SBAL_FLAGS0_TYPE_WRITE;
2482 case FSF_QTCB_UPLOAD_CONTROL_FILE:
2483 direction = SBAL_FLAGS0_TYPE_READ;
2486 return ERR_PTR(-EINVAL);
2489 spin_lock_bh(&qdio->req_q_lock);
2490 if (zfcp_fsf_req_sbal_get(qdio))
2493 req = zfcp_fsf_req_create(qdio, fsf_cfdc->command, NULL);
2499 req->handler = zfcp_fsf_control_file_handler;
2501 sbale = zfcp_qdio_sbale_req(qdio, &req->queue_req);
2502 sbale[0].flags |= direction;
2504 bottom = &req->qtcb->bottom.support;
2505 bottom->operation_subtype = FSF_CFDC_OPERATION_SUBTYPE;
2506 bottom->option = fsf_cfdc->option;
/* map the caller-provided sg list; the transfer must cover exactly
 * ZFCP_CFDC_MAX_SIZE bytes - anything else is treated as an error */
2508 bytes = zfcp_qdio_sbals_from_sg(qdio, &req->queue_req,
2509 direction, fsf_cfdc->sg,
2510 FSF_MAX_SBALS_PER_REQ);
2511 if (bytes != ZFCP_CFDC_MAX_SIZE) {
2512 zfcp_fsf_req_free(req);
2516 zfcp_fsf_start_timer(req, ZFCP_FSF_REQUEST_TIMEOUT);
2517 retval = zfcp_fsf_req_send(req);
2519 spin_unlock_bh(&qdio->req_q_lock);
/* synchronous interface: wait for the request to complete */
2522 wait_for_completion(&req->completion);
2525 return ERR_PTR(retval);
2529 * zfcp_fsf_reqid_check - validate req_id contained in SBAL returned by QDIO
2530 * @qdio: pointer to struct zfcp_qdio
2531 * @sbal_idx: response queue index of SBAL to be processed
2533 void zfcp_fsf_reqid_check(struct zfcp_qdio *qdio, int sbal_idx)
2535 struct zfcp_adapter *adapter = qdio->adapter;
2536 struct qdio_buffer *sbal = qdio->resp_q.sbal[sbal_idx];
2537 struct qdio_buffer_element *sbale;
2538 struct zfcp_fsf_req *fsf_req;
2539 unsigned long flags, req_id;
2542 for (idx = 0; idx < QDIO_MAX_ELEMENTS_PER_BUFFER; idx++) {
2544 sbale = &sbal->element[idx];
2545 req_id = (unsigned long) sbale->addr;
2546 spin_lock_irqsave(&adapter->req_list_lock, flags);
2547 fsf_req = zfcp_reqlist_find(adapter, req_id);
2551 * Unknown request means that we have potentially memory
2552 * corruption and must stop the machine immediately.
2554 panic("error: unknown req_id (%lx) on adapter %s.\n",
2555 req_id, dev_name(&adapter->ccw_device->dev));
2557 list_del(&fsf_req->list);
2558 spin_unlock_irqrestore(&adapter->req_list_lock, flags);
2560 fsf_req->queue_req.sbal_response = sbal_idx;
2561 fsf_req->queue_req.qdio_inb_usage =
2562 atomic_read(&qdio->resp_q.count);
2563 zfcp_fsf_req_complete(fsf_req);
2565 if (likely(sbale->flags & SBAL_FLAGS_LAST_ENTRY))