static int debug_fcoe;
#define FCOE_MAX_QUEUE_DEPTH 256
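+/* low-water mark below which an lport's qfull flag is cleared */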
+#define FCOE_LOW_QUEUE_DEPTH 32
/* destination address mode */
#define FCOE_GW_ADDR_MODE 0x00
/* Function Prototypes */
static int fcoe_check_wait_queue(struct fc_lport *);
-static void fcoe_insert_wait_queue_head(struct fc_lport *, struct sk_buff *);
-static void fcoe_insert_wait_queue(struct fc_lport *, struct sk_buff *);
static void fcoe_recv_flogi(struct fcoe_softc *, struct fc_frame *, u8 *);
#ifdef CONFIG_HOTPLUG_CPU
static int fcoe_cpu_callback(struct notifier_block *, ulong, void *);
*
* Returns: none
*/
-static void fcoe_create_percpu_data(int cpu)
+static void fcoe_create_percpu_data(unsigned int cpu)
{
struct fc_lport *lp;
struct fcoe_softc *fc;
*
* Returns: none
*/
-static void fcoe_destroy_percpu_data(int cpu)
+static void fcoe_destroy_percpu_data(unsigned int cpu)
{
struct fc_lport *lp;
struct fcoe_softc *fc;
struct fcoe_softc *fc;
struct fcoe_dev_stats *stats;
struct fc_frame_header *fh;
- unsigned short oxid;
- int cpu_idx;
struct fcoe_percpu_s *fps;
+ unsigned short oxid;
+ unsigned int cpu_idx;
fc = container_of(ptype, struct fcoe_softc, fcoe_packet_type);
lp = fc->lp;
{
struct fcoe_percpu_s *fps;
struct page *page;
- int cpu_idx;
+ unsigned int cpu_idx;
cpu_idx = get_cpu();
fps = fcoe_percpu[cpu_idx];
WARN_ON((fr_len(fp) % sizeof(u32)) != 0);
- fc = fcoe_softc(lp);
+ fc = lport_priv(lp);
/*
* if it is a flogi then we need to learn gw-addr
* and my own fcid
/* crc offload */
if (likely(lp->crc_offload)) {
- skb->ip_summed = CHECKSUM_COMPLETE;
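+ /* CHECKSUM_PARTIAL: have the netdev compute the FC CRC from csum_start on tx */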
+ skb->ip_summed = CHECKSUM_PARTIAL;
skb->csum_start = skb_headroom(skb);
skb->csum_offset = skb->len;
crc = 0;
if (skb_is_nonlinear(skb)) {
skb_frag_t *frag;
if (fcoe_get_paged_crc_eof(skb, tlen)) {
- kfree(skb);
+ kfree_skb(skb);
return -ENOMEM;
}
frag = &skb_shinfo(skb)->frags[skb_shinfo(skb)->nr_frags - 1];
skb_reset_mac_header(skb);
skb_reset_network_header(skb);
skb->mac_len = elen;
- skb->protocol = htons(ETH_P_802_3);
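+ /* FCoE frames carry the FCoE ethertype, not raw 802.3 */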
+ skb->protocol = htons(ETH_P_FCOE);
skb->dev = fc->real_dev;
/* fill up mac and fcoe headers */
FC_FCOE_ENCAPS_VER(hp, FC_FCOE_VER);
hp->fcoe_sof = sof;
+#ifdef NETIF_F_FSO
+ /* fcoe lso, mss is in max_payload which is non-zero for FCP data */
+ if (lp->seq_offload && fr_max_payload(fp)) {
+ skb_shinfo(skb)->gso_type = SKB_GSO_FCOE;
+ skb_shinfo(skb)->gso_size = fr_max_payload(fp);
+ } else {
+ skb_shinfo(skb)->gso_type = 0;
+ skb_shinfo(skb)->gso_size = 0;
+ }
+#endif
/* update tx stats: regardless if LLD fails */
stats = lp->dev_stats[smp_processor_id()];
if (stats) {
rc = fcoe_start_io(skb);
if (rc) {
- fcoe_insert_wait_queue(lp, skb);
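+ /* transmit failed: park the skb on the pending queue for later retry */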
+ spin_lock_bh(&fc->fcoe_pending_queue.lock);
+ __skb_queue_tail(&fc->fcoe_pending_queue, skb);
+ spin_unlock_bh(&fc->fcoe_pending_queue.lock);
if (fc->fcoe_pending_queue.qlen > FCOE_MAX_QUEUE_DEPTH)
lp->qfull = 1;
}
struct fcoe_softc *fc;
struct fcoe_hdr *hp;
- set_user_nice(current, 19);
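+ /* run the receive thread at the highest non-RT priority */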
+ set_user_nice(current, -20);
while (!kthread_should_stop()) {
* it's solicited data, in which case, the FCP layer would
* check it during the copy.
*/
- if (lp->crc_offload)
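+ /* skip the software CRC check only when the netdev actually verified it */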
+ if (lp->crc_offload && skb->ip_summed == CHECKSUM_UNNECESSARY)
fr_flags(fp) &= ~FCPHF_CRC_UNCHECKED;
else
fr_flags(fp) |= FCPHF_CRC_UNCHECKED;
*/
void fcoe_watchdog(ulong vp)
{
- struct fc_lport *lp;
struct fcoe_softc *fc;
- int qfilled = 0;
read_lock(&fcoe_hostlist_lock);
list_for_each_entry(fc, &fcoe_hostlist, list) {
- lp = fc->lp;
- if (lp) {
- if (fc->fcoe_pending_queue.qlen > FCOE_MAX_QUEUE_DEPTH)
- qfilled = 1;
- if (fcoe_check_wait_queue(lp) < FCOE_MAX_QUEUE_DEPTH) {
- if (qfilled)
- lp->qfull = 0;
- }
- }
+ if (fc->lp)
+ fcoe_check_wait_queue(fc->lp);
}
read_unlock(&fcoe_hostlist_lock);
*
* This empties the wait_queue, dequeuing skbs from the head of the
* wait_queue and calling fcoe_start_io() on each. If all skbs have been
- * transmitted, return 0 if a error occurs, then restore wait_queue and
- * try again later.
+ * transmitted, return the remaining qlen. Return -1 if another context
+ * is already draining the queue; on a failed transmit, restore the skb
+ * to the wait_queue and try again later.
*
* The wait_queue is used when the skb transmit fails. The skb will go
* in the wait_queue which will be emptied by the timer function or
* by the next skb transmit.
*/
static int fcoe_check_wait_queue(struct fc_lport *lp)
{
- int rc;
+ struct fcoe_softc *fc = lport_priv(lp);
struct sk_buff *skb;
- struct fcoe_softc *fc;
+ int rc = -1;
- fc = fcoe_softc(lp);
spin_lock_bh(&fc->fcoe_pending_queue.lock);
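+ /* let only one context drain the pending queue at a time */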
+ if (fc->fcoe_pending_queue_active)
+ goto out;
+ fc->fcoe_pending_queue_active = 1;
- /*
- * if interface pending queue full then set qfull in lport.
- */
- if (fc->fcoe_pending_queue.qlen > FCOE_MAX_QUEUE_DEPTH)
- lp->qfull = 1;
- if (fc->fcoe_pending_queue.qlen) {
- while ((skb = __skb_dequeue(&fc->fcoe_pending_queue)) != NULL) {
- spin_unlock_bh(&fc->fcoe_pending_queue.lock);
- rc = fcoe_start_io(skb);
- if (rc) {
- fcoe_insert_wait_queue_head(lp, skb);
- return rc;
- }
- spin_lock_bh(&fc->fcoe_pending_queue.lock);
- }
- if (fc->fcoe_pending_queue.qlen < FCOE_MAX_QUEUE_DEPTH)
- lp->qfull = 0;
- }
- spin_unlock_bh(&fc->fcoe_pending_queue.lock);
- return fc->fcoe_pending_queue.qlen;
-}
+ while (fc->fcoe_pending_queue.qlen) {
+ /* keep qlen > 0 until fcoe_start_io succeeds */
+ fc->fcoe_pending_queue.qlen++;
+ skb = __skb_dequeue(&fc->fcoe_pending_queue);
-/**
- * fcoe_insert_wait_queue_head() - puts skb to fcoe pending queue head
- * @lp: the fc_port for this skb
- * @skb: the associated skb to be xmitted
- *
- * Returns: none
- */
-static void fcoe_insert_wait_queue_head(struct fc_lport *lp,
- struct sk_buff *skb)
-{
- struct fcoe_softc *fc;
-
- fc = fcoe_softc(lp);
- spin_lock_bh(&fc->fcoe_pending_queue.lock);
- __skb_queue_head(&fc->fcoe_pending_queue, skb);
- spin_unlock_bh(&fc->fcoe_pending_queue.lock);
-}
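+ /* drop the queue lock while the skb is handed to fcoe_start_io() */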
+ spin_unlock_bh(&fc->fcoe_pending_queue.lock);
+ rc = fcoe_start_io(skb);
+ spin_lock_bh(&fc->fcoe_pending_queue.lock);
-/**
- * fcoe_insert_wait_queue() - put the skb into fcoe pending queue tail
- * @lp: the fc_port for this skb
- * @skb: the associated skb to be xmitted
- *
- * Returns: none
- */
-static void fcoe_insert_wait_queue(struct fc_lport *lp,
- struct sk_buff *skb)
-{
- struct fcoe_softc *fc;
+ if (rc) {
+ __skb_queue_head(&fc->fcoe_pending_queue, skb);
+ /* undo temporary increment above */
+ fc->fcoe_pending_queue.qlen--;
+ break;
+ }
+ /* undo temporary increment above */
+ fc->fcoe_pending_queue.qlen--;
+ }
- fc = fcoe_softc(lp);
- spin_lock_bh(&fc->fcoe_pending_queue.lock);
- __skb_queue_tail(&fc->fcoe_pending_queue, skb);
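+ /* clear qfull once the backlog drains below the low-water mark */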
+ if (fc->fcoe_pending_queue.qlen < FCOE_LOW_QUEUE_DEPTH)
+ lp->qfull = 0;
+ fc->fcoe_pending_queue_active = 0;
+ rc = fc->fcoe_pending_queue.qlen;
+out:
spin_unlock_bh(&fc->fcoe_pending_queue.lock);
+ return rc;
}
/**
*/
int fcoe_link_ok(struct fc_lport *lp)
{
- struct fcoe_softc *fc = fcoe_softc(lp);
+ struct fcoe_softc *fc = lport_priv(lp);
struct net_device *dev = fc->real_dev;
struct ethtool_cmd ecmd = { ETHTOOL_GSET };
int rc = 0;
fc = fcoe_hostlist_lookup_softc(fcoe_netdev(lp));
if (!fc) {
- fc = fcoe_softc(lp);
+ fc = lport_priv(lp);
write_lock_bh(&fcoe_hostlist_lock);
list_add_tail(&fc->list, &fcoe_hostlist);
write_unlock_bh(&fcoe_hostlist_lock);
*/
static int __init fcoe_init(void)
{
- int cpu;
+ unsigned int cpu;
struct fcoe_percpu_s *p;
-
INIT_LIST_HEAD(&fcoe_hostlist);
rwlock_init(&fcoe_hostlist_lock);
register_cpu_notifier(&fcoe_cpu_notifier);
#endif /* CONFIG_HOTPLUG_CPU */
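+ /* initialize every possible CPU's rx queue before the threads start */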
+ for_each_possible_cpu(cpu) {
+ p = fcoe_percpu[cpu];
+ p->cpu = cpu;
+ skb_queue_head_init(&p->fcoe_rx_list);
+ }
+
/*
* initialize per CPU interrupt thread
*/
* initialize the semaphore and skb queue head
*/
if (likely(!IS_ERR(p->thread))) {
- p->cpu = cpu;
fcoe_percpu[cpu] = p;
- skb_queue_head_init(&p->fcoe_rx_list);
kthread_bind(p->thread, cpu);
wake_up_process(p->thread);
} else {
*/
fcoe_dev_setup();
- init_timer(&fcoe_timer);
- fcoe_timer.data = 0;
- fcoe_timer.function = fcoe_watchdog;
- fcoe_timer.expires = (jiffies + (10 * HZ));
- add_timer(&fcoe_timer);
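+ /* arm the watchdog timer that flushes the per-lport pending queues */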
+ setup_timer(&fcoe_timer, fcoe_watchdog, 0);
+
+ mod_timer(&fcoe_timer, jiffies + (10 * HZ));
/* initialize the fcoe transport */
fcoe_transport_init();