static int port_name_cnt;
static LIST_HEAD(adapter_list);
-u64 ehea_driver_flags;
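+/* bit helpers (set_bit/clear_bit) operate on unsigned long, not u64 */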
+static unsigned long ehea_driver_flags;
struct work_struct ehea_rereg_mr_task;
static DEFINE_MUTEX(dlpar_mem_lock);
struct ehea_fw_handle_array ehea_fw_handles;
int x;
unsigned char *deb = adr;
for (x = 0; x < len; x += 16) {
- printk(DRV_NAME " %s adr=%p ofs=%04x %016lx %016lx\n", msg,
+ printk(DRV_NAME " %s adr=%p ofs=%04x %016llx %016llx\n", msg,
deb, x, *((u64 *)&deb[0]), *((u64 *)&deb[8]));
deb += 16;
}
int num_fw_handles, k, l;
/* Determine number of handles */
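+	/* serialize with readers of the handle array while it is rebuilt */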
+ mutex_lock(&ehea_fw_handles.lock);
+
list_for_each_entry(adapter, &adapter_list, list) {
num_adapters++;
if (num_fw_handles) {
arr = kzalloc(num_fw_handles * sizeof(*arr), GFP_KERNEL);
if (!arr)
- return; /* Keep the existing array */
+ goto out; /* Keep the existing array */
} else
goto out_update;
list_for_each_entry(adapter, &adapter_list, list) {
+ if (num_adapters == 0)
+ break;
+
for (k = 0; k < EHEA_MAX_PORTS; k++) {
struct ehea_port *port = adapter->port[k];
- if (!port || (port->state != EHEA_PORT_UP))
+ if (!port || (port->state != EHEA_PORT_UP) ||
+ (num_ports == 0))
continue;
for (l = 0;
}
arr[i].adh = adapter->handle;
arr[i++].fwh = port->qp_eq->fw_handle;
+ num_ports--;
}
arr[i].adh = adapter->handle;

arr[i].adh = adapter->handle;
arr[i++].fwh = adapter->mr.handle;
}
+ num_adapters--;
}
out_update:
kfree(ehea_fw_handles.arr);
ehea_fw_handles.arr = arr;
ehea_fw_handles.num_entries = i;
+out:
+ mutex_unlock(&ehea_fw_handles.lock);
}
static void ehea_update_bcmc_registrations(void)
{
+ unsigned long flags;
struct ehea_bcmc_reg_entry *arr = NULL;
struct ehea_adapter *adapter;
struct ehea_mc_list *mc_entry;
int i = 0;
int k;
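+	/* may be called from atomic context, hence the spinlock and GFP_ATOMIC below */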
+ spin_lock_irqsave(&ehea_bcmc_regs.lock, flags);
+
/* Determine number of registrations */
list_for_each_entry(adapter, &adapter_list, list)
for (k = 0; k < EHEA_MAX_PORTS; k++) {
if (num_registrations) {
arr = kzalloc(num_registrations * sizeof(*arr), GFP_ATOMIC);
if (!arr)
- return; /* Keep the existing array */
+ goto out; /* Keep the existing array */
} else
goto out_update;
if (!port || (port->state != EHEA_PORT_UP))
continue;
+ if (num_registrations == 0)
+ goto out_update;
+
arr[i].adh = adapter->handle;
arr[i].port_id = port->logical_port_id;
arr[i].reg_type = EHEA_BCMC_BROADCAST |

arr[i].reg_type = EHEA_BCMC_BROADCAST |
EHEA_BCMC_VLANID_ALL;
arr[i++].macaddr = port->mac_addr;
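+			/* the counting pass above accounts two registrations per entry */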
+ num_registrations -= 2;
list_for_each_entry(mc_entry,
&port->mc_list->list, list) {
+ if (num_registrations == 0)
+ goto out_update;
+
arr[i].adh = adapter->handle;
arr[i].port_id = port->logical_port_id;
arr[i].reg_type = EHEA_BCMC_SCOPE_ALL |
EHEA_BCMC_MULTICAST |
EHEA_BCMC_VLANID_ALL;
arr[i++].macaddr = mc_entry->macaddr;
+ num_registrations -= 2;
}
}
}
kfree(ehea_bcmc_regs.arr);
ehea_bcmc_regs.arr = arr;
ehea_bcmc_regs.num_entries = i;
+out:
+ spin_unlock_irqrestore(&ehea_bcmc_regs.lock, flags);
}
static struct net_device_stats *ehea_get_stats(struct net_device *dev)
memset(stats, 0, sizeof(*stats));
- cb2 = kzalloc(PAGE_SIZE, GFP_ATOMIC);
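+	/* firmware control blocks must be page-aligned; kzalloc(PAGE_SIZE, ...) does not guarantee alignment */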
+ cb2 = (void *)get_zeroed_page(GFP_ATOMIC);
if (!cb2) {
ehea_error("no mem for cb2");
goto out;
stats->rx_packets = rx_packets;
out_herr:
- kfree(cb2);
+ free_page((unsigned long)cb2);
out:
return stats;
}
EHEA_L_PKT_SIZE);
if (!skb_arr_rq1[index]) {
pr->rq1_skba.os_skbs = fill_wqes - i;
- ehea_error("%s: no mem for skb/%d wqes filled",
- dev->name, i);
break;
}
}
ehea_update_rq1a(pr->qp, adder);
}
-static int ehea_init_fill_rq1(struct ehea_port_res *pr, int nr_rq1a)
+static void ehea_init_fill_rq1(struct ehea_port_res *pr, int nr_rq1a)
{
- int ret = 0;
struct sk_buff **skb_arr_rq1 = pr->rq1_skba.arr;
struct net_device *dev = pr->port->netdev;
int i;
for (i = 0; i < pr->rq1_skba.len; i++) {
skb_arr_rq1[i] = netdev_alloc_skb(dev, EHEA_L_PKT_SIZE);
- if (!skb_arr_rq1[i]) {
- ehea_error("%s: no mem for skb/%d wqes filled",
- dev->name, i);
- ret = -ENOMEM;
- goto out;
- }
+ if (!skb_arr_rq1[i])
+ break;
}
/* Ring doorbell */
ehea_update_rq1a(pr->qp, nr_rq1a);
-out:
- return ret;
}
static int ehea_refill_rq_def(struct ehea_port_res *pr,
max_index_mask = q_skba->len - 1;
for (i = 0; i < fill_wqes; i++) {
u64 tmp_addr;
- struct sk_buff *skb = netdev_alloc_skb(dev, packet_size);
+ struct sk_buff *skb;
+
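+		/* netdev_alloc_skb_ip_align() already reserves NET_IP_ALIGN headroom */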
+ skb = netdev_alloc_skb_ip_align(dev, packet_size);
if (!skb) {
- ehea_error("%s: no mem for skb/%d wqes filled",
- pr->port->netdev->name, i);
q_skba->os_skbs = fill_wqes - i;
- ret = -ENOMEM;
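+			/* report -ENOMEM only once the receive queue has run completely dry */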
+ if (q_skba->os_skbs == q_skba->len - 2) {
+ ehea_info("%s: rq%i ran dry - no mem for skb",
+ pr->port->netdev->name, rq_nr);
+ ret = -ENOMEM;
+ }
break;
}
- skb_reserve(skb, NET_IP_ALIGN);
skb_arr[index] = skb;
tmp_addr = ehea_map_vaddr(skb->data);
{
return ehea_refill_rq_def(pr, &pr->rq2_skba, 2,
nr_of_wqes, EHEA_RWQE2_TYPE,
- EHEA_RQ2_PKT_SIZE + NET_IP_ALIGN);
+ EHEA_RQ2_PKT_SIZE);
}
{
return ehea_refill_rq_def(pr, &pr->rq3_skba, 3,
nr_of_wqes, EHEA_RWQE3_TYPE,
- EHEA_MAX_PACKET_SIZE + NET_IP_ALIGN);
+ EHEA_MAX_PACKET_SIZE);
}
static inline int ehea_check_cqe(struct ehea_cqe *cqe, int *rq_num)
x &= (arr_len - 1);
pref = skb_array[x];
- prefetchw(pref);
- prefetchw(pref + EHEA_CACHE_LINE);
-
- pref = (skb_array[x]->data);
- prefetch(pref);
- prefetch(pref + EHEA_CACHE_LINE);
- prefetch(pref + EHEA_CACHE_LINE * 2);
- prefetch(pref + EHEA_CACHE_LINE * 3);
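+	/* skb_array slots may be NULL once consumed; guard the data dereference */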
+ if (pref) {
+ prefetchw(pref);
+ prefetchw(pref + EHEA_CACHE_LINE);
+
+ pref = (skb_array[x]->data);
+ prefetch(pref);
+ prefetch(pref + EHEA_CACHE_LINE);
+ prefetch(pref + EHEA_CACHE_LINE * 2);
+ prefetch(pref + EHEA_CACHE_LINE * 3);
+ }
+
skb = skb_array[skb_index];
skb_array[skb_index] = NULL;
return skb;
x &= (arr_len - 1);
pref = skb_array[x];
- prefetchw(pref);
- prefetchw(pref + EHEA_CACHE_LINE);
+ if (pref) {
+ prefetchw(pref);
+ prefetchw(pref + EHEA_CACHE_LINE);
- pref = (skb_array[x]->data);
- prefetchw(pref);
- prefetchw(pref + EHEA_CACHE_LINE);
+ pref = (skb_array[x]->data);
+ prefetchw(pref);
+ prefetchw(pref + EHEA_CACHE_LINE);
+ }
skb = skb_array[wqe_index];
skb_array[wqe_index] = NULL;
static void ehea_proc_skb(struct ehea_port_res *pr, struct ehea_cqe *cqe,
struct sk_buff *skb)
{
- int vlan_extracted = (cqe->status & EHEA_CQE_VLAN_TAG_XTRACT)
- && pr->port->vgrp;
+ int vlan_extracted = ((cqe->status & EHEA_CQE_VLAN_TAG_XTRACT) &&
+ pr->port->vgrp);
if (use_lro) {
if (vlan_extracted)
}
ehea_proc_skb(pr, cqe, skb);
- dev->last_rx = jiffies;
} else {
pr->p_stats.poll_receive_errors++;
port_reset = ehea_treat_poll_error(pr, rq, cqe,
while ((rx != budget) || force_irq) {
pr->poll_counter = 0;
force_irq = 0;
- netif_rx_complete(dev, napi);
+ napi_complete(napi);
ehea_reset_cq_ep(pr->recv_cq);
ehea_reset_cq_ep(pr->send_cq);
ehea_reset_cq_n1(pr->recv_cq);
if (!cqe && !cqe_skb)
return rx;
- if (!netif_rx_reschedule(dev, napi))
+ if (!napi_reschedule(napi))
return rx;
cqe_skb = ehea_proc_cqes(pr, EHEA_POLL_MAX_CQES);
int i;
for (i = 0; i < port->num_def_qps; i++)
- netif_rx_schedule(dev, &port->port_res[i].napi);
+ napi_schedule(&port->port_res[i].napi);
}
#endif
{
struct ehea_port_res *pr = param;
- netif_rx_schedule(pr->port->netdev, &pr->napi);
+ napi_schedule(&pr->napi);
return IRQ_HANDLED;
}
while (eqe) {
qp_token = EHEA_BMASK_GET(EHEA_EQE_QP_TOKEN, eqe->entry);
- ehea_error("QP aff_err: entry=0x%lx, token=0x%x",
+ ehea_error("QP aff_err: entry=0x%llx, token=0x%x",
eqe->entry, qp_token);
qp = port->port_res[qp_token].qp;
struct hcp_ehea_port_cb0 *cb0;
/* may be called via ehea_neq_tasklet() */
- cb0 = kzalloc(PAGE_SIZE, GFP_ATOMIC);
+ cb0 = (void *)get_zeroed_page(GFP_ATOMIC);
if (!cb0) {
ehea_error("no mem for cb0");
ret = -ENOMEM;
out_free:
if (ret || netif_msg_probe(port))
ehea_dump(cb0, sizeof(*cb0), "ehea_sense_port_attr");
- kfree(cb0);
+ free_page((unsigned long)cb0);
out:
return ret;
}
u64 hret;
int ret = 0;
- cb4 = kzalloc(PAGE_SIZE, GFP_KERNEL);
+ cb4 = (void *)get_zeroed_page(GFP_KERNEL);
if (!cb4) {
ehea_error("no mem for cb4");
ret = -ENOMEM;
if (!prop_carrier_state || (port->phy_link == EHEA_PHY_LINK_UP))
netif_carrier_on(port->netdev);
- kfree(cb4);
+ free_page((unsigned long)cb4);
out:
return ret;
}
netif_stop_queue(port->netdev);
break;
default:
- ehea_error("unknown event code %x, eqe=0x%lX", ec, eqe);
+ ehea_error("unknown event code %x, eqe=0x%llX", ec, eqe);
break;
}
}
int ret;
struct ehea_qp_init_attr *init_attr = &pr->qp->init_attr;
- ret = ehea_init_fill_rq1(pr, init_attr->act_nr_rwqes_rq1
- - init_attr->act_nr_rwqes_rq2
- - init_attr->act_nr_rwqes_rq3 - 1);
+ ehea_init_fill_rq1(pr, init_attr->act_nr_rwqes_rq1
+ - init_attr->act_nr_rwqes_rq2
+ - init_attr->act_nr_rwqes_rq3 - 1);
- ret |= ehea_refill_rq2(pr, init_attr->act_nr_rwqes_rq2 - 1);
+ ret = ehea_refill_rq2(pr, init_attr->act_nr_rwqes_rq2 - 1);
ret |= ehea_refill_rq3(pr, init_attr->act_nr_rwqes_rq3 - 1);
struct hcp_ehea_port_cb0 *cb0;
ret = -ENOMEM;
- cb0 = kzalloc(PAGE_SIZE, GFP_KERNEL);
+ cb0 = (void *)get_zeroed_page(GFP_KERNEL);
if (!cb0)
goto out;
ret = 0;
out_free:
- kfree(cb0);
+ free_page((unsigned long)cb0);
out:
return ret;
}
int ehea_rem_smrs(struct ehea_port_res *pr)
{
- if ((ehea_rem_mr(&pr->send_mr))
- || (ehea_rem_mr(&pr->recv_mr)))
+ if ((ehea_rem_mr(&pr->send_mr)) ||
+ (ehea_rem_mr(&pr->recv_mr)))
return -EIO;
else
return 0;
{
int ret, i;
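+	/* pr->qp is only set once the resource (and its NAPI instance) was initialized */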
+ if (pr->qp)
+ netif_napi_del(&pr->napi);
+
ret = ehea_destroy_qp(pr->qp);
if (!ret) {
goto out;
}
- cb0 = kzalloc(PAGE_SIZE, GFP_KERNEL);
+ cb0 = (void *)get_zeroed_page(GFP_KERNEL);
if (!cb0) {
ehea_error("no mem for cb0");
ret = -ENOMEM;
memcpy(dev->dev_addr, mac_addr->sa_data, dev->addr_len);
- spin_lock(&ehea_bcmc_regs.lock);
-
/* Deregister old MAC in pHYP */
if (port->state == EHEA_PORT_UP) {
ret = ehea_broadcast_reg_helper(port, H_DEREG_BCMC);
out_upregs:
ehea_update_bcmc_registrations();
- spin_unlock(&ehea_bcmc_regs.lock);
out_free:
- kfree(cb0);
+ free_page((unsigned long)cb0);
out:
return ret;
}
if ((enable && port->promisc) || (!enable && !port->promisc))
return;
- cb7 = kzalloc(PAGE_SIZE, GFP_ATOMIC);
+ cb7 = (void *)get_zeroed_page(GFP_ATOMIC);
if (!cb7) {
ehea_error("no mem for cb7");
goto out;
port->promisc = enable;
out:
- kfree(cb7);
+ free_page((unsigned long)cb7);
return;
}
}
ehea_promiscuous(dev, 0);
- spin_lock(&ehea_bcmc_regs.lock);
-
if (dev->flags & IFF_ALLMULTI) {
ehea_allmulti(dev, 1);
goto out;
}
ehea_allmulti(dev, 0);
- if (dev->mc_count) {
+ if (!netdev_mc_empty(dev)) {
ret = ehea_drop_multicast_list(dev);
if (ret) {
/* Dropping the current multicast list failed.
ehea_allmulti(dev, 1);
}
- if (dev->mc_count > port->adapter->max_mc_mac) {
- ehea_info("Mcast registration limit reached (0x%lx). "
+ if (netdev_mc_count(dev) > port->adapter->max_mc_mac) {
+ ehea_info("Mcast registration limit reached (0x%llx). "
"Use ALLMULTI!",
port->adapter->max_mc_mac);
goto out;
}
- for (i = 0, k_mcl_entry = dev->mc_list; i < dev->mc_count; i++,
+ for (i = 0, k_mcl_entry = dev->mc_list; i < netdev_mc_count(dev); i++,
k_mcl_entry = k_mcl_entry->next)
ehea_add_multicast_entry(port, k_mcl_entry->dmi_addr);
}
out:
ehea_update_bcmc_registrations();
- spin_unlock(&ehea_bcmc_regs.lock);
return;
}
write_ip_start_end(swqe, skb);
if (iph->protocol == IPPROTO_UDP) {
- if ((iph->frag_off & IP_MF)
- || (iph->frag_off & IP_OFFSET))
+ if ((iph->frag_off & IP_MF) ||
+ (iph->frag_off & IP_OFFSET))
/* IP fragment, so don't change cs */
swqe->tx_control &= ~EHEA_SWQE_TCP_CHECKSUM;
else
write_tcp_offset_end(swqe, skb);
} else if (iph->protocol == IPPROTO_UDP) {
- if ((iph->frag_off & IP_MF)
- || (iph->frag_off & IP_OFFSET))
+ if ((iph->frag_off & IP_MF) ||
+ (iph->frag_off & IP_OFFSET))
/* IP fragment, so don't change cs */
swqe->tx_control |= EHEA_SWQE_CRC
| EHEA_SWQE_IMM_DATA_PRESENT;
port->vgrp = grp;
- cb1 = kzalloc(PAGE_SIZE, GFP_KERNEL);
+ cb1 = (void *)get_zeroed_page(GFP_KERNEL);
if (!cb1) {
ehea_error("no mem for cb1");
goto out;
if (hret != H_SUCCESS)
ehea_error("modify_ehea_port failed");
- kfree(cb1);
+ free_page((unsigned long)cb1);
out:
return;
}
int index;
u64 hret;
- cb1 = kzalloc(PAGE_SIZE, GFP_KERNEL);
+ cb1 = (void *)get_zeroed_page(GFP_KERNEL);
if (!cb1) {
ehea_error("no mem for cb1");
goto out;
if (hret != H_SUCCESS)
ehea_error("modify_ehea_port failed");
out:
- kfree(cb1);
+ free_page((unsigned long)cb1);
return;
}
vlan_group_set_device(port->vgrp, vid, NULL);
- cb1 = kzalloc(PAGE_SIZE, GFP_KERNEL);
+ cb1 = (void *)get_zeroed_page(GFP_KERNEL);
if (!cb1) {
ehea_error("no mem for cb1");
goto out;
if (hret != H_SUCCESS)
ehea_error("modify_ehea_port failed");
out:
- kfree(cb1);
+ free_page((unsigned long)cb1);
return;
}
u64 dummy64 = 0;
struct hcp_modify_qp_cb0 *cb0;
- cb0 = kzalloc(PAGE_SIZE, GFP_KERNEL);
+ cb0 = (void *)get_zeroed_page(GFP_KERNEL);
if (!cb0) {
ret = -ENOMEM;
goto out;
ret = 0;
out:
- kfree(cb0);
+ free_page((unsigned long)cb0);
return ret;
}
if (port->state == EHEA_PORT_UP)
return 0;
- mutex_lock(&ehea_fw_handles.lock);
-
ret = ehea_port_res_setup(port, port->num_def_qps,
port->num_add_tx_qps);
if (ret) {
}
}
- spin_lock(&ehea_bcmc_regs.lock);
-
ret = ehea_broadcast_reg_helper(port, H_REG_BCMC);
if (ret) {
ret = -EIO;
ehea_info("Failed starting %s. ret=%i", dev->name, ret);
ehea_update_bcmc_registrations();
- spin_unlock(&ehea_bcmc_regs.lock);
-
ehea_update_firmware_handles();
- mutex_unlock(&ehea_fw_handles.lock);
return ret;
}
if (port->state == EHEA_PORT_DOWN)
return 0;
- mutex_lock(&ehea_fw_handles.lock);
-
- spin_lock(&ehea_bcmc_regs.lock);
ehea_drop_multicast_list(dev);
ehea_broadcast_reg_helper(port, H_DEREG_BCMC);
port->state = EHEA_PORT_DOWN;
ehea_update_bcmc_registrations();
- spin_unlock(&ehea_bcmc_regs.lock);
ret = ehea_clean_all_portres(port);
if (ret)
dev->name, ret);
ehea_update_firmware_handles();
- mutex_unlock(&ehea_fw_handles.lock);
return ret;
}
u64 dummy64 = 0;
u16 dummy16 = 0;
- cb0 = kzalloc(PAGE_SIZE, GFP_KERNEL);
+ cb0 = (void *)get_zeroed_page(GFP_KERNEL);
if (!cb0) {
ret = -ENOMEM;
goto out;
ret = 0;
out:
- kfree(cb0);
+ free_page((unsigned long)cb0);
return ret;
}
u64 dummy64 = 0;
u16 dummy16 = 0;
- cb0 = kzalloc(PAGE_SIZE, GFP_KERNEL);
+ cb0 = (void *)get_zeroed_page(GFP_KERNEL);
if (!cb0) {
ret = -ENOMEM;
goto out;
ehea_refill_rq3(pr, 0);
}
out:
- kfree(cb0);
+ free_page((unsigned long)cb0);
return ret;
}
struct ehea_adapter *adapter;
mutex_lock(&dlpar_mem_lock);
- ehea_info("LPAR memory enlarged - re-initializing driver");
+ ehea_info("LPAR memory changed - re-initializing driver");
list_for_each_entry(adapter, &adapter_list, list)
if (adapter->active_ports) {
}
}
- ehea_destroy_busmap();
- ret = ehea_create_busmap();
- if (ret) {
- ehea_error("creating ehea busmap failed");
- goto out;
- }
-
clear_bit(__EHEA_STOP_XFER, &ehea_driver_flags);
list_for_each_entry(adapter, &adapter_list, list)
u64 hret;
int ret;
- cb = kzalloc(PAGE_SIZE, GFP_KERNEL);
+ cb = (void *)get_zeroed_page(GFP_KERNEL);
if (!cb) {
ret = -ENOMEM;
goto out;
ret = 0;
out_herr:
- kfree(cb);
+ free_page((unsigned long)cb);
out:
return ret;
}
*jumbo = 0;
/* (Try to) enable *jumbo frames */
- cb4 = kzalloc(PAGE_SIZE, GFP_KERNEL);
+ cb4 = (void *)get_zeroed_page(GFP_KERNEL);
if (!cb4) {
ehea_error("no mem for cb4");
ret = -ENOMEM;
} else
ret = -EINVAL;
- kfree(cb4);
+ free_page((unsigned long)cb4);
}
out:
return ret;
port->ofdev.dev.parent = &port->adapter->ofdev->dev;
port->ofdev.dev.bus = &ibmebus_bus_type;
- sprintf(port->ofdev.dev.bus_id, "port%d", port_name_cnt++);
+ dev_set_name(&port->ofdev.dev, "port%d", port_name_cnt++);
port->ofdev.dev.release = logical_port_release;
ret = of_device_register(&port->ofdev);
of_device_unregister(&port->ofdev);
}
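+/* net_device callbacks for eHEA ports */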
+static const struct net_device_ops ehea_netdev_ops = {
+ .ndo_open = ehea_open,
+ .ndo_stop = ehea_stop,
+ .ndo_start_xmit = ehea_start_xmit,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ .ndo_poll_controller = ehea_netpoll,
+#endif
+ .ndo_get_stats = ehea_get_stats,
+ .ndo_set_mac_address = ehea_set_mac_addr,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_set_multicast_list = ehea_set_multicast_list,
+ .ndo_change_mtu = ehea_change_mtu,
+ .ndo_vlan_rx_register = ehea_vlan_rx_register,
+ .ndo_vlan_rx_add_vid = ehea_vlan_rx_add_vid,
+ .ndo_vlan_rx_kill_vid = ehea_vlan_rx_kill_vid,
+ .ndo_tx_timeout = ehea_tx_watchdog,
+};
+
struct ehea_port *ehea_setup_single_port(struct ehea_adapter *adapter,
u32 logical_port_id,
struct device_node *dn)
/* initialize net_device structure */
memcpy(dev->dev_addr, &port->mac_addr, ETH_ALEN);
- dev->open = ehea_open;
-#ifdef CONFIG_NET_POLL_CONTROLLER
- dev->poll_controller = ehea_netpoll;
-#endif
- dev->stop = ehea_stop;
- dev->hard_start_xmit = ehea_start_xmit;
- dev->get_stats = ehea_get_stats;
- dev->set_multicast_list = ehea_set_multicast_list;
- dev->set_mac_address = ehea_set_mac_addr;
- dev->change_mtu = ehea_change_mtu;
- dev->vlan_rx_register = ehea_vlan_rx_register;
- dev->vlan_rx_add_vid = ehea_vlan_rx_add_vid;
- dev->vlan_rx_kill_vid = ehea_vlan_rx_kill_vid;
+ dev->netdev_ops = &ehea_netdev_ops;
+ ehea_set_ethtool_ops(dev);
+
dev->features = NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_TSO
| NETIF_F_HIGHDMA | NETIF_F_IP_CSUM | NETIF_F_HW_VLAN_TX
| NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER
| NETIF_F_LLTX;
- dev->tx_timeout = &ehea_tx_watchdog;
dev->watchdog_timeo = EHEA_WATCH_DOG_TIMEOUT;
INIT_WORK(&port->reset_task, ehea_reset_port);
- ehea_set_ethtool_ops(dev);
ret = register_netdev(dev);
if (ret) {
struct device_attribute *attr,
const char *buf, size_t count)
{
- struct ehea_adapter *adapter = dev->driver_data;
+ struct ehea_adapter *adapter = dev_get_drvdata(dev);
struct ehea_port *port;
struct device_node *eth_dn = NULL;
int i;
struct device_attribute *attr,
const char *buf, size_t count)
{
- struct ehea_adapter *adapter = dev->driver_data;
+ struct ehea_adapter *adapter = dev_get_drvdata(dev);
struct ehea_port *port;
int i;
u32 logical_port_id;
ehea_error("Invalid ibmebus device probed");
return -EINVAL;
}
- mutex_lock(&ehea_fw_handles.lock);
adapter = kzalloc(sizeof(*adapter), GFP_KERNEL);
if (!adapter) {
adapter->pd = EHEA_PD_ID;
- dev->dev.driver_data = adapter;
+ dev_set_drvdata(&dev->dev, adapter);
/* initialize adapter and ports */
ehea_destroy_eq(adapter->neq);
out_free_ad:
+ list_del(&adapter->list);
kfree(adapter);
out:
ehea_update_firmware_handles();
- mutex_unlock(&ehea_fw_handles.lock);
+
return ret;
}
static int __devexit ehea_remove(struct of_device *dev)
{
- struct ehea_adapter *adapter = dev->dev.driver_data;
+ struct ehea_adapter *adapter = dev_get_drvdata(&dev->dev);
int i;
for (i = 0; i < EHEA_MAX_PORTS; i++)
flush_scheduled_work();
- mutex_lock(&ehea_fw_handles.lock);
-
ibmebus_free_irq(adapter->neq->attr.ist1, adapter);
tasklet_kill(&adapter->neq_tasklet);
kfree(adapter);
ehea_update_firmware_handles();
- mutex_unlock(&ehea_fw_handles.lock);
return 0;
}
static int ehea_mem_notifier(struct notifier_block *nb,
unsigned long action, void *data)
{
+ struct memory_notify *arg = data;
switch (action) {
- case MEM_OFFLINE:
- ehea_info("memory has been removed");
+ case MEM_CANCEL_OFFLINE:
+ ehea_info("memory offlining canceled");
+		/* Re-add canceled memory block: fall through to MEM_ONLINE */
+ case MEM_ONLINE:
+ ehea_info("memory is going online");
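+		/* stop all transfers while the busmap and memory regions are updated */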
+ set_bit(__EHEA_STOP_XFER, &ehea_driver_flags);
+ if (ehea_add_sect_bmap(arg->start_pfn, arg->nr_pages))
+ return NOTIFY_BAD;
+ ehea_rereg_mrs(NULL);
+ break;
+ case MEM_GOING_OFFLINE:
+ ehea_info("memory is going offline");
+ set_bit(__EHEA_STOP_XFER, &ehea_driver_flags);
+ if (ehea_rem_sect_bmap(arg->start_pfn, arg->nr_pages))
+ return NOTIFY_BAD;
ehea_rereg_mrs(NULL);
break;
default:
break;
}
+
+ ehea_update_firmware_handles();
+
return NOTIFY_OK;
}