Diffstat (limited to 'drivers/net/ehea')
-rw-r--r--  drivers/net/ehea/ehea.h       |  36
-rw-r--r--  drivers/net/ehea/ehea_hw.h    |  24
-rw-r--r--  drivers/net/ehea/ehea_main.c  | 316
-rw-r--r--  drivers/net/ehea/ehea_phyp.h  |   3
-rw-r--r--  drivers/net/ehea/ehea_qmr.c   | 218
-rw-r--r--  drivers/net/ehea/ehea_qmr.h   |  14
6 files changed, 441 insertions(+), 170 deletions(-)
diff --git a/drivers/net/ehea/ehea.h b/drivers/net/ehea/ehea.h
index c0f81b5a30fb..8d58be56f4e3 100644
--- a/drivers/net/ehea/ehea.h
+++ b/drivers/net/ehea/ehea.h
@@ -39,7 +39,13 @@
#include <asm/io.h>
#define DRV_NAME "ehea"
-#define DRV_VERSION "EHEA_0064"
+#define DRV_VERSION "EHEA_0074"
+
+/* eHEA capability flags */
+#define DLPAR_PORT_ADD_REM 1
+#define DLPAR_MEM_ADD 2
+#define DLPAR_MEM_REM 4
+#define EHEA_CAPABILITIES (DLPAR_PORT_ADD_REM)
#define EHEA_MSG_DEFAULT (NETIF_MSG_LINK | NETIF_MSG_TIMER \
| NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR)
@@ -107,6 +113,8 @@
/* Memory Regions */
#define EHEA_MR_ACC_CTRL 0x00800000
+#define EHEA_BUSMAP_START 0x8000000000000000ULL
+
#define EHEA_WATCH_DOG_TIMEOUT 10*HZ
/* utility functions */
@@ -136,10 +144,10 @@ void ehea_dump(void *adr, int len, char *msg);
(0xffffffffffffffffULL >> ((64 - (mask)) & 0xffff))
#define EHEA_BMASK_SET(mask, value) \
- ((EHEA_BMASK_MASK(mask) & ((u64)(value))) << EHEA_BMASK_SHIFTPOS(mask))
+ ((EHEA_BMASK_MASK(mask) & ((u64)(value))) << EHEA_BMASK_SHIFTPOS(mask))
#define EHEA_BMASK_GET(mask, value) \
- (EHEA_BMASK_MASK(mask) & (((u64)(value)) >> EHEA_BMASK_SHIFTPOS(mask)))
+ (EHEA_BMASK_MASK(mask) & (((u64)(value)) >> EHEA_BMASK_SHIFTPOS(mask)))
/*
* Generic ehea page
@@ -180,6 +188,12 @@ struct h_epas {
set to 0 if unused */
};
+struct ehea_busmap {
+ unsigned int entries; /* total number of entries */
+ unsigned int valid_sections; /* number of valid sections */
+ u64 *vaddr;
+};
+
struct ehea_qp;
struct ehea_cq;
struct ehea_eq;
@@ -190,7 +204,7 @@ struct ehea_av;
* Queue attributes passed to ehea_create_qp()
*/
struct ehea_qp_init_attr {
- /* input parameter */
+ /* input parameter */
u32 qp_token; /* queue token */
u8 low_lat_rq1;
u8 signalingtype; /* cqe generation flag */
@@ -212,7 +226,7 @@ struct ehea_qp_init_attr {
u64 recv_cq_handle;
u64 aff_eq_handle;
- /* output parameter */
+ /* output parameter */
u32 qp_nr;
u16 act_nr_send_wqes;
u16 act_nr_rwqes_rq1;
@@ -279,12 +293,12 @@ struct ehea_qp {
* Completion Queue attributes
*/
struct ehea_cq_attr {
- /* input parameter */
+ /* input parameter */
u32 max_nr_of_cqes;
u32 cq_token;
u64 eq_handle;
- /* output parameter */
+ /* output parameter */
u32 act_nr_of_cqes;
u32 nr_pages;
};
@@ -376,6 +390,8 @@ struct ehea_adapter {
struct ehea_mr mr;
u32 pd; /* protection domain */
u64 max_mc_mac; /* max number of multicast mac addresses */
+ int active_ports;
+ struct list_head list;
};
@@ -386,6 +402,8 @@ struct ehea_mc_list {
#define EHEA_PORT_UP 1
#define EHEA_PORT_DOWN 0
+#define EHEA_PHY_LINK_UP 1
+#define EHEA_PHY_LINK_DOWN 0
#define EHEA_MAX_PORT_RES 16
struct ehea_port {
struct ehea_adapter *adapter; /* adapter that owns this port */
@@ -411,6 +429,7 @@ struct ehea_port {
u32 msg_enable;
u32 sig_comp_iv;
u32 state;
+ u8 phy_link;
u8 full_duplex;
u8 autoneg;
u8 num_def_qps;
@@ -425,6 +444,9 @@ struct port_res_cfg {
int max_entries_rq3;
};
+enum ehea_flag_bits {
+ __EHEA_STOP_XFER
+};
void ehea_set_ethtool_ops(struct net_device *netdev);
int ehea_sense_port_attr(struct ehea_port *port);
diff --git a/drivers/net/ehea/ehea_hw.h b/drivers/net/ehea/ehea_hw.h
index 1246757f2c22..1af7ca499ec5 100644
--- a/drivers/net/ehea/ehea_hw.h
+++ b/drivers/net/ehea/ehea_hw.h
@@ -211,34 +211,34 @@ static inline void epa_store_acc(struct h_epa epa, u32 offset, u64 value)
}
#define epa_store_eq(epa, offset, value)\
- epa_store(epa, EQTEMM_OFFSET(offset), value)
+ epa_store(epa, EQTEMM_OFFSET(offset), value)
#define epa_load_eq(epa, offset)\
- epa_load(epa, EQTEMM_OFFSET(offset))
+ epa_load(epa, EQTEMM_OFFSET(offset))
#define epa_store_cq(epa, offset, value)\
- epa_store(epa, CQTEMM_OFFSET(offset), value)
+ epa_store(epa, CQTEMM_OFFSET(offset), value)
#define epa_load_cq(epa, offset)\
- epa_load(epa, CQTEMM_OFFSET(offset))
+ epa_load(epa, CQTEMM_OFFSET(offset))
#define epa_store_qp(epa, offset, value)\
- epa_store(epa, QPTEMM_OFFSET(offset), value)
+ epa_store(epa, QPTEMM_OFFSET(offset), value)
#define epa_load_qp(epa, offset)\
- epa_load(epa, QPTEMM_OFFSET(offset))
+ epa_load(epa, QPTEMM_OFFSET(offset))
#define epa_store_qped(epa, offset, value)\
- epa_store(epa, QPEDMM_OFFSET(offset), value)
+ epa_store(epa, QPEDMM_OFFSET(offset), value)
#define epa_load_qped(epa, offset)\
- epa_load(epa, QPEDMM_OFFSET(offset))
+ epa_load(epa, QPEDMM_OFFSET(offset))
#define epa_store_mrmw(epa, offset, value)\
- epa_store(epa, MRMWMM_OFFSET(offset), value)
+ epa_store(epa, MRMWMM_OFFSET(offset), value)
#define epa_load_mrmw(epa, offset)\
- epa_load(epa, MRMWMM_OFFSET(offset))
+ epa_load(epa, MRMWMM_OFFSET(offset))
#define epa_store_base(epa, offset, value)\
- epa_store(epa, HCAGR_OFFSET(offset), value)
+ epa_store(epa, HCAGR_OFFSET(offset), value)
#define epa_load_base(epa, offset)\
- epa_load(epa, HCAGR_OFFSET(offset))
+ epa_load(epa, HCAGR_OFFSET(offset))
static inline void ehea_update_sqa(struct ehea_qp *qp, u16 nr_wqes)
{
diff --git a/drivers/net/ehea/ehea_main.c b/drivers/net/ehea/ehea_main.c
index 9e13433a268a..717b12984d10 100644
--- a/drivers/net/ehea/ehea_main.c
+++ b/drivers/net/ehea/ehea_main.c
@@ -53,17 +53,21 @@ static int rq3_entries = EHEA_DEF_ENTRIES_RQ3;
static int sq_entries = EHEA_DEF_ENTRIES_SQ;
static int use_mcs = 0;
static int num_tx_qps = EHEA_NUM_TX_QP;
+static int prop_carrier_state = 0;
module_param(msg_level, int, 0);
module_param(rq1_entries, int, 0);
module_param(rq2_entries, int, 0);
module_param(rq3_entries, int, 0);
module_param(sq_entries, int, 0);
+module_param(prop_carrier_state, int, 0);
module_param(use_mcs, int, 0);
module_param(num_tx_qps, int, 0);
MODULE_PARM_DESC(num_tx_qps, "Number of TX-QPS");
MODULE_PARM_DESC(msg_level, "msg_level");
+MODULE_PARM_DESC(prop_carrier_state, "Propagate carrier state of physical "
+ "port to stack. 1:yes, 0:no. Default = 0 ");
MODULE_PARM_DESC(rq3_entries, "Number of entries for Receive Queue 3 "
"[2^x - 1], x = [6..14]. Default = "
__MODULE_STRING(EHEA_DEF_ENTRIES_RQ3) ")");
@@ -76,12 +80,17 @@ MODULE_PARM_DESC(rq1_entries, "Number of entries for Receive Queue 1 "
MODULE_PARM_DESC(sq_entries, " Number of entries for the Send Queue "
"[2^x - 1], x = [6..14]. Default = "
__MODULE_STRING(EHEA_DEF_ENTRIES_SQ) ")");
-MODULE_PARM_DESC(use_mcs, " 0:NAPI, 1:Multiple receive queues, Default = 1 ");
+MODULE_PARM_DESC(use_mcs, " 0:NAPI, 1:Multiple receive queues, Default = 0 ");
static int port_name_cnt = 0;
+static LIST_HEAD(adapter_list);
+u64 ehea_driver_flags = 0;
+struct workqueue_struct *ehea_driver_wq;
+struct work_struct ehea_rereg_mr_task;
+
static int __devinit ehea_probe_adapter(struct ibmebus_dev *dev,
- const struct of_device_id *id);
+ const struct of_device_id *id);
static int __devexit ehea_remove(struct ibmebus_dev *dev);
@@ -236,15 +245,19 @@ static int ehea_refill_rq_def(struct ehea_port_res *pr,
rwqe = ehea_get_next_rwqe(qp, rq_nr);
rwqe->wr_id = EHEA_BMASK_SET(EHEA_WR_ID_TYPE, wqe_type)
- | EHEA_BMASK_SET(EHEA_WR_ID_INDEX, index);
+ | EHEA_BMASK_SET(EHEA_WR_ID_INDEX, index);
rwqe->sg_list[0].l_key = pr->recv_mr.lkey;
- rwqe->sg_list[0].vaddr = (u64)skb->data;
+ rwqe->sg_list[0].vaddr = ehea_map_vaddr(skb->data);
rwqe->sg_list[0].len = packet_size;
rwqe->data_segments = 1;
index++;
index &= max_index_mask;
+
+ if (unlikely(test_bit(__EHEA_STOP_XFER, &ehea_driver_flags)))
+ goto out;
}
+
q_skba->index = index;
/* Ring doorbell */
@@ -253,7 +266,7 @@ static int ehea_refill_rq_def(struct ehea_port_res *pr,
ehea_update_rq2a(pr->qp, i);
else
ehea_update_rq3a(pr->qp, i);
-
+out:
return ret;
}
@@ -427,7 +440,7 @@ static struct ehea_cqe *ehea_proc_rwqes(struct net_device *dev,
break;
}
skb_copy_to_linear_data(skb, ((char*)cqe) + 64,
- cqe->num_bytes_transfered - 4);
+ cqe->num_bytes_transfered - 4);
ehea_fill_skb(port->netdev, skb, cqe);
} else if (rq == 2) { /* RQ2 */
skb = get_skb_by_index(skb_arr_rq2,
@@ -457,6 +470,8 @@ static struct ehea_cqe *ehea_proc_rwqes(struct net_device *dev,
cqe->vlan_tag);
else
netif_receive_skb(skb);
+
+ port->netdev->last_rx = jiffies;
} else {
pr->p_stats.poll_receive_errors++;
port_reset = ehea_treat_poll_error(pr, rq, cqe,
@@ -578,6 +593,23 @@ static int ehea_poll(struct net_device *dev, int *budget)
return 1;
}
+#ifdef CONFIG_NET_POLL_CONTROLLER
+static void ehea_netpoll(struct net_device *dev)
+{
+ struct ehea_port *port = netdev_priv(dev);
+
+ netif_rx_schedule(port->port_res[0].d_netdev);
+}
+#endif
+
+static int ehea_poll_firstqueue(struct net_device *dev, int *budget)
+{
+ struct ehea_port *port = netdev_priv(dev);
+ struct net_device *d_dev = port->port_res[0].d_netdev;
+
+ return ehea_poll(d_dev, budget);
+}
+
static irqreturn_t ehea_recv_irq_handler(int irq, void *param)
{
struct ehea_port_res *pr = param;
@@ -618,7 +650,7 @@ static struct ehea_port *ehea_get_port(struct ehea_adapter *adapter,
for (i = 0; i < EHEA_MAX_PORTS; i++)
if (adapter->port[i])
- if (adapter->port[i]->logical_port_id == logical_port)
+ if (adapter->port[i]->logical_port_id == logical_port)
return adapter->port[i];
return NULL;
}
@@ -786,7 +818,9 @@ int ehea_set_portspeed(struct ehea_port *port, u32 port_speed)
ehea_error("Failed setting port speed");
}
}
- netif_carrier_on(port->netdev);
+ if (!prop_carrier_state || (port->phy_link == EHEA_PHY_LINK_UP))
+ netif_carrier_on(port->netdev);
+
kfree(cb4);
out:
return ret;
@@ -841,13 +875,19 @@ static void ehea_parse_eqe(struct ehea_adapter *adapter, u64 eqe)
}
if (EHEA_BMASK_GET(NEQE_EXTSWITCH_PORT_UP, eqe)) {
+ port->phy_link = EHEA_PHY_LINK_UP;
if (netif_msg_link(port))
ehea_info("%s: Physical port up",
port->netdev->name);
+ if (prop_carrier_state)
+ netif_carrier_on(port->netdev);
} else {
+ port->phy_link = EHEA_PHY_LINK_DOWN;
if (netif_msg_link(port))
ehea_info("%s: Physical port down",
port->netdev->name);
+ if (prop_carrier_state)
+ netif_carrier_off(port->netdev);
}
if (EHEA_BMASK_GET(NEQE_EXTSWITCH_PRIMARY, eqe))
@@ -1298,7 +1338,6 @@ static void write_swqe2_TSO(struct sk_buff *skb,
u8 *imm_data = &swqe->u.immdata_desc.immediate_data[0];
int skb_data_size = skb->len - skb->data_len;
int headersize;
- u64 tmp_addr;
/* Packet is TCP with TSO enabled */
swqe->tx_control |= EHEA_SWQE_TSO;
@@ -1319,9 +1358,8 @@ static void write_swqe2_TSO(struct sk_buff *skb,
/* set sg1entry data */
sg1entry->l_key = lkey;
sg1entry->len = skb_data_size - headersize;
-
- tmp_addr = (u64)(skb->data + headersize);
- sg1entry->vaddr = tmp_addr;
+ sg1entry->vaddr =
+ ehea_map_vaddr(skb->data + headersize);
swqe->descriptors++;
}
} else
@@ -1334,7 +1372,6 @@ static void write_swqe2_nonTSO(struct sk_buff *skb,
int skb_data_size = skb->len - skb->data_len;
u8 *imm_data = &swqe->u.immdata_desc.immediate_data[0];
struct ehea_vsgentry *sg1entry = &swqe->u.immdata_desc.sg_entry;
- u64 tmp_addr;
/* Packet is any nonTSO type
*
@@ -1351,8 +1388,8 @@ static void write_swqe2_nonTSO(struct sk_buff *skb,
/* copy sg1entry data */
sg1entry->l_key = lkey;
sg1entry->len = skb_data_size - SWQE2_MAX_IMM;
- tmp_addr = (u64)(skb->data + SWQE2_MAX_IMM);
- sg1entry->vaddr = tmp_addr;
+ sg1entry->vaddr =
+ ehea_map_vaddr(skb->data + SWQE2_MAX_IMM);
swqe->descriptors++;
}
} else {
@@ -1367,7 +1404,6 @@ static inline void write_swqe2_data(struct sk_buff *skb, struct net_device *dev,
struct ehea_vsgentry *sg_list, *sg1entry, *sgentry;
skb_frag_t *frag;
int nfrags, sg1entry_contains_frag_data, i;
- u64 tmp_addr;
nfrags = skb_shinfo(skb)->nr_frags;
sg1entry = &swqe->u.immdata_desc.sg_entry;
@@ -1389,9 +1425,9 @@ static inline void write_swqe2_data(struct sk_buff *skb, struct net_device *dev,
/* copy sg1entry data */
sg1entry->l_key = lkey;
sg1entry->len = frag->size;
- tmp_addr = (u64)(page_address(frag->page)
- + frag->page_offset);
- sg1entry->vaddr = tmp_addr;
+ sg1entry->vaddr =
+ ehea_map_vaddr(page_address(frag->page)
+ + frag->page_offset);
swqe->descriptors++;
sg1entry_contains_frag_data = 1;
}
@@ -1403,10 +1439,9 @@ static inline void write_swqe2_data(struct sk_buff *skb, struct net_device *dev,
sgentry->l_key = lkey;
sgentry->len = frag->size;
-
- tmp_addr = (u64)(page_address(frag->page)
- + frag->page_offset);
- sgentry->vaddr = tmp_addr;
+ sgentry->vaddr =
+ ehea_map_vaddr(page_address(frag->page)
+ + frag->page_offset);
swqe->descriptors++;
}
}
@@ -1424,7 +1459,8 @@ static int ehea_broadcast_reg_helper(struct ehea_port *port, u32 hcallid)
port->logical_port_id,
reg_type, port->mac_addr, 0, hcallid);
if (hret != H_SUCCESS) {
- ehea_error("reg_dereg_bcmc failed (tagged)");
+ ehea_error("%sregistering bc address failed (tagged)",
+ hcallid == H_REG_BCMC ? "" : "de");
ret = -EIO;
goto out_herr;
}
@@ -1435,7 +1471,8 @@ static int ehea_broadcast_reg_helper(struct ehea_port *port, u32 hcallid)
port->logical_port_id,
reg_type, port->mac_addr, 0, hcallid);
if (hret != H_SUCCESS) {
- ehea_error("reg_dereg_bcmc failed (vlan)");
+ ehea_error("%sregistering bc address failed (vlan)",
+ hcallid == H_REG_BCMC ? "" : "de");
ret = -EIO;
}
out_herr:
@@ -1695,6 +1732,7 @@ static void ehea_xmit2(struct sk_buff *skb, struct net_device *dev,
{
if (skb->protocol == htons(ETH_P_IP)) {
const struct iphdr *iph = ip_hdr(skb);
+
/* IPv4 */
swqe->tx_control |= EHEA_SWQE_CRC
| EHEA_SWQE_IP_CHECKSUM
@@ -1705,13 +1743,12 @@ static void ehea_xmit2(struct sk_buff *skb, struct net_device *dev,
write_ip_start_end(swqe, skb);
if (iph->protocol == IPPROTO_UDP) {
- if ((iph->frag_off & IP_MF) ||
- (iph->frag_off & IP_OFFSET))
+ if ((iph->frag_off & IP_MF)
+ || (iph->frag_off & IP_OFFSET))
/* IP fragment, so don't change cs */
swqe->tx_control &= ~EHEA_SWQE_TCP_CHECKSUM;
else
write_udp_offset_end(swqe, skb);
-
} else if (iph->protocol == IPPROTO_TCP) {
write_tcp_offset_end(swqe, skb);
}
@@ -1739,6 +1776,7 @@ static void ehea_xmit3(struct sk_buff *skb, struct net_device *dev,
if (skb->protocol == htons(ETH_P_IP)) {
const struct iphdr *iph = ip_hdr(skb);
+
/* IPv4 */
write_ip_start_end(swqe, skb);
@@ -1751,8 +1789,8 @@ static void ehea_xmit3(struct sk_buff *skb, struct net_device *dev,
write_tcp_offset_end(swqe, skb);
} else if (iph->protocol == IPPROTO_UDP) {
- if ((iph->frag_off & IP_MF) ||
- (iph->frag_off & IP_OFFSET))
+ if ((iph->frag_off & IP_MF)
+ || (iph->frag_off & IP_OFFSET))
/* IP fragment, so don't change cs */
swqe->tx_control |= EHEA_SWQE_CRC
| EHEA_SWQE_IMM_DATA_PRESENT;
@@ -1877,6 +1915,9 @@ static int ehea_start_xmit(struct sk_buff *skb, struct net_device *dev)
ehea_dump(swqe, 512, "swqe");
}
+ if (unlikely(test_bit(__EHEA_STOP_XFER, &ehea_driver_flags)))
+ goto out;
+
ehea_post_swqe(pr->qp, swqe);
pr->tx_packets++;
@@ -1891,7 +1932,7 @@ static int ehea_start_xmit(struct sk_buff *skb, struct net_device *dev)
}
dev->trans_start = jiffies;
spin_unlock(&pr->xmit_lock);
-
+out:
return NETDEV_TX_OK;
}
@@ -2131,24 +2172,18 @@ static int ehea_clean_all_portres(struct ehea_port *port)
return ret;
}
-static void ehea_remove_adapter_mr (struct ehea_adapter *adapter)
+static void ehea_remove_adapter_mr(struct ehea_adapter *adapter)
{
- int i;
-
- for (i=0; i < EHEA_MAX_PORTS; i++)
- if (adapter->port[i])
- return;
+ if (adapter->active_ports)
+ return;
ehea_rem_mr(&adapter->mr);
}
-static int ehea_add_adapter_mr (struct ehea_adapter *adapter)
+static int ehea_add_adapter_mr(struct ehea_adapter *adapter)
{
- int i;
-
- for (i=0; i < EHEA_MAX_PORTS; i++)
- if (adapter->port[i])
- return 0;
+ if (adapter->active_ports)
+ return 0;
return ehea_reg_kernel_mr(adapter, &adapter->mr);
}
@@ -2157,7 +2192,6 @@ static int ehea_up(struct net_device *dev)
{
int ret, i;
struct ehea_port *port = netdev_priv(dev);
- u64 mac_addr = 0;
if (port->state == EHEA_PORT_UP)
return 0;
@@ -2176,18 +2210,10 @@ static int ehea_up(struct net_device *dev)
goto out_clean_pr;
}
- ret = ehea_broadcast_reg_helper(port, H_REG_BCMC);
- if (ret) {
- ret = -EIO;
- ehea_error("out_clean_pr");
- goto out_clean_pr;
- }
- mac_addr = (*(u64*)dev->dev_addr) >> 16;
-
ret = ehea_reg_interrupts(dev);
if (ret) {
- ehea_error("out_dereg_bc");
- goto out_dereg_bc;
+ ehea_error("reg_interrupts failed. ret:%d", ret);
+ goto out_clean_pr;
}
for(i = 0; i < port->num_def_qps + port->num_add_tx_qps; i++) {
@@ -2213,12 +2239,12 @@ static int ehea_up(struct net_device *dev)
out_free_irqs:
ehea_free_interrupts(dev);
-out_dereg_bc:
- ehea_broadcast_reg_helper(port, H_DEREG_BCMC);
-
out_clean_pr:
ehea_clean_all_portres(port);
out:
+ if (ret)
+ ehea_info("Failed starting %s. ret=%i", dev->name, ret);
+
return ret;
}
@@ -2257,9 +2283,13 @@ static int ehea_down(struct net_device *dev)
&port->port_res[i].d_netdev->state))
msleep(1);
- ehea_broadcast_reg_helper(port, H_DEREG_BCMC);
- ret = ehea_clean_all_portres(port);
port->state = EHEA_PORT_DOWN;
+
+ ret = ehea_clean_all_portres(port);
+ if (ret)
+ ehea_info("Failed freeing resources for %s. ret=%i",
+ dev->name, ret);
+
return ret;
}
@@ -2291,15 +2321,11 @@ static void ehea_reset_port(struct work_struct *work)
netif_stop_queue(dev);
netif_poll_disable(dev);
- ret = ehea_down(dev);
- if (ret)
- ehea_error("ehea_down failed. not all resources are freed");
+ ehea_down(dev);
ret = ehea_up(dev);
- if (ret) {
- ehea_error("Reset device %s failed: ret=%d", dev->name, ret);
+ if (ret)
goto out;
- }
if (netif_msg_timer(port))
ehea_info("Device %s resetted successfully", dev->name);
@@ -2311,6 +2337,88 @@ out:
return;
}
+static void ehea_rereg_mrs(struct work_struct *work)
+{
+ int ret, i;
+ struct ehea_adapter *adapter;
+
+ ehea_info("LPAR memory enlarged - re-initializing driver");
+
+ list_for_each_entry(adapter, &adapter_list, list)
+ if (adapter->active_ports) {
+ /* Shutdown all ports */
+ for (i = 0; i < EHEA_MAX_PORTS; i++) {
+ struct ehea_port *port = adapter->port[i];
+
+ if (port) {
+ struct net_device *dev = port->netdev;
+
+ if (dev->flags & IFF_UP) {
+ ehea_info("stopping %s",
+ dev->name);
+ down(&port->port_lock);
+ netif_stop_queue(dev);
+ netif_poll_disable(dev);
+ ehea_down(dev);
+ up(&port->port_lock);
+ }
+ }
+ }
+
+ /* Unregister old memory region */
+ ret = ehea_rem_mr(&adapter->mr);
+ if (ret) {
+ ehea_error("unregister MR failed - driver"
+ " inoperable!");
+ goto out;
+ }
+ }
+
+ ehea_destroy_busmap();
+
+ ret = ehea_create_busmap();
+ if (ret)
+ goto out;
+
+ clear_bit(__EHEA_STOP_XFER, &ehea_driver_flags);
+
+ list_for_each_entry(adapter, &adapter_list, list)
+ if (adapter->active_ports) {
+ /* Register new memory region */
+ ret = ehea_reg_kernel_mr(adapter, &adapter->mr);
+ if (ret) {
+ ehea_error("register MR failed - driver"
+ " inoperable!");
+ goto out;
+ }
+
+ /* Restart all ports */
+ for (i = 0; i < EHEA_MAX_PORTS; i++) {
+ struct ehea_port *port = adapter->port[i];
+
+ if (port) {
+ struct net_device *dev = port->netdev;
+
+ if (dev->flags & IFF_UP) {
+ ehea_info("restarting %s",
+ dev->name);
+ down(&port->port_lock);
+
+ ret = ehea_up(dev);
+ if (!ret) {
+ netif_poll_enable(dev);
+ netif_wake_queue(dev);
+ }
+
+ up(&port->port_lock);
+ }
+ }
+ }
+ }
+out:
+ return;
+}
+
static void ehea_tx_watchdog(struct net_device *dev)
{
struct ehea_port *port = netdev_priv(dev);
@@ -2394,7 +2502,7 @@ static ssize_t ehea_show_port_id(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct ehea_port *port = container_of(dev, struct ehea_port, ofdev.dev);
- return sprintf(buf, "0x%X", port->logical_port_id);
+ return sprintf(buf, "%d", port->logical_port_id);
}
static DEVICE_ATTR(log_port_id, S_IRUSR | S_IRGRP | S_IROTH, ehea_show_port_id,
@@ -2407,7 +2515,7 @@ static void __devinit logical_port_release(struct device *dev)
}
static int ehea_driver_sysfs_add(struct device *dev,
- struct device_driver *driver)
+ struct device_driver *driver)
{
int ret;
@@ -2424,7 +2532,7 @@ static int ehea_driver_sysfs_add(struct device *dev,
}
static void ehea_driver_sysfs_remove(struct device *dev,
- struct device_driver *driver)
+ struct device_driver *driver)
{
struct device_driver *drv = driver;
@@ -2453,7 +2561,7 @@ static struct device *ehea_register_port(struct ehea_port *port,
}
ret = device_create_file(&port->ofdev.dev, &dev_attr_log_port_id);
- if (ret) {
+ if (ret) {
ehea_error("failed to register attributes, ret=%d", ret);
goto out_unreg_of_dev;
}
@@ -2536,7 +2644,10 @@ struct ehea_port *ehea_setup_single_port(struct ehea_adapter *adapter,
memcpy(dev->dev_addr, &port->mac_addr, ETH_ALEN);
dev->open = ehea_open;
- dev->poll = ehea_poll;
+ dev->poll = ehea_poll_firstqueue;
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ dev->poll_controller = ehea_netpoll;
+#endif
dev->weight = 64;
dev->stop = ehea_stop;
dev->hard_start_xmit = ehea_start_xmit;
@@ -2556,12 +2667,18 @@ struct ehea_port *ehea_setup_single_port(struct ehea_adapter *adapter,
INIT_WORK(&port->reset_task, ehea_reset_port);
+ ret = ehea_broadcast_reg_helper(port, H_REG_BCMC);
+ if (ret) {
+ ret = -EIO;
+ goto out_unreg_port;
+ }
+
ehea_set_ethtool_ops(dev);
ret = register_netdev(dev);
if (ret) {
ehea_error("register_netdev failed. ret=%d", ret);
- goto out_unreg_port;
+ goto out_dereg_bc;
}
ret = ehea_get_jumboframe_status(port, &jumbo);
@@ -2572,8 +2689,13 @@ struct ehea_port *ehea_setup_single_port(struct ehea_adapter *adapter,
ehea_info("%s: Jumbo frames are %sabled", dev->name,
jumbo == 1 ? "en" : "dis");
+ adapter->active_ports++;
+
return port;
+out_dereg_bc:
+ ehea_broadcast_reg_helper(port, H_DEREG_BCMC);
+
out_unreg_port:
ehea_unregister_port(port);
@@ -2593,14 +2715,17 @@ static void ehea_shutdown_single_port(struct ehea_port *port)
{
unregister_netdev(port->netdev);
ehea_unregister_port(port);
+ ehea_broadcast_reg_helper(port, H_DEREG_BCMC);
kfree(port->mc_list);
free_netdev(port->netdev);
+ port->adapter->active_ports--;
}
static int ehea_setup_ports(struct ehea_adapter *adapter)
{
struct device_node *lhea_dn;
struct device_node *eth_dn = NULL;
+
const u32 *dn_log_port_id;
int i = 0;
@@ -2608,7 +2733,7 @@ static int ehea_setup_ports(struct ehea_adapter *adapter)
while ((eth_dn = of_get_next_child(lhea_dn, eth_dn))) {
dn_log_port_id = of_get_property(eth_dn, "ibm,hea-port-no",
- NULL);
+ NULL);
if (!dn_log_port_id) {
ehea_error("bad device node: eth_dn name=%s",
eth_dn->full_name);
@@ -2648,7 +2773,7 @@ static struct device_node *ehea_get_eth_dn(struct ehea_adapter *adapter,
while ((eth_dn = of_get_next_child(lhea_dn, eth_dn))) {
dn_log_port_id = of_get_property(eth_dn, "ibm,hea-port-no",
- NULL);
+ NULL);
if (dn_log_port_id)
if (*dn_log_port_id == logical_port_id)
return eth_dn;
@@ -2668,7 +2793,7 @@ static ssize_t ehea_probe_port(struct device *dev,
u32 logical_port_id;
- sscanf(buf, "%X", &logical_port_id);
+ sscanf(buf, "%d", &logical_port_id);
port = ehea_get_port(adapter, logical_port_id);
@@ -2721,7 +2846,7 @@ static ssize_t ehea_remove_port(struct device *dev,
int i;
u32 logical_port_id;
- sscanf(buf, "%X", &logical_port_id);
+ sscanf(buf, "%d", &logical_port_id);
port = ehea_get_port(adapter, logical_port_id);
@@ -2786,10 +2911,12 @@ static int __devinit ehea_probe_adapter(struct ibmebus_dev *dev,
goto out;
}
+ list_add(&adapter->list, &adapter_list);
+
adapter->ebus_dev = dev;
adapter_handle = of_get_property(dev->ofdev.node, "ibm,hea-handle",
- NULL);
+ NULL);
if (adapter_handle)
adapter->handle = *adapter_handle;
@@ -2889,7 +3016,10 @@ static int __devexit ehea_remove(struct ibmebus_dev *dev)
ehea_destroy_eq(adapter->neq);
ehea_remove_adapter_mr(adapter);
+ list_del(&adapter->list);
+
kfree(adapter);
+
return 0;
}
@@ -2921,6 +3051,15 @@ static int check_module_parm(void)
return ret;
}
+static ssize_t ehea_show_capabilities(struct device_driver *drv,
+ char *buf)
+{
+ return sprintf(buf, "%d", EHEA_CAPABILITIES);
+}
+
+static DRIVER_ATTR(capabilities, S_IRUSR | S_IRGRP | S_IROTH,
+ ehea_show_capabilities, NULL);
+
int __init ehea_module_init(void)
{
int ret;
@@ -2928,12 +3067,32 @@ int __init ehea_module_init(void)
printk(KERN_INFO "IBM eHEA ethernet device driver (Release %s)\n",
DRV_VERSION);
+ ehea_driver_wq = create_workqueue("ehea_driver_wq");
+
+ INIT_WORK(&ehea_rereg_mr_task, ehea_rereg_mrs);
+
ret = check_module_parm();
if (ret)
goto out;
- ret = ibmebus_register_driver(&ehea_driver);
+
+ ret = ehea_create_busmap();
if (ret)
+ goto out;
+
+ ret = ibmebus_register_driver(&ehea_driver);
+ if (ret) {
ehea_error("failed registering eHEA device driver on ebus");
+ goto out;
+ }
+
+ ret = driver_create_file(&ehea_driver.driver,
+ &driver_attr_capabilities);
+ if (ret) {
+ ehea_error("failed to register capabilities attribute, ret=%d",
+ ret);
+ ibmebus_unregister_driver(&ehea_driver);
+ goto out;
+ }
out:
return ret;
@@ -2941,7 +3100,10 @@ out:
static void __exit ehea_module_exit(void)
{
+ destroy_workqueue(ehea_driver_wq);
+ driver_remove_file(&ehea_driver.driver, &driver_attr_capabilities);
ibmebus_unregister_driver(&ehea_driver);
+ ehea_destroy_busmap();
}
module_init(ehea_module_init);
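
[Note: the ehea_main.c hunks above export a read-only "capabilities" driver attribute reporting the EHEA_CAPABILITIES bitmask from ehea.h (currently only DLPAR_PORT_ADD_REM). The stand-alone user-space sketch below shows how such a value could be read and decoded; it is not part of the patch, and the sysfs path is an assumption based on the ibmebus bus name.]

/*
 * Sketch only: decode the eHEA capability bitmask exposed through the
 * driver's "capabilities" attribute.  Path is assumed, not taken from
 * the patch.
 */
#include <stdio.h>

#define DLPAR_PORT_ADD_REM 1
#define DLPAR_MEM_ADD      2
#define DLPAR_MEM_REM      4

int main(void)
{
    const char *path = "/sys/bus/ibmebus/drivers/ehea/capabilities"; /* assumed */
    unsigned int caps = 0;
    FILE *f = fopen(path, "r");

    if (!f) {
        perror(path);
        return 1;
    }
    if (fscanf(f, "%u", &caps) != 1)
        caps = 0;
    fclose(f);

    printf("DLPAR port add/remove: %s\n", caps & DLPAR_PORT_ADD_REM ? "yes" : "no");
    printf("DLPAR memory add:      %s\n", caps & DLPAR_MEM_ADD ? "yes" : "no");
    printf("DLPAR memory remove:   %s\n", caps & DLPAR_MEM_REM ? "yes" : "no");
    return 0;
}
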
diff --git a/drivers/net/ehea/ehea_phyp.h b/drivers/net/ehea/ehea_phyp.h
index d17a45a7e717..89b63531ff26 100644
--- a/drivers/net/ehea/ehea_phyp.h
+++ b/drivers/net/ehea/ehea_phyp.h
@@ -60,6 +60,9 @@ static inline u32 get_longbusy_msecs(int long_busy_ret_code)
}
}
+/* Number of pages which can be registered at once by H_REGISTER_HEA_RPAGES */
+#define EHEA_MAX_RPAGE 512
+
/* Notification Event Queue (NEQ) Entry bit masks */
#define NEQE_EVENT_CODE EHEA_BMASK_IBM(2, 7)
#define NEQE_PORTNUM EHEA_BMASK_IBM(32, 47)
diff --git a/drivers/net/ehea/ehea_qmr.c b/drivers/net/ehea/ehea_qmr.c
index f24a8862977d..c82e24596074 100644
--- a/drivers/net/ehea/ehea_qmr.c
+++ b/drivers/net/ehea/ehea_qmr.c
@@ -31,6 +31,13 @@
#include "ehea_phyp.h"
#include "ehea_qmr.h"
+
+struct ehea_busmap ehea_bmap = { 0, 0, NULL };
+extern u64 ehea_driver_flags;
+extern struct workqueue_struct *ehea_driver_wq;
+extern struct work_struct ehea_rereg_mr_task;
+
+
static void *hw_qpageit_get_inc(struct hw_queue *queue)
{
void *retvalue = hw_qeit_get(queue);
@@ -211,7 +218,7 @@ u64 ehea_destroy_cq_res(struct ehea_cq *cq, u64 force)
u64 hret;
u64 adapter_handle = cq->adapter->handle;
- /* deregister all previous registered pages */
+ /* deregister all previous registered pages */
hret = ehea_h_free_resource(adapter_handle, cq->fw_handle, force);
if (hret != H_SUCCESS)
return hret;
@@ -228,6 +235,8 @@ int ehea_destroy_cq(struct ehea_cq *cq)
if (!cq)
return 0;
+ hcp_epas_dtor(&cq->epas);
+
if ((hret = ehea_destroy_cq_res(cq, NORMAL_FREE)) == H_R_STATE) {
ehea_error_data(cq->adapter, cq->fw_handle);
hret = ehea_destroy_cq_res(cq, FORCE_FREE);
@@ -354,6 +363,8 @@ int ehea_destroy_eq(struct ehea_eq *eq)
if (!eq)
return 0;
+ hcp_epas_dtor(&eq->epas);
+
if ((hret = ehea_destroy_eq_res(eq, NORMAL_FREE)) == H_R_STATE) {
ehea_error_data(eq->adapter, eq->fw_handle);
hret = ehea_destroy_eq_res(eq, FORCE_FREE);
@@ -362,7 +373,7 @@ int ehea_destroy_eq(struct ehea_eq *eq)
if (hret != H_SUCCESS) {
ehea_error("destroy EQ failed");
return -EIO;
- }
+ }
return 0;
}
@@ -507,58 +518,126 @@ out_freemem:
u64 ehea_destroy_qp_res(struct ehea_qp *qp, u64 force)
{
- u64 hret;
- struct ehea_qp_init_attr *qp_attr = &qp->init_attr;
+ u64 hret;
+ struct ehea_qp_init_attr *qp_attr = &qp->init_attr;
- ehea_h_disable_and_get_hea(qp->adapter->handle, qp->fw_handle);
- hret = ehea_h_free_resource(qp->adapter->handle, qp->fw_handle, force);
- if (hret != H_SUCCESS)
- return hret;
+ ehea_h_disable_and_get_hea(qp->adapter->handle, qp->fw_handle);
+ hret = ehea_h_free_resource(qp->adapter->handle, qp->fw_handle, force);
+ if (hret != H_SUCCESS)
+ return hret;
- hw_queue_dtor(&qp->hw_squeue);
- hw_queue_dtor(&qp->hw_rqueue1);
+ hw_queue_dtor(&qp->hw_squeue);
+ hw_queue_dtor(&qp->hw_rqueue1);
- if (qp_attr->rq_count > 1)
- hw_queue_dtor(&qp->hw_rqueue2);
- if (qp_attr->rq_count > 2)
- hw_queue_dtor(&qp->hw_rqueue3);
- kfree(qp);
+ if (qp_attr->rq_count > 1)
+ hw_queue_dtor(&qp->hw_rqueue2);
+ if (qp_attr->rq_count > 2)
+ hw_queue_dtor(&qp->hw_rqueue3);
+ kfree(qp);
- return hret;
+ return hret;
}
int ehea_destroy_qp(struct ehea_qp *qp)
{
- u64 hret;
- if (!qp)
- return 0;
+ u64 hret;
+ if (!qp)
+ return 0;
+
+ hcp_epas_dtor(&qp->epas);
+
+ if ((hret = ehea_destroy_qp_res(qp, NORMAL_FREE)) == H_R_STATE) {
+ ehea_error_data(qp->adapter, qp->fw_handle);
+ hret = ehea_destroy_qp_res(qp, FORCE_FREE);
+ }
+
+ if (hret != H_SUCCESS) {
+ ehea_error("destroy QP failed");
+ return -EIO;
+ }
+
+ return 0;
+}
+
+int ehea_create_busmap( void )
+{
+ u64 vaddr = EHEA_BUSMAP_START;
+ unsigned long abs_max_pfn = 0;
+ unsigned long sec_max_pfn;
+ int i;
- if ((hret = ehea_destroy_qp_res(qp, NORMAL_FREE)) == H_R_STATE) {
- ehea_error_data(qp->adapter, qp->fw_handle);
- hret = ehea_destroy_qp_res(qp, FORCE_FREE);
- }
+ /*
+ * Sections are not in ascending order -> Loop over all sections and
+ * find the highest PFN to compute the required map size.
+ */
+ ehea_bmap.valid_sections = 0;
+
+ for (i = 0; i < NR_MEM_SECTIONS; i++)
+ if (valid_section_nr(i)) {
+ sec_max_pfn = section_nr_to_pfn(i);
+ if (sec_max_pfn > abs_max_pfn)
+ abs_max_pfn = sec_max_pfn;
+ ehea_bmap.valid_sections++;
+ }
+
+ ehea_bmap.entries = abs_max_pfn / EHEA_PAGES_PER_SECTION + 1;
+ ehea_bmap.vaddr = vmalloc(ehea_bmap.entries * sizeof(*ehea_bmap.vaddr));
+
+ if (!ehea_bmap.vaddr)
+ return -ENOMEM;
- if (hret != H_SUCCESS) {
- ehea_error("destroy QP failed");
- return -EIO;
- }
+ for (i = 0 ; i < ehea_bmap.entries; i++) {
+ unsigned long pfn = section_nr_to_pfn(i);
- return 0;
+ if (pfn_valid(pfn)) {
+ ehea_bmap.vaddr[i] = vaddr;
+ vaddr += EHEA_SECTSIZE;
+ } else
+ ehea_bmap.vaddr[i] = 0;
+ }
+
+ return 0;
+}
+
+void ehea_destroy_busmap( void )
+{
+ vfree(ehea_bmap.vaddr);
+}
+
+u64 ehea_map_vaddr(void *caddr)
+{
+ u64 mapped_addr;
+ unsigned long index = __pa(caddr) >> SECTION_SIZE_BITS;
+
+ if (likely(index < ehea_bmap.entries)) {
+ mapped_addr = ehea_bmap.vaddr[index];
+ if (likely(mapped_addr))
+ mapped_addr |= (((unsigned long)caddr)
+ & (EHEA_SECTSIZE - 1));
+ else
+ mapped_addr = -1;
+ } else
+ mapped_addr = -1;
+
+ if (unlikely(mapped_addr == -1))
+ if (!test_and_set_bit(__EHEA_STOP_XFER, &ehea_driver_flags))
+ queue_work(ehea_driver_wq, &ehea_rereg_mr_task);
+
+ return mapped_addr;
}
int ehea_reg_kernel_mr(struct ehea_adapter *adapter, struct ehea_mr *mr)
{
- int i, k, ret;
- u64 hret, pt_abs, start, end, nr_pages;
- u32 acc_ctrl = EHEA_MR_ACC_CTRL;
+ int ret;
u64 *pt;
+ void *pg;
+ u64 hret, pt_abs, i, j, m, mr_len;
+ u32 acc_ctrl = EHEA_MR_ACC_CTRL;
- start = KERNELBASE;
- end = (u64)high_memory;
- nr_pages = (end - start) / EHEA_PAGESIZE;
+ mr_len = ehea_bmap.valid_sections * EHEA_SECTSIZE;
- pt = kzalloc(PAGE_SIZE, GFP_KERNEL);
+ pt = kzalloc(EHEA_MAX_RPAGE * sizeof(u64), GFP_KERNEL);
if (!pt) {
ehea_error("no mem");
ret = -ENOMEM;
@@ -566,7 +645,8 @@ int ehea_reg_kernel_mr(struct ehea_adapter *adapter, struct ehea_mr *mr)
}
pt_abs = virt_to_abs(pt);
- hret = ehea_h_alloc_resource_mr(adapter->handle, start, end - start,
+ hret = ehea_h_alloc_resource_mr(adapter->handle,
+ EHEA_BUSMAP_START, mr_len,
acc_ctrl, adapter->pd,
&mr->handle, &mr->lkey);
if (hret != H_SUCCESS) {
@@ -575,49 +655,43 @@ int ehea_reg_kernel_mr(struct ehea_adapter *adapter, struct ehea_mr *mr)
goto out;
}
- mr->vaddr = KERNELBASE;
- k = 0;
-
- while (nr_pages > 0) {
- if (nr_pages > 1) {
- u64 num_pages = min(nr_pages, (u64)512);
- for (i = 0; i < num_pages; i++)
- pt[i] = virt_to_abs((void*)(((u64)start) +
- ((k++) *
- EHEA_PAGESIZE)));
-
- hret = ehea_h_register_rpage_mr(adapter->handle,
- mr->handle, 0,
- 0, (u64)pt_abs,
- num_pages);
- nr_pages -= num_pages;
- } else {
- u64 abs_adr = virt_to_abs((void*)(((u64)start) +
- (k * EHEA_PAGESIZE)));
-
- hret = ehea_h_register_rpage_mr(adapter->handle,
- mr->handle, 0,
- 0, abs_adr,1);
- nr_pages--;
- }
-
- if ((hret != H_SUCCESS) && (hret != H_PAGE_REGISTERED)) {
- ehea_h_free_resource(adapter->handle,
- mr->handle, FORCE_FREE);
- ehea_error("register_rpage_mr failed");
- ret = -EIO;
- goto out;
+ for (i = 0 ; i < ehea_bmap.entries; i++)
+ if (ehea_bmap.vaddr[i]) {
+ void *sectbase = __va(i << SECTION_SIZE_BITS);
+ unsigned long k = 0;
+
+ for (j = 0; j < (PAGES_PER_SECTION / EHEA_MAX_RPAGE);
+ j++) {
+
+ for (m = 0; m < EHEA_MAX_RPAGE; m++) {
+ pg = sectbase + ((k++) * EHEA_PAGESIZE);
+ pt[m] = virt_to_abs(pg);
+ }
+
+ hret = ehea_h_register_rpage_mr(adapter->handle,
+ mr->handle,
+ 0, 0, pt_abs,
+ EHEA_MAX_RPAGE);
+ if ((hret != H_SUCCESS)
+ && (hret != H_PAGE_REGISTERED)) {
+ ehea_h_free_resource(adapter->handle,
+ mr->handle,
+ FORCE_FREE);
+ ehea_error("register_rpage_mr failed");
+ ret = -EIO;
+ goto out;
+ }
+ }
}
- }
if (hret != H_SUCCESS) {
- ehea_h_free_resource(adapter->handle, mr->handle,
- FORCE_FREE);
- ehea_error("register_rpage failed for last page");
+ ehea_h_free_resource(adapter->handle, mr->handle, FORCE_FREE);
+ ehea_error("registering mr failed");
ret = -EIO;
goto out;
}
+ mr->vaddr = EHEA_BUSMAP_START;
mr->adapter = adapter;
ret = 0;
out:
diff --git a/drivers/net/ehea/ehea_qmr.h b/drivers/net/ehea/ehea_qmr.h
index c0eb3e03a102..b71f8452a5e3 100644
--- a/drivers/net/ehea/ehea_qmr.h
+++ b/drivers/net/ehea/ehea_qmr.h
@@ -36,8 +36,14 @@
* page size of ehea hardware queues
*/
-#define EHEA_PAGESHIFT 12
-#define EHEA_PAGESIZE 4096UL
+#define EHEA_PAGESHIFT 12
+#define EHEA_PAGESIZE (1UL << EHEA_PAGESHIFT)
+#define EHEA_SECTSIZE (1UL << 24)
+#define EHEA_PAGES_PER_SECTION (EHEA_SECTSIZE >> PAGE_SHIFT)
+
+#if (1UL << SECTION_SIZE_BITS) < EHEA_SECTSIZE
+#error eHEA module can't work if kernel sectionsize < ehea sectionsize
+#endif
/* Some abbreviations used here:
*
@@ -372,4 +378,8 @@ int ehea_rem_mr(struct ehea_mr *mr);
void ehea_error_data(struct ehea_adapter *adapter, u64 res_handle);
+int ehea_create_busmap( void );
+void ehea_destroy_busmap( void );
+u64 ehea_map_vaddr(void *caddr);
+
#endif /* __EHEA_QMR_H__ */
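
[Note: the ehea_qmr.c/ehea_qmr.h hunks above introduce the busmap: memory is viewed in 16 MB sections, each present section gets a slot in ehea_bmap.vaddr holding a bus address carved from a contiguous range starting at EHEA_BUSMAP_START, and ehea_map_vaddr() translates a kernel address by section index plus in-section offset. The stand-alone toy model below mirrors only that lookup in user space; the section size, start address and sample addresses are illustrative assumptions, and it omits the driver's stop-transfer/MR re-registration path taken on a lookup miss.]

/* Toy model (not driver code) of the section-indexed busmap lookup. */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define SECT_SHIFT   24                        /* 16 MB sections, like EHEA_SECTSIZE */
#define SECT_SIZE    (1ULL << SECT_SHIFT)
#define BUSMAP_START 0x8000000000000000ULL     /* like EHEA_BUSMAP_START */

struct busmap {
    unsigned int entries;   /* number of section slots */
    uint64_t *vaddr;        /* bus address per section, 0 if the section is absent */
};

/* Hand out consecutive bus addresses to the sections marked present. */
static int busmap_create(struct busmap *bm, const int *present, unsigned int n)
{
    uint64_t next = BUSMAP_START;

    bm->entries = n;
    bm->vaddr = calloc(n, sizeof(*bm->vaddr));
    if (!bm->vaddr)
        return -1;
    for (unsigned int i = 0; i < n; i++) {
        if (present[i]) {
            bm->vaddr[i] = next;
            next += SECT_SIZE;
        }
    }
    return 0;
}

/* Translate a physical address into the bus address space; -1 on a miss
 * (where the driver would set __EHEA_STOP_XFER and queue re-registration). */
static uint64_t busmap_map(const struct busmap *bm, uint64_t paddr)
{
    uint64_t idx = paddr >> SECT_SHIFT;

    if (idx >= bm->entries || !bm->vaddr[idx])
        return (uint64_t)-1;
    return bm->vaddr[idx] | (paddr & (SECT_SIZE - 1));
}

int main(void)
{
    /* Sections 0 and 2 exist; section 1 is a hole (e.g. not yet added by DLPAR). */
    int present[] = { 1, 0, 1 };
    struct busmap bm;

    if (busmap_create(&bm, present, 3))
        return 1;
    printf("0x%llx\n", (unsigned long long)busmap_map(&bm, 0x123ULL));
    printf("0x%llx\n", (unsigned long long)busmap_map(&bm, 2 * SECT_SIZE + 0x10ULL));
    printf("0x%llx\n", (unsigned long long)busmap_map(&bm, 1 * SECT_SIZE)); /* miss */
    free(bm.vaddr);
    return 0;
}
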