Diffstat (limited to 'drivers/net')
-rw-r--r--drivers/net/3c501.c6
-rw-r--r--drivers/net/3c509.c16
-rw-r--r--drivers/net/3c523.c2
-rw-r--r--drivers/net/3c527.c4
-rw-r--r--drivers/net/3c59x.c6
-rw-r--r--drivers/net/8139cp.c46
-rw-r--r--drivers/net/Kconfig8
-rw-r--r--drivers/net/acenic.c16
-rw-r--r--drivers/net/amd8111e.c4
-rw-r--r--drivers/net/arm/etherh.c5
-rw-r--r--drivers/net/arm/ks8695net.c7
-rw-r--r--drivers/net/at1700.c2
-rw-r--r--drivers/net/atl1c/atl1c.h6
-rw-r--r--drivers/net/atl1c/atl1c_ethtool.c17
-rw-r--r--drivers/net/atl1c/atl1c_main.c39
-rw-r--r--drivers/net/atl1e/atl1e_ethtool.c7
-rw-r--r--drivers/net/atl1e/atl1e_main.c16
-rw-r--r--drivers/net/atlx/atl1.c29
-rw-r--r--drivers/net/atlx/atl2.c20
-rw-r--r--drivers/net/b44.c13
-rw-r--r--drivers/net/bcm63xx_enet.c9
-rw-r--r--drivers/net/benet/be.h37
-rw-r--r--drivers/net/benet/be_cmds.c186
-rw-r--r--drivers/net/benet/be_cmds.h47
-rw-r--r--drivers/net/benet/be_ethtool.c119
-rw-r--r--drivers/net/benet/be_hw.h4
-rw-r--r--drivers/net/benet/be_main.c256
-rw-r--r--drivers/net/bna/bfa_ioc.c54
-rw-r--r--drivers/net/bna/bfa_ioc.h1
-rw-r--r--drivers/net/bna/bfa_ioc_ct.c28
-rw-r--r--drivers/net/bna/bfi.h6
-rw-r--r--drivers/net/bna/bna_ctrl.c21
-rw-r--r--drivers/net/bna/bna_hw.h2
-rw-r--r--drivers/net/bna/bna_txrx.c7
-rw-r--r--drivers/net/bna/bnad.c23
-rw-r--r--drivers/net/bna/bnad.h2
-rw-r--r--drivers/net/bna/bnad_ethtool.c70
-rw-r--r--drivers/net/bnx2.c186
-rw-r--r--drivers/net/bnx2.h3
-rw-r--r--drivers/net/bnx2x/bnx2x.h33
-rw-r--r--drivers/net/bnx2x/bnx2x_cmn.c661
-rw-r--r--drivers/net/bnx2x/bnx2x_cmn.h454
-rw-r--r--drivers/net/bnx2x/bnx2x_dcb.c50
-rw-r--r--drivers/net/bnx2x/bnx2x_dcb.h16
-rw-r--r--drivers/net/bnx2x/bnx2x_ethtool.c166
-rw-r--r--drivers/net/bnx2x/bnx2x_fw_defs.h2
-rw-r--r--drivers/net/bnx2x/bnx2x_fw_file_hdr.h2
-rw-r--r--drivers/net/bnx2x/bnx2x_hsi.h6
-rw-r--r--drivers/net/bnx2x/bnx2x_init.h2
-rw-r--r--drivers/net/bnx2x/bnx2x_init_ops.h2
-rw-r--r--drivers/net/bnx2x/bnx2x_link.c28
-rw-r--r--drivers/net/bnx2x/bnx2x_main.c369
-rw-r--r--drivers/net/bnx2x/bnx2x_reg.h46
-rw-r--r--drivers/net/bnx2x/bnx2x_stats.c2
-rw-r--r--drivers/net/bnx2x/bnx2x_stats.h2
-rw-r--r--drivers/net/bonding/Makefile3
-rw-r--r--drivers/net/bonding/bond_3ad.c50
-rw-r--r--drivers/net/bonding/bond_3ad.h4
-rw-r--r--drivers/net/bonding/bond_alb.c63
-rw-r--r--drivers/net/bonding/bond_alb.h7
-rw-r--r--drivers/net/bonding/bond_main.c530
-rw-r--r--drivers/net/bonding/bond_procfs.c2
-rw-r--r--drivers/net/bonding/bond_sysfs.c84
-rw-r--r--drivers/net/bonding/bonding.h56
-rw-r--r--drivers/net/caif/caif_shmcore.c2
-rw-r--r--drivers/net/caif/caif_spi.c2
-rw-r--r--drivers/net/caif/caif_spi_slave.c4
-rw-r--r--drivers/net/can/at91_can.c4
-rw-r--r--drivers/net/can/c_can/c_can.c4
-rw-r--r--drivers/net/can/janz-ican3.c8
-rw-r--r--drivers/net/can/mcp251x.c3
-rw-r--r--drivers/net/can/mscan/mpc5xxx_can.c2
-rw-r--r--drivers/net/can/mscan/mscan.c2
-rw-r--r--drivers/net/can/sja1000/sja1000.c4
-rw-r--r--drivers/net/can/slcan.c4
-rw-r--r--drivers/net/can/softing/softing.h2
-rw-r--r--drivers/net/can/softing/softing_main.c2
-rw-r--r--drivers/net/can/ti_hecc.c2
-rw-r--r--drivers/net/can/usb/ems_usb.c2
-rw-r--r--drivers/net/can/usb/esd_usb2.c2
-rw-r--r--drivers/net/cassini.c41
-rw-r--r--drivers/net/cassini.h2
-rw-r--r--drivers/net/chelsio/common.h5
-rw-r--r--drivers/net/chelsio/cxgb2.c59
-rw-r--r--drivers/net/chelsio/mv88e1xxx.c2
-rw-r--r--drivers/net/chelsio/pm3393.c2
-rw-r--r--drivers/net/chelsio/sge.c15
-rw-r--r--drivers/net/chelsio/tp.c5
-rw-r--r--drivers/net/chelsio/tp.h1
-rw-r--r--drivers/net/chelsio/vsc7326.c2
-rw-r--r--drivers/net/cnic.c62
-rw-r--r--drivers/net/cnic.h1
-rw-r--r--drivers/net/cnic_if.h6
-rw-r--r--drivers/net/cris/eth_v10.c6
-rw-r--r--drivers/net/cxgb3/adapter.h7
-rw-r--r--drivers/net/cxgb3/common.h1
-rw-r--r--drivers/net/cxgb3/cxgb3_main.c117
-rw-r--r--drivers/net/cxgb3/sge.c9
-rw-r--r--drivers/net/cxgb3/t3_hw.c6
-rw-r--r--drivers/net/cxgb4/cxgb4.h6
-rw-r--r--drivers/net/cxgb4/cxgb4_main.c103
-rw-r--r--drivers/net/cxgb4/sge.c4
-rw-r--r--drivers/net/cxgb4/t4_hw.c2
-rw-r--r--drivers/net/cxgb4vf/adapter.h6
-rw-r--r--drivers/net/cxgb4vf/cxgb4vf_main.c75
-rw-r--r--drivers/net/cxgb4vf/sge.c12
-rw-r--r--drivers/net/davinci_emac.c8
-rw-r--r--drivers/net/depca.c35
-rw-r--r--drivers/net/dl2k.c29
-rw-r--r--drivers/net/dm9000.c57
-rw-r--r--drivers/net/dummy.c4
-rw-r--r--drivers/net/e100.c78
-rw-r--r--drivers/net/e1000/e1000.h5
-rw-r--r--drivers/net/e1000/e1000_ethtool.c62
-rw-r--r--drivers/net/e1000/e1000_hw.h2
-rw-r--r--drivers/net/e1000/e1000_main.c44
-rw-r--r--drivers/net/e1000e/82571.c22
-rw-r--r--drivers/net/e1000e/e1000.h12
-rw-r--r--drivers/net/e1000e/es2lan.c5
-rw-r--r--drivers/net/e1000e/ethtool.c154
-rw-r--r--drivers/net/e1000e/hw.h1
-rw-r--r--drivers/net/e1000e/ich8lan.c16
-rw-r--r--drivers/net/e1000e/lib.c18
-rw-r--r--drivers/net/e1000e/netdev.c215
-rw-r--r--drivers/net/e1000e/phy.c4
-rw-r--r--drivers/net/eepro.c2
-rw-r--r--drivers/net/ehea/ehea_ethtool.c67
-rw-r--r--drivers/net/ehea/ehea_main.c13
-rw-r--r--drivers/net/enc28j60.c5
-rw-r--r--drivers/net/enc28j60_hw.h2
-rw-r--r--drivers/net/enic/Makefile2
-rw-r--r--drivers/net/enic/enic.h5
-rw-r--r--drivers/net/enic/enic_dev.c62
-rw-r--r--drivers/net/enic/enic_dev.h7
-rw-r--r--drivers/net/enic/enic_main.c274
-rw-r--r--drivers/net/enic/enic_pp.c264
-rw-r--r--drivers/net/enic/enic_pp.h27
-rw-r--r--drivers/net/enic/enic_res.c4
-rw-r--r--drivers/net/enic/vnic_dev.c97
-rw-r--r--drivers/net/enic/vnic_dev.h6
-rw-r--r--drivers/net/enic/vnic_devcmd.h57
-rw-r--r--drivers/net/enic/vnic_vic.c5
-rw-r--r--drivers/net/enic/vnic_vic.h13
-rw-r--r--drivers/net/eth16i.c2
-rw-r--r--drivers/net/ethoc.c2
-rw-r--r--drivers/net/ewrk3.c58
-rw-r--r--drivers/net/fec.h4
-rw-r--r--drivers/net/forcedeth.c127
-rw-r--r--drivers/net/fs_enet/fs_enet-main.c2
-rw-r--r--drivers/net/fs_enet/mac-fec.c8
-rw-r--r--drivers/net/ftmac100.c8
-rw-r--r--drivers/net/gianfar.c16
-rw-r--r--drivers/net/gianfar.h22
-rw-r--r--drivers/net/gianfar_ethtool.c110
-rw-r--r--drivers/net/greth.c46
-rw-r--r--drivers/net/greth.h4
-rw-r--r--drivers/net/hamachi.c78
-rw-r--r--drivers/net/hamradio/Makefile2
-rw-r--r--drivers/net/hamradio/bpqether.c4
-rw-r--r--drivers/net/hamradio/yam.c2
-rw-r--r--drivers/net/hp100.c28
-rw-r--r--drivers/net/hp100.h2
-rw-r--r--drivers/net/ibm_newemac/core.c17
-rw-r--r--drivers/net/ibm_newemac/tah.c2
-rw-r--r--drivers/net/ibmlana.c11
-rw-r--r--drivers/net/ibmlana.h2
-rw-r--r--drivers/net/ibmveth.c98
-rw-r--r--drivers/net/ifb.c4
-rw-r--r--drivers/net/igb/e1000_82575.c11
-rw-r--r--drivers/net/igb/e1000_mac.c4
-rw-r--r--drivers/net/igb/e1000_phy.c2
-rw-r--r--drivers/net/igb/igb.h2
-rw-r--r--drivers/net/igb/igb_ethtool.c48
-rw-r--r--drivers/net/igb/igb_main.c104
-rw-r--r--drivers/net/igbvf/ethtool.c14
-rw-r--r--drivers/net/igbvf/netdev.c2
-rw-r--r--drivers/net/ioc3-eth.c30
-rw-r--r--drivers/net/ipg.c28
-rw-r--r--drivers/net/irda/ali-ircc.c4
-rw-r--r--drivers/net/irda/donauboe.c2
-rw-r--r--drivers/net/irda/donauboe.h2
-rw-r--r--drivers/net/irda/girbil-sir.c2
-rw-r--r--drivers/net/irda/irda-usb.c4
-rw-r--r--drivers/net/irda/mcs7780.c6
-rw-r--r--drivers/net/irda/nsc-ircc.c2
-rw-r--r--drivers/net/irda/nsc-ircc.h2
-rw-r--r--drivers/net/irda/pxaficp_ir.c4
-rw-r--r--drivers/net/irda/smsc-ircc2.c46
-rw-r--r--drivers/net/irda/via-ircc.c4
-rw-r--r--drivers/net/irda/vlsi_ir.h4
-rw-r--r--drivers/net/ixgb/ixgb.h3
-rw-r--r--drivers/net/ixgb/ixgb_ethtool.c53
-rw-r--r--drivers/net/ixgbe/ixgbe.h111
-rw-r--r--drivers/net/ixgbe/ixgbe_82598.c61
-rw-r--r--drivers/net/ixgbe/ixgbe_82599.c178
-rw-r--r--drivers/net/ixgbe/ixgbe_common.c532
-rw-r--r--drivers/net/ixgbe/ixgbe_common.h12
-rw-r--r--drivers/net/ixgbe/ixgbe_dcb.c2
-rw-r--r--drivers/net/ixgbe/ixgbe_dcb_82598.c2
-rw-r--r--drivers/net/ixgbe/ixgbe_dcb_82599.c77
-rw-r--r--drivers/net/ixgbe/ixgbe_dcb_82599.h2
-rw-r--r--drivers/net/ixgbe/ixgbe_dcb_nl.c111
-rw-r--r--drivers/net/ixgbe/ixgbe_ethtool.c191
-rw-r--r--drivers/net/ixgbe/ixgbe_fcoe.c4
-rw-r--r--drivers/net/ixgbe/ixgbe_main.c1045
-rw-r--r--drivers/net/ixgbe/ixgbe_mbx.h4
-rw-r--r--drivers/net/ixgbe/ixgbe_phy.c22
-rw-r--r--drivers/net/ixgbe/ixgbe_sriov.c100
-rw-r--r--drivers/net/ixgbe/ixgbe_type.h218
-rw-r--r--drivers/net/ixgbe/ixgbe_x540.c319
-rw-r--r--drivers/net/ixgbevf/ethtool.c8
-rw-r--r--drivers/net/ixgbevf/ixgbevf_main.c32
-rw-r--r--drivers/net/ixgbevf/mbx.h1
-rw-r--r--drivers/net/ixgbevf/vf.c34
-rw-r--r--drivers/net/ixgbevf/vf.h1
-rw-r--r--drivers/net/jme.c80
-rw-r--r--drivers/net/jme.h2
-rw-r--r--drivers/net/ks8842.c2
-rw-r--r--drivers/net/ks8851.c8
-rw-r--r--drivers/net/ks8851_mll.c4
-rw-r--r--drivers/net/ksz884x.c82
-rw-r--r--drivers/net/lib8390.c4
-rw-r--r--drivers/net/loopback.c4
-rw-r--r--drivers/net/lp486e.c2
-rw-r--r--drivers/net/macb.c11
-rw-r--r--drivers/net/macvlan.c21
-rw-r--r--drivers/net/mdio.c23
-rw-r--r--drivers/net/meth.h4
-rw-r--r--drivers/net/mii.c35
-rw-r--r--drivers/net/mlx4/en_ethtool.c49
-rw-r--r--drivers/net/mlx4/en_main.c2
-rw-r--r--drivers/net/mlx4/en_netdev.c36
-rw-r--r--drivers/net/mlx4/en_rx.c8
-rw-r--r--drivers/net/mlx4/en_selftest.c2
-rw-r--r--drivers/net/mlx4/en_tx.c2
-rw-r--r--drivers/net/mlx4/main.c5
-rw-r--r--drivers/net/mlx4/mcg.c2
-rw-r--r--drivers/net/mlx4/mlx4.h2
-rw-r--r--drivers/net/mlx4/mlx4_en.h1
-rw-r--r--drivers/net/mlx4/port.c4
-rw-r--r--drivers/net/mlx4/sense.c4
-rw-r--r--drivers/net/mv643xx_eth.c34
-rw-r--r--drivers/net/myri10ge/myri10ge.c70
-rw-r--r--drivers/net/myri_sbus.c2
-rw-r--r--drivers/net/natsemi.c20
-rw-r--r--drivers/net/ne3210.c15
-rw-r--r--drivers/net/netconsole.c90
-rw-r--r--drivers/net/netxen/netxen_nic.h6
-rw-r--r--drivers/net/netxen/netxen_nic_ethtool.c117
-rw-r--r--drivers/net/netxen/netxen_nic_hdr.h2
-rw-r--r--drivers/net/netxen/netxen_nic_init.c3
-rw-r--r--drivers/net/netxen/netxen_nic_main.c72
-rw-r--r--drivers/net/niu.c63
-rw-r--r--drivers/net/niu.h1
-rw-r--r--drivers/net/ns83820.c12
-rw-r--r--drivers/net/pch_gbe/pch_gbe.h18
-rw-r--r--drivers/net/pch_gbe/pch_gbe_ethtool.c67
-rw-r--r--drivers/net/pch_gbe/pch_gbe_main.c75
-rw-r--r--drivers/net/pch_gbe/pch_gbe_param.c16
-rw-r--r--drivers/net/pch_gbe/pch_gbe_phy.c4
-rw-r--r--drivers/net/pci-skeleton.c2
-rw-r--r--drivers/net/pcmcia/3c574_cs.c2
-rw-r--r--drivers/net/pcmcia/axnet_cs.c4
-rw-r--r--drivers/net/pcmcia/smc91c92_cs.c8
-rw-r--r--drivers/net/pcnet32.c92
-rw-r--r--drivers/net/phy/phy.c12
-rw-r--r--drivers/net/phy/phy_device.c4
-rw-r--r--drivers/net/ppp_generic.c2
-rw-r--r--drivers/net/ppp_synctty.c2
-rw-r--r--drivers/net/pppoe.c4
-rw-r--r--drivers/net/pptp.c10
-rw-r--r--drivers/net/ps3_gelic_net.c38
-rw-r--r--drivers/net/ps3_gelic_net.h5
-rw-r--r--drivers/net/ps3_gelic_wireless.c6
-rw-r--r--drivers/net/pxa168_eth.c2
-rw-r--r--drivers/net/qla3xxx.c4
-rw-r--r--drivers/net/qla3xxx.h2
-rw-r--r--drivers/net/qlcnic/qlcnic.h417
-rw-r--r--drivers/net/qlcnic/qlcnic_ctx.c250
-rw-r--r--drivers/net/qlcnic/qlcnic_ethtool.c417
-rw-r--r--drivers/net/qlcnic/qlcnic_hdr.h40
-rw-r--r--drivers/net/qlcnic/qlcnic_hw.c578
-rw-r--r--drivers/net/qlcnic/qlcnic_init.c66
-rw-r--r--drivers/net/qlcnic/qlcnic_main.c646
-rw-r--r--drivers/net/qlge/qlge.h2
-rw-r--r--drivers/net/qlge/qlge_ethtool.c74
-rw-r--r--drivers/net/qlge/qlge_main.c35
-rw-r--r--drivers/net/r6040.c2
-rw-r--r--drivers/net/r8169.c1045
-rw-r--r--drivers/net/s2io.c203
-rw-r--r--drivers/net/s2io.h15
-rw-r--r--drivers/net/sc92031.c8
-rw-r--r--drivers/net/sfc/efx.c32
-rw-r--r--drivers/net/sfc/ethtool.c111
-rw-r--r--drivers/net/sfc/falcon.c8
-rw-r--r--drivers/net/sfc/falcon_xmac.c2
-rw-r--r--drivers/net/sfc/io.h2
-rw-r--r--drivers/net/sfc/mac.h4
-rw-r--r--drivers/net/sfc/mcdi.c2
-rw-r--r--drivers/net/sfc/mcdi_mac.c2
-rw-r--r--drivers/net/sfc/mcdi_pcol.h6
-rw-r--r--drivers/net/sfc/mcdi_phy.c10
-rw-r--r--drivers/net/sfc/mdio_10g.c4
-rw-r--r--drivers/net/sfc/net_driver.h12
-rw-r--r--drivers/net/sfc/nic.c28
-rw-r--r--drivers/net/sfc/nic.h7
-rw-r--r--drivers/net/sfc/phy.h8
-rw-r--r--drivers/net/sfc/qt202x_phy.c2
-rw-r--r--drivers/net/sfc/rx.c3
-rw-r--r--drivers/net/sfc/selftest.c25
-rw-r--r--drivers/net/sfc/siena.c2
-rw-r--r--drivers/net/sfc/tenxpress.c4
-rw-r--r--drivers/net/sfc/tx.c3
-rw-r--r--drivers/net/sfc/txc43128_phy.c2
-rw-r--r--drivers/net/sgiseeq.c4
-rw-r--r--drivers/net/sh_eth.c2
-rw-r--r--drivers/net/sis190.c4
-rw-r--r--drivers/net/sis900.c29
-rw-r--r--drivers/net/skfp/ess.c8
-rw-r--r--drivers/net/skfp/fplustm.c6
-rw-r--r--drivers/net/skfp/h/cmtdef.h4
-rw-r--r--drivers/net/skfp/h/fplustm.h4
-rw-r--r--drivers/net/skfp/h/smc.h2
-rw-r--r--drivers/net/skfp/h/smt.h2
-rw-r--r--drivers/net/skfp/h/supern_2.h4
-rw-r--r--drivers/net/skfp/hwmtm.c2
-rw-r--r--drivers/net/skfp/pcmplc.c6
-rw-r--r--drivers/net/skfp/smt.c2
-rw-r--r--drivers/net/skge.c93
-rw-r--r--drivers/net/skge.h9
-rw-r--r--drivers/net/sky2.c199
-rw-r--r--drivers/net/sky2.h7
-rw-r--r--drivers/net/slip.c4
-rw-r--r--drivers/net/smc-mca.c6
-rw-r--r--drivers/net/smc911x.c4
-rw-r--r--drivers/net/smc91x.c4
-rw-r--r--drivers/net/smc91x.h2
-rw-r--r--drivers/net/smsc911x.c456
-rw-r--r--drivers/net/smsc911x.h22
-rw-r--r--drivers/net/smsc9420.c2
-rw-r--r--drivers/net/spider_net.c15
-rw-r--r--drivers/net/spider_net.h7
-rw-r--r--drivers/net/spider_net_ethtool.c23
-rw-r--r--drivers/net/stmmac/dwmac1000_core.c5
-rw-r--r--drivers/net/stmmac/dwmac_lib.c28
-rw-r--r--drivers/net/stmmac/norm_desc.c2
-rw-r--r--drivers/net/stmmac/stmmac.h1
-rw-r--r--drivers/net/stmmac/stmmac_ethtool.c33
-rw-r--r--drivers/net/stmmac/stmmac_main.c135
-rw-r--r--drivers/net/sunbmac.h2
-rw-r--r--drivers/net/sungem.c20
-rw-r--r--drivers/net/sunhme.c25
-rw-r--r--drivers/net/sunhme.h2
-rw-r--r--drivers/net/tc35815.c38
-rw-r--r--drivers/net/tehuti.c40
-rw-r--r--drivers/net/tehuti.h2
-rw-r--r--drivers/net/tg3.c2901
-rw-r--r--drivers/net/tg3.h271
-rw-r--r--drivers/net/tile/tilepro.c2
-rw-r--r--drivers/net/tokenring/3c359.c6
-rw-r--r--drivers/net/tokenring/lanstreamer.c2
-rw-r--r--drivers/net/tokenring/madgemc.c8
-rw-r--r--drivers/net/tokenring/olympic.c59
-rw-r--r--drivers/net/tokenring/smctr.c12
-rw-r--r--drivers/net/tokenring/tms380tr.h2
-rw-r--r--drivers/net/tsi108_eth.h8
-rw-r--r--drivers/net/tulip/21142.c14
-rw-r--r--drivers/net/tulip/Makefile2
-rw-r--r--drivers/net/tulip/de2104x.c183
-rw-r--r--drivers/net/tulip/de4x5.c10
-rw-r--r--drivers/net/tulip/dmfe.c11
-rw-r--r--drivers/net/tulip/eeprom.c6
-rw-r--r--drivers/net/tulip/interrupt.c48
-rw-r--r--drivers/net/tulip/media.c49
-rw-r--r--drivers/net/tulip/pnic.c22
-rw-r--r--drivers/net/tulip/pnic2.c16
-rw-r--r--drivers/net/tulip/timer.c47
-rw-r--r--drivers/net/tulip/tulip.h8
-rw-r--r--drivers/net/tulip/tulip_core.c50
-rw-r--r--drivers/net/tulip/uli526x.c71
-rw-r--r--drivers/net/tulip/winbond-840.c81
-rw-r--r--drivers/net/tulip/xircom_cb.c268
-rw-r--r--drivers/net/tun.c69
-rw-r--r--drivers/net/typhoon.c60
-rw-r--r--drivers/net/ucc_geth.h4
-rw-r--r--drivers/net/ucc_geth_ethtool.c1
-rw-r--r--drivers/net/usb/Kconfig2
-rw-r--r--drivers/net/usb/asix.c28
-rw-r--r--drivers/net/usb/catc.c2
-rw-r--r--drivers/net/usb/cdc_eem.c2
-rw-r--r--drivers/net/usb/cdc_ether.c16
-rw-r--r--drivers/net/usb/cdc_ncm.c11
-rw-r--r--drivers/net/usb/dm9601.c6
-rw-r--r--drivers/net/usb/ipheth.c14
-rw-r--r--drivers/net/usb/kaweth.c2
-rw-r--r--drivers/net/usb/plusb.c32
-rw-r--r--drivers/net/usb/rndis_host.c39
-rw-r--r--drivers/net/usb/rtl8150.c11
-rw-r--r--drivers/net/usb/smsc75xx.c131
-rw-r--r--drivers/net/usb/smsc95xx.c107
-rw-r--r--drivers/net/usb/usbnet.c24
-rw-r--r--drivers/net/veth.c60
-rw-r--r--drivers/net/via-rhine.c244
-rw-r--r--drivers/net/via-velocity.c32
-rw-r--r--drivers/net/virtio_net.c46
-rw-r--r--drivers/net/vmxnet3/vmxnet3_drv.c57
-rw-r--r--drivers/net/vmxnet3/vmxnet3_ethtool.c71
-rw-r--r--drivers/net/vmxnet3/vmxnet3_int.h11
-rw-r--r--drivers/net/vxge/vxge-config.c50
-rw-r--r--drivers/net/vxge/vxge-config.h70
-rw-r--r--drivers/net/vxge/vxge-ethtool.c104
-rw-r--r--drivers/net/vxge/vxge-main.c151
-rw-r--r--drivers/net/vxge/vxge-main.h14
-rw-r--r--drivers/net/vxge/vxge-traffic.c4
-rw-r--r--drivers/net/vxge/vxge-traffic.h4
-rw-r--r--drivers/net/vxge/vxge-version.h4
-rw-r--r--drivers/net/wan/cosa.c2
-rw-r--r--drivers/net/wan/dlci.c4
-rw-r--r--drivers/net/wan/dscc4.c8
-rw-r--r--drivers/net/wan/hdlc_fr.c9
-rw-r--r--drivers/net/wan/hostess_sv11.c2
-rw-r--r--drivers/net/wan/ixp4xx_hss.c4
-rw-r--r--drivers/net/wan/lapbether.c4
-rw-r--r--drivers/net/wan/lmc/lmc_main.c2
-rw-r--r--drivers/net/wan/lmc/lmc_var.h6
-rw-r--r--drivers/net/wan/z85230.c8
-rw-r--r--drivers/net/wimax/i2400m/control.c4
-rw-r--r--drivers/net/wimax/i2400m/driver.c4
-rw-r--r--drivers/net/wimax/i2400m/fw.c4
-rw-r--r--drivers/net/wimax/i2400m/i2400m-usb.h6
-rw-r--r--drivers/net/wimax/i2400m/i2400m.h4
-rw-r--r--drivers/net/wimax/i2400m/netdev.c2
-rw-r--r--drivers/net/wimax/i2400m/op-rfkill.c4
-rw-r--r--drivers/net/wimax/i2400m/rx.c4
-rw-r--r--drivers/net/wimax/i2400m/tx.c4
-rw-r--r--drivers/net/wimax/i2400m/usb-fw.c2
-rw-r--r--drivers/net/wimax/i2400m/usb-rx.c2
-rw-r--r--drivers/net/wimax/i2400m/usb-tx.c2
-rw-r--r--drivers/net/wireless/airo.c14
-rw-r--r--drivers/net/wireless/ath/ath5k/ani.h2
-rw-r--r--drivers/net/wireless/ath/ath5k/base.c6
-rw-r--r--drivers/net/wireless/ath/ath5k/desc.c14
-rw-r--r--drivers/net/wireless/ath/ath5k/eeprom.c4
-rw-r--r--drivers/net/wireless/ath/ath5k/pci.c2
-rw-r--r--drivers/net/wireless/ath/ath5k/pcu.c6
-rw-r--r--drivers/net/wireless/ath/ath5k/phy.c22
-rw-r--r--drivers/net/wireless/ath/ath5k/reg.h12
-rw-r--r--drivers/net/wireless/ath/ath9k/ar5008_phy.c2
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_eeprom.c2
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_hst.c2
-rw-r--r--drivers/net/wireless/ath/ath9k/pci.c2
-rw-r--r--drivers/net/wireless/ath/ath9k/rc.c2
-rw-r--r--drivers/net/wireless/ath/ath9k/xmit.c4
-rw-r--r--drivers/net/wireless/ath/carl9170/carl9170.h2
-rw-r--r--drivers/net/wireless/ath/carl9170/phy.c2
-rw-r--r--drivers/net/wireless/ath/carl9170/rx.c2
-rw-r--r--drivers/net/wireless/ath/carl9170/usb.c2
-rw-r--r--drivers/net/wireless/ath/hw.c2
-rw-r--r--drivers/net/wireless/ath/regd.c4
-rw-r--r--drivers/net/wireless/atmel.c4
-rw-r--r--drivers/net/wireless/atmel_cs.c2
-rw-r--r--drivers/net/wireless/b43/b43.h2
-rw-r--r--drivers/net/wireless/b43/main.c3
-rw-r--r--drivers/net/wireless/b43/phy_g.h2
-rw-r--r--drivers/net/wireless/b43/phy_n.h2
-rw-r--r--drivers/net/wireless/b43legacy/b43legacy.h2
-rw-r--r--drivers/net/wireless/hostap/hostap_ap.c2
-rw-r--r--drivers/net/wireless/hostap/hostap_ap.h2
-rw-r--r--drivers/net/wireless/hostap/hostap_ioctl.c4
-rw-r--r--drivers/net/wireless/hostap/hostap_main.c7
-rw-r--r--drivers/net/wireless/hostap/hostap_wlan.h2
-rw-r--r--drivers/net/wireless/ipw2x00/ipw2100.c2
-rw-r--r--drivers/net/wireless/ipw2x00/ipw2200.c18
-rw-r--r--drivers/net/wireless/ipw2x00/libipw_rx.c2
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-4965-tx.c18
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-core.c2
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-fh.h2
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-led.c20
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-scan.c2
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-sta.c2
-rw-r--r--drivers/net/wireless/iwlegacy/iwl4965-base.c8
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn-ict.c2
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn-tx.c17
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-fh.h2
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-scan.c2
-rw-r--r--drivers/net/wireless/iwmc3200wifi/hal.c4
-rw-r--r--drivers/net/wireless/iwmc3200wifi/tx.c2
-rw-r--r--drivers/net/wireless/libertas/README4
-rw-r--r--drivers/net/wireless/libertas/cfg.c2
-rw-r--r--drivers/net/wireless/libertas/if_spi.h2
-rw-r--r--drivers/net/wireless/mac80211_hwsim.c13
-rw-r--r--drivers/net/wireless/mwifiex/main.c4
-rw-r--r--drivers/net/wireless/orinoco/hw.c2
-rw-r--r--drivers/net/wireless/p54/main.c2
-rw-r--r--drivers/net/wireless/p54/p54spi.c2
-rw-r--r--drivers/net/wireless/prism54/islpci_eth.c4
-rw-r--r--drivers/net/wireless/rayctl.h2
-rw-r--r--drivers/net/wireless/rt2x00/rt2800.h2
-rw-r--r--drivers/net/wireless/rt2x00/rt2800lib.c2
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00.h8
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00config.c2
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00crypto.c2
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00dump.h2
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00link.c2
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00queue.c4
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00queue.h12
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00usb.c2
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00usb.h2
-rw-r--r--drivers/net/wireless/rtlwifi/base.c2
-rw-r--r--drivers/net/wireless/rtlwifi/regd.c2
-rw-r--r--drivers/net/wireless/rtlwifi/wifi.h2
-rw-r--r--drivers/net/wireless/wl1251/cmd.c2
-rw-r--r--drivers/net/wireless/wl1251/rx.c2
-rw-r--r--drivers/net/wireless/wl12xx/cmd.c2
-rw-r--r--drivers/net/wireless/wl12xx/conf.h8
-rw-r--r--drivers/net/wireless/wl12xx/io.h2
-rw-r--r--drivers/net/wireless/wl3501_cs.c4
-rw-r--r--drivers/net/wireless/zd1211rw/zd_rf_rf2959.c2
-rw-r--r--drivers/net/wireless/zd1211rw/zd_rf_uw2453.c2
-rw-r--r--drivers/net/xen-netback/common.h3
-rw-r--r--drivers/net/xen-netback/interface.c84
-rw-r--r--drivers/net/xen-netback/xenbus.c2
-rw-r--r--drivers/net/xen-netfront.c106
-rw-r--r--drivers/net/xilinx_emaclite.c2
-rw-r--r--drivers/net/znet.c2
525 files changed, 12524 insertions, 10757 deletions
diff --git a/drivers/net/3c501.c b/drivers/net/3c501.c
index 9e1c03eb97ae..5420f6de27df 100644
--- a/drivers/net/3c501.c
+++ b/drivers/net/3c501.c
@@ -399,7 +399,7 @@ static void el_timeout(struct net_device *dev)
* as we may still be attempting to retrieve the last RX packet buffer.
*
* When a transmit times out we dump the card into control mode and just
- * start again. It happens enough that it isnt worth logging.
+ * start again. It happens enough that it isn't worth logging.
*
* We avoid holding the spin locks when doing the packet load to the board.
* The device is very slow, and its DMA mode is even slower. If we held the
@@ -499,7 +499,7 @@ static netdev_tx_t el_start_xmit(struct sk_buff *skb, struct net_device *dev)
*
* Handle the ether interface interrupts. The 3c501 needs a lot more
* hand holding than most cards. In particular we get a transmit interrupt
- * with a collision error because the board firmware isnt capable of rewinding
+ * with a collision error because the board firmware isn't capable of rewinding
* its own transmit buffer pointers. It can however count to 16 for us.
*
* On the receive side the card is also very dumb. It has no buffering to
@@ -732,7 +732,7 @@ static void el_receive(struct net_device *dev)
* el_reset: Reset a 3c501 card
* @dev: The 3c501 card about to get zapped
*
- * Even resetting a 3c501 isnt simple. When you activate reset it loses all
+ * Even resetting a 3c501 isn't simple. When you activate reset it loses all
* its configuration. You must hold the lock when doing this. The function
* cannot take the lock itself as it is callable from the irq handler.
*/
diff --git a/drivers/net/3c509.c b/drivers/net/3c509.c
index 91abb965fb44..5f25889e27ef 100644
--- a/drivers/net/3c509.c
+++ b/drivers/net/3c509.c
@@ -185,7 +185,7 @@ static int max_interrupt_work = 10;
static int nopnp;
#endif
-static int __devinit el3_common_init(struct net_device *dev);
+static int el3_common_init(struct net_device *dev);
static void el3_common_remove(struct net_device *dev);
static ushort id_read_eeprom(int index);
static ushort read_eeprom(int ioaddr, int index);
@@ -395,7 +395,7 @@ static struct isa_driver el3_isa_driver = {
static int isa_registered;
#ifdef CONFIG_PNP
-static struct pnp_device_id el3_pnp_ids[] = {
+static const struct pnp_device_id el3_pnp_ids[] __devinitconst = {
{ .id = "TCM5090" }, /* 3Com Etherlink III (TP) */
{ .id = "TCM5091" }, /* 3Com Etherlink III */
{ .id = "TCM5094" }, /* 3Com Etherlink III (combo) */
@@ -478,7 +478,7 @@ static int pnp_registered;
#endif /* CONFIG_PNP */
#ifdef CONFIG_EISA
-static struct eisa_device_id el3_eisa_ids[] = {
+static const struct eisa_device_id el3_eisa_ids[] __devinitconst = {
{ "TCM5090" },
{ "TCM5091" },
{ "TCM5092" },
@@ -508,7 +508,7 @@ static int eisa_registered;
#ifdef CONFIG_MCA
static int el3_mca_probe(struct device *dev);
-static short el3_mca_adapter_ids[] __initdata = {
+static const short el3_mca_adapter_ids[] __devinitconst = {
0x627c,
0x627d,
0x62db,
@@ -517,7 +517,7 @@ static short el3_mca_adapter_ids[] __initdata = {
0x0000
};
-static char *el3_mca_adapter_names[] __initdata = {
+static const char *const el3_mca_adapter_names[] __devinitconst = {
"3Com 3c529 EtherLink III (10base2)",
"3Com 3c529 EtherLink III (10baseT)",
"3Com 3c529 EtherLink III (test mode)",
@@ -601,7 +601,7 @@ static void el3_common_remove (struct net_device *dev)
}
#ifdef CONFIG_MCA
-static int __init el3_mca_probe(struct device *device)
+static int __devinit el3_mca_probe(struct device *device)
{
/* Based on Erik Nygren's (nygren@mit.edu) 3c529 patch,
* heavily modified by Chris Beauregard
@@ -671,7 +671,7 @@ static int __init el3_mca_probe(struct device *device)
#endif /* CONFIG_MCA */
#ifdef CONFIG_EISA
-static int __init el3_eisa_probe (struct device *device)
+static int __devinit el3_eisa_probe (struct device *device)
{
short i;
int ioaddr, irq, if_port;
@@ -1207,7 +1207,7 @@ el3_netdev_get_ecmd(struct net_device *dev, struct ethtool_cmd *ecmd)
ecmd->duplex = DUPLEX_FULL;
}
- ecmd->speed = SPEED_10;
+ ethtool_cmd_speed_set(ecmd, SPEED_10);
EL3WINDOW(1);
return 0;
}
diff --git a/drivers/net/3c523.c b/drivers/net/3c523.c
index de579d043169..bc0d1a1c2e28 100644
--- a/drivers/net/3c523.c
+++ b/drivers/net/3c523.c
@@ -44,7 +44,7 @@
this for the 64K version would require a lot of heinous bank
switching, which I'm sure not interested in doing. If you try to
implement a bank switching version, you'll basically have to remember
- what bank is enabled and do a switch everytime you access a memory
+ what bank is enabled and do a switch every time you access a memory
location that's not current. You'll also have to remap pointers on
the driver side, because it only knows about 16K of the memory.
Anyone desperate or masochistic enough to try?
diff --git a/drivers/net/3c527.c b/drivers/net/3c527.c
index 8c094bae8bf3..d9d056d207f3 100644
--- a/drivers/net/3c527.c
+++ b/drivers/net/3c527.c
@@ -51,7 +51,7 @@ DRV_NAME ".c:v" DRV_VERSION " " DRV_RELDATE " Richard Procter <rnp@paradise.net.
* circular buffer queues.
*
* The mailboxes can be used for controlling how the card traverses
- * its buffer rings, but are used only for inital setup in this
+ * its buffer rings, but are used only for initial setup in this
* implementation. The exec mailbox allows a variety of commands to
* be executed. Each command must complete before the next is
* executed. Primarily we use the exec mailbox for controlling the
@@ -813,7 +813,7 @@ static void mc32_flush_rx_ring(struct net_device *dev)
*
* This sets up the host transmit data-structures.
*
- * First, we obtain from the card it's current postion in the tx
+ * First, we obtain from the card it's current position in the tx
* ring, so that we will know where to begin transmitting
* packets.
*
diff --git a/drivers/net/3c59x.c b/drivers/net/3c59x.c
index 0a92436f0538..99f43d275442 100644
--- a/drivers/net/3c59x.c
+++ b/drivers/net/3c59x.c
@@ -901,14 +901,14 @@ static const struct dev_pm_ops vortex_pm_ops = {
#endif /* !CONFIG_PM */
#ifdef CONFIG_EISA
-static struct eisa_device_id vortex_eisa_ids[] = {
+static const struct eisa_device_id vortex_eisa_ids[] __devinitconst = {
{ "TCM5920", CH_3C592 },
{ "TCM5970", CH_3C597 },
{ "" }
};
MODULE_DEVICE_TABLE(eisa, vortex_eisa_ids);
-static int __init vortex_eisa_probe(struct device *device)
+static int __devinit vortex_eisa_probe(struct device *device)
{
void __iomem *ioaddr;
struct eisa_device *edev;
@@ -984,7 +984,7 @@ static int __init vortex_eisa_init(void)
* any device have been found when we exit from
* eisa_driver_register (the bus root driver may not be
* initialized yet). So we blindly assume something was
- * found, and let the sysfs magic happend...
+ * found, and let the sysfs magic happened...
*/
eisa_found = 1;
}
diff --git a/drivers/net/8139cp.c b/drivers/net/8139cp.c
index dd16e83933a2..10c45051caea 100644
--- a/drivers/net/8139cp.c
+++ b/drivers/net/8139cp.c
@@ -758,8 +758,7 @@ static netdev_tx_t cp_start_xmit (struct sk_buff *skb,
entry = cp->tx_head;
eor = (entry == (CP_TX_RING_SIZE - 1)) ? RingEnd : 0;
- if (dev->features & NETIF_F_TSO)
- mss = skb_shinfo(skb)->gso_size;
+ mss = skb_shinfo(skb)->gso_size;
if (skb_shinfo(skb)->nr_frags == 0) {
struct cp_desc *txd = &cp->tx_ring[entry];
@@ -1416,32 +1415,23 @@ static void cp_set_msglevel(struct net_device *dev, u32 value)
cp->msg_enable = value;
}
-static u32 cp_get_rx_csum(struct net_device *dev)
+static int cp_set_features(struct net_device *dev, u32 features)
{
struct cp_private *cp = netdev_priv(dev);
- return (cpr16(CpCmd) & RxChkSum) ? 1 : 0;
-}
+ unsigned long flags;
-static int cp_set_rx_csum(struct net_device *dev, u32 data)
-{
- struct cp_private *cp = netdev_priv(dev);
- u16 cmd = cp->cpcmd, newcmd;
+ if (!((dev->features ^ features) & NETIF_F_RXCSUM))
+ return 0;
- newcmd = cmd;
+ spin_lock_irqsave(&cp->lock, flags);
- if (data)
- newcmd |= RxChkSum;
+ if (features & NETIF_F_RXCSUM)
+ cp->cpcmd |= RxChkSum;
else
- newcmd &= ~RxChkSum;
-
- if (newcmd != cmd) {
- unsigned long flags;
+ cp->cpcmd &= ~RxChkSum;
- spin_lock_irqsave(&cp->lock, flags);
- cp->cpcmd = newcmd;
- cpw16_f(CpCmd, newcmd);
- spin_unlock_irqrestore(&cp->lock, flags);
- }
+ cpw16_f(CpCmd, cp->cpcmd);
+ spin_unlock_irqrestore(&cp->lock, flags);
return 0;
}
@@ -1554,11 +1544,6 @@ static const struct ethtool_ops cp_ethtool_ops = {
.get_link = ethtool_op_get_link,
.get_msglevel = cp_get_msglevel,
.set_msglevel = cp_set_msglevel,
- .get_rx_csum = cp_get_rx_csum,
- .set_rx_csum = cp_set_rx_csum,
- .set_tx_csum = ethtool_op_set_tx_csum, /* local! */
- .set_sg = ethtool_op_set_sg,
- .set_tso = ethtool_op_set_tso,
.get_regs = cp_get_regs,
.get_wol = cp_get_wol,
.set_wol = cp_set_wol,
@@ -1831,6 +1816,7 @@ static const struct net_device_ops cp_netdev_ops = {
.ndo_do_ioctl = cp_ioctl,
.ndo_start_xmit = cp_start_xmit,
.ndo_tx_timeout = cp_tx_timeout,
+ .ndo_set_features = cp_set_features,
#if CP_VLAN_TAG_USED
.ndo_vlan_rx_register = cp_vlan_rx_register,
#endif
@@ -1934,6 +1920,9 @@ static int cp_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
cp->cpcmd = (pci_using_dac ? PCIDAC : 0) |
PCIMulRW | RxChkSum | CpRxOn | CpTxOn;
+ dev->features |= NETIF_F_RXCSUM;
+ dev->hw_features |= NETIF_F_RXCSUM;
+
regs = ioremap(pciaddr, CP_REGS_SIZE);
if (!regs) {
rc = -EIO;
@@ -1966,9 +1955,8 @@ static int cp_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
if (pci_using_dac)
dev->features |= NETIF_F_HIGHDMA;
-#if 0 /* disabled by default until verified */
- dev->features |= NETIF_F_TSO;
-#endif
+ /* disabled by default until verified */
+ dev->hw_features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO;
dev->irq = pdev->irq;
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index dc280bc8eba2..6c884ef1b069 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -2536,7 +2536,7 @@ config S6GMAC
source "drivers/net/stmmac/Kconfig"
config PCH_GBE
- tristate "PCH Gigabit Ethernet"
+ tristate "Intel EG20T PCH / OKI SEMICONDUCTOR ML7223 IOH GbE"
depends on PCI
select MII
---help---
@@ -2548,6 +2548,12 @@ config PCH_GBE
to Gigabit Ethernet.
This driver enables Gigabit Ethernet function.
+ This driver also can be used for OKI SEMICONDUCTOR IOH(Input/
+ Output Hub), ML7223.
+ ML7223 IOH is for MP(Media Phone) use.
+ ML7223 is companion chip for Intel Atom E6xx series.
+ ML7223 is completely compatible for Intel EG20T PCH.
+
endif # NETDEV_1000
#
diff --git a/drivers/net/acenic.c b/drivers/net/acenic.c
index 41d9911202d0..82260ca70323 100644
--- a/drivers/net/acenic.c
+++ b/drivers/net/acenic.c
@@ -1584,7 +1584,7 @@ static void ace_watchdog(struct net_device *data)
/*
* We haven't received a stats update event for more than 2.5
* seconds and there is data in the transmit queue, thus we
- * asume the card is stuck.
+ * assume the card is stuck.
*/
if (*ap->tx_csm != ap->tx_ret_csm) {
printk(KERN_WARNING "%s: Transmitter is stuck, %08x\n",
@@ -2564,7 +2564,7 @@ restart:
/*
* A TX-descriptor producer (an IRQ) might have gotten
- * inbetween, making the ring free again. Since xmit is
+ * between, making the ring free again. Since xmit is
* serialized, this is the only situation we have to
* re-test.
*/
@@ -2658,15 +2658,15 @@ static int ace_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
link = readl(&regs->GigLnkState);
if (link & LNK_1000MB)
- ecmd->speed = SPEED_1000;
+ ethtool_cmd_speed_set(ecmd, SPEED_1000);
else {
link = readl(&regs->FastLnkState);
if (link & LNK_100MB)
- ecmd->speed = SPEED_100;
+ ethtool_cmd_speed_set(ecmd, SPEED_100);
else if (link & LNK_10MB)
- ecmd->speed = SPEED_10;
+ ethtool_cmd_speed_set(ecmd, SPEED_10);
else
- ecmd->speed = 0;
+ ethtool_cmd_speed_set(ecmd, 0);
}
if (link & LNK_FULL_DUPLEX)
ecmd->duplex = DUPLEX_FULL;
@@ -2718,9 +2718,9 @@ static int ace_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
link |= LNK_TX_FLOW_CTL_Y;
if (ecmd->autoneg == AUTONEG_ENABLE)
link |= LNK_NEGOTIATE;
- if (ecmd->speed != speed) {
+ if (ethtool_cmd_speed(ecmd) != speed) {
link &= ~(LNK_1000MB | LNK_100MB | LNK_10MB);
- switch (speed) {
+ switch (ethtool_cmd_speed(ecmd)) {
case SPEED_1000:
link |= LNK_1000MB;
break;
diff --git a/drivers/net/amd8111e.c b/drivers/net/amd8111e.c
index 2ca880b4c0db..241b185e6569 100644
--- a/drivers/net/amd8111e.c
+++ b/drivers/net/amd8111e.c
@@ -106,7 +106,7 @@ MODULE_DESCRIPTION ("AMD8111 based 10/100 Ethernet Controller. Driver Version "M
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, amd8111e_pci_tbl);
module_param_array(speed_duplex, int, NULL, 0);
-MODULE_PARM_DESC(speed_duplex, "Set device speed and duplex modes, 0: Auto Negotitate, 1: 10Mbps Half Duplex, 2: 10Mbps Full Duplex, 3: 100Mbps Half Duplex, 4: 100Mbps Full Duplex");
+MODULE_PARM_DESC(speed_duplex, "Set device speed and duplex modes, 0: Auto Negotiate, 1: 10Mbps Half Duplex, 2: 10Mbps Full Duplex, 3: 100Mbps Half Duplex, 4: 100Mbps Full Duplex");
module_param_array(coalesce, bool, NULL, 0);
MODULE_PARM_DESC(coalesce, "Enable or Disable interrupt coalescing, 1: Enable, 0: Disable");
module_param_array(dynamic_ipg, bool, NULL, 0);
@@ -1398,7 +1398,7 @@ static void amd8111e_set_multicast_list(struct net_device *dev)
mc_filter[1] = mc_filter[0] = 0;
lp->options &= ~OPTION_MULTICAST_ENABLE;
amd8111e_writeq(*(u64*)mc_filter,lp->mmio + LADRF);
- /* disable promiscous mode */
+ /* disable promiscuous mode */
writel(PROM, lp->mmio + CMD2);
return;
}
diff --git a/drivers/net/arm/etherh.c b/drivers/net/arm/etherh.c
index 4af235d41fda..e252cd595016 100644
--- a/drivers/net/arm/etherh.c
+++ b/drivers/net/arm/etherh.c
@@ -591,10 +591,11 @@ static void etherh_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *i
static int etherh_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
cmd->supported = etherh_priv(dev)->supported;
- cmd->speed = SPEED_10;
+ ethtool_cmd_speed_set(cmd, SPEED_10);
cmd->duplex = DUPLEX_HALF;
cmd->port = dev->if_port == IF_PORT_10BASET ? PORT_TP : PORT_BNC;
- cmd->autoneg = dev->flags & IFF_AUTOMEDIA ? AUTONEG_ENABLE : AUTONEG_DISABLE;
+ cmd->autoneg = (dev->flags & IFF_AUTOMEDIA ?
+ AUTONEG_ENABLE : AUTONEG_DISABLE);
return 0;
}
diff --git a/drivers/net/arm/ks8695net.c b/drivers/net/arm/ks8695net.c
index aa07657744c3..a7b0caa18179 100644
--- a/drivers/net/arm/ks8695net.c
+++ b/drivers/net/arm/ks8695net.c
@@ -891,15 +891,16 @@ ks8695_wan_get_settings(struct net_device *ndev, struct ethtool_cmd *cmd)
cmd->advertising |= ADVERTISED_Pause;
cmd->autoneg = AUTONEG_ENABLE;
- cmd->speed = (ctrl & WMC_WSS) ? SPEED_100 : SPEED_10;
+ ethtool_cmd_speed_set(cmd,
+ (ctrl & WMC_WSS) ? SPEED_100 : SPEED_10);
cmd->duplex = (ctrl & WMC_WDS) ?
DUPLEX_FULL : DUPLEX_HALF;
} else {
/* auto-negotiation is disabled */
cmd->autoneg = AUTONEG_DISABLE;
- cmd->speed = (ctrl & WMC_WANF100) ?
- SPEED_100 : SPEED_10;
+ ethtool_cmd_speed_set(cmd, ((ctrl & WMC_WANF100) ?
+ SPEED_100 : SPEED_10));
cmd->duplex = (ctrl & WMC_WANFF) ?
DUPLEX_FULL : DUPLEX_HALF;
}
diff --git a/drivers/net/at1700.c b/drivers/net/at1700.c
index f4744fc89768..65a78f965dd2 100644
--- a/drivers/net/at1700.c
+++ b/drivers/net/at1700.c
@@ -133,7 +133,7 @@ struct net_local {
/* Run-time register bank 2 definitions. */
#define DATAPORT 8 /* Word-wide DMA or programmed-I/O dataport. */
#define TX_START 10
-#define COL16CNTL 11 /* Controll Reg for 16 collisions */
+#define COL16CNTL 11 /* Control Reg for 16 collisions */
#define MODE13 13
#define RX_CTRL 14
/* Configuration registers only on the '865A/B chips. */
diff --git a/drivers/net/atl1c/atl1c.h b/drivers/net/atl1c/atl1c.h
index 7cb375e0e29c..925929d764ca 100644
--- a/drivers/net/atl1c/atl1c.h
+++ b/drivers/net/atl1c/atl1c.h
@@ -566,9 +566,9 @@ struct atl1c_adapter {
#define __AT_TESTING 0x0001
#define __AT_RESETTING 0x0002
#define __AT_DOWN 0x0003
- u8 work_event;
-#define ATL1C_WORK_EVENT_RESET 0x01
-#define ATL1C_WORK_EVENT_LINK_CHANGE 0x02
+ unsigned long work_event;
+#define ATL1C_WORK_EVENT_RESET 0
+#define ATL1C_WORK_EVENT_LINK_CHANGE 1
u32 msg_enable;
bool have_msi;
diff --git a/drivers/net/atl1c/atl1c_ethtool.c b/drivers/net/atl1c/atl1c_ethtool.c
index 7c521508313c..7be884d0aaf6 100644
--- a/drivers/net/atl1c/atl1c_ethtool.c
+++ b/drivers/net/atl1c/atl1c_ethtool.c
@@ -50,13 +50,13 @@ static int atl1c_get_settings(struct net_device *netdev,
ecmd->transceiver = XCVR_INTERNAL;
if (adapter->link_speed != SPEED_0) {
- ecmd->speed = adapter->link_speed;
+ ethtool_cmd_speed_set(ecmd, adapter->link_speed);
if (adapter->link_duplex == FULL_DUPLEX)
ecmd->duplex = DUPLEX_FULL;
else
ecmd->duplex = DUPLEX_HALF;
} else {
- ecmd->speed = -1;
+ ethtool_cmd_speed_set(ecmd, -1);
ecmd->duplex = -1;
}
@@ -77,7 +77,8 @@ static int atl1c_set_settings(struct net_device *netdev,
if (ecmd->autoneg == AUTONEG_ENABLE) {
autoneg_advertised = ADVERTISED_Autoneg;
} else {
- if (ecmd->speed == SPEED_1000) {
+ u32 speed = ethtool_cmd_speed(ecmd);
+ if (speed == SPEED_1000) {
if (ecmd->duplex != DUPLEX_FULL) {
if (netif_msg_link(adapter))
dev_warn(&adapter->pdev->dev,
@@ -86,7 +87,7 @@ static int atl1c_set_settings(struct net_device *netdev,
return -EINVAL;
}
autoneg_advertised = ADVERTISED_1000baseT_Full;
- } else if (ecmd->speed == SPEED_100) {
+ } else if (speed == SPEED_100) {
if (ecmd->duplex == DUPLEX_FULL)
autoneg_advertised = ADVERTISED_100baseT_Full;
else
@@ -113,11 +114,6 @@ static int atl1c_set_settings(struct net_device *netdev,
return 0;
}
-static u32 atl1c_get_tx_csum(struct net_device *netdev)
-{
- return (netdev->features & NETIF_F_HW_CSUM) != 0;
-}
-
static u32 atl1c_get_msglevel(struct net_device *netdev)
{
struct atl1c_adapter *adapter = netdev_priv(netdev);
@@ -307,9 +303,6 @@ static const struct ethtool_ops atl1c_ethtool_ops = {
.get_link = ethtool_op_get_link,
.get_eeprom_len = atl1c_get_eeprom_len,
.get_eeprom = atl1c_get_eeprom,
- .get_tx_csum = atl1c_get_tx_csum,
- .get_sg = ethtool_op_get_sg,
- .set_sg = ethtool_op_set_sg,
};
void atl1c_set_ethtool_ops(struct net_device *netdev)
diff --git a/drivers/net/atl1c/atl1c_main.c b/drivers/net/atl1c/atl1c_main.c
index 7d9d5067a65c..48868de386a0 100644
--- a/drivers/net/atl1c/atl1c_main.c
+++ b/drivers/net/atl1c/atl1c_main.c
@@ -325,7 +325,7 @@ static void atl1c_link_chg_event(struct atl1c_adapter *adapter)
}
}
- adapter->work_event |= ATL1C_WORK_EVENT_LINK_CHANGE;
+ set_bit(ATL1C_WORK_EVENT_LINK_CHANGE, &adapter->work_event);
schedule_work(&adapter->common_task);
}
@@ -337,20 +337,16 @@ static void atl1c_common_task(struct work_struct *work)
adapter = container_of(work, struct atl1c_adapter, common_task);
netdev = adapter->netdev;
- if (adapter->work_event & ATL1C_WORK_EVENT_RESET) {
- adapter->work_event &= ~ATL1C_WORK_EVENT_RESET;
+ if (test_and_clear_bit(ATL1C_WORK_EVENT_RESET, &adapter->work_event)) {
netif_device_detach(netdev);
atl1c_down(adapter);
atl1c_up(adapter);
netif_device_attach(netdev);
- return;
}
- if (adapter->work_event & ATL1C_WORK_EVENT_LINK_CHANGE) {
- adapter->work_event &= ~ATL1C_WORK_EVENT_LINK_CHANGE;
+ if (test_and_clear_bit(ATL1C_WORK_EVENT_LINK_CHANGE,
+ &adapter->work_event))
atl1c_check_link_status(adapter);
- }
- return;
}
@@ -369,7 +365,7 @@ static void atl1c_tx_timeout(struct net_device *netdev)
struct atl1c_adapter *adapter = netdev_priv(netdev);
/* Do the reset outside of interrupt context */
- adapter->work_event |= ATL1C_WORK_EVENT_RESET;
+ set_bit(ATL1C_WORK_EVENT_RESET, &adapter->work_event);
schedule_work(&adapter->common_task);
}
@@ -484,6 +480,15 @@ static void atl1c_set_rxbufsize(struct atl1c_adapter *adapter,
adapter->rx_buffer_len = mtu > AT_RX_BUF_SIZE ?
roundup(mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN, 8) : AT_RX_BUF_SIZE;
}
+
+static u32 atl1c_fix_features(struct net_device *netdev, u32 features)
+{
+ if (netdev->mtu > MAX_TSO_FRAME_SIZE)
+ features &= ~(NETIF_F_TSO | NETIF_F_TSO6);
+
+ return features;
+}
+
/*
* atl1c_change_mtu - Change the Maximum Transfer Unit
* @netdev: network interface device structure
@@ -510,14 +515,8 @@ static int atl1c_change_mtu(struct net_device *netdev, int new_mtu)
netdev->mtu = new_mtu;
adapter->hw.max_frame_size = new_mtu;
atl1c_set_rxbufsize(adapter, netdev);
- if (new_mtu > MAX_TSO_FRAME_SIZE) {
- adapter->netdev->features &= ~NETIF_F_TSO;
- adapter->netdev->features &= ~NETIF_F_TSO6;
- } else {
- adapter->netdev->features |= NETIF_F_TSO;
- adapter->netdev->features |= NETIF_F_TSO6;
- }
atl1c_down(adapter);
+ netdev_update_features(netdev);
atl1c_up(adapter);
clear_bit(__AT_RESETTING, &adapter->flags);
if (adapter->hw.ctrl_flags & ATL1C_FPGA_VERSION) {
@@ -1092,10 +1091,8 @@ static void atl1c_configure_tx(struct atl1c_adapter *adapter)
u32 max_pay_load;
u16 tx_offload_thresh;
u32 txq_ctrl_data;
- u32 extra_size = 0; /* Jumbo frame threshold in QWORD unit */
u32 max_pay_load_data;
- extra_size = ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN;
tx_offload_thresh = MAX_TX_OFFLOAD_THRESH;
AT_WRITE_REG(hw, REG_TX_TSO_OFFLOAD_THRESH,
(tx_offload_thresh >> 3) & TX_TSO_OFFLOAD_THRESH_MASK);
@@ -2585,6 +2582,7 @@ static const struct net_device_ops atl1c_netdev_ops = {
.ndo_set_mac_address = atl1c_set_mac_addr,
.ndo_set_multicast_list = atl1c_set_multi,
.ndo_change_mtu = atl1c_change_mtu,
+ .ndo_fix_features = atl1c_fix_features,
.ndo_do_ioctl = atl1c_ioctl,
.ndo_tx_timeout = atl1c_tx_timeout,
.ndo_get_stats = atl1c_get_stats,
@@ -2605,12 +2603,13 @@ static int atl1c_init_netdev(struct net_device *netdev, struct pci_dev *pdev)
atl1c_set_ethtool_ops(netdev);
/* TODO: add when ready */
- netdev->features = NETIF_F_SG |
+ netdev->hw_features = NETIF_F_SG |
NETIF_F_HW_CSUM |
NETIF_F_HW_VLAN_TX |
- NETIF_F_HW_VLAN_RX |
NETIF_F_TSO |
NETIF_F_TSO6;
+ netdev->features = netdev->hw_features |
+ NETIF_F_HW_VLAN_RX;
return 0;
}
diff --git a/drivers/net/atl1e/atl1e_ethtool.c b/drivers/net/atl1e/atl1e_ethtool.c
index 1209297433b8..6269438d365f 100644
--- a/drivers/net/atl1e/atl1e_ethtool.c
+++ b/drivers/net/atl1e/atl1e_ethtool.c
@@ -51,13 +51,13 @@ static int atl1e_get_settings(struct net_device *netdev,
ecmd->transceiver = XCVR_INTERNAL;
if (adapter->link_speed != SPEED_0) {
- ecmd->speed = adapter->link_speed;
+ ethtool_cmd_speed_set(ecmd, adapter->link_speed);
if (adapter->link_duplex == FULL_DUPLEX)
ecmd->duplex = DUPLEX_FULL;
else
ecmd->duplex = DUPLEX_HALF;
} else {
- ecmd->speed = -1;
+ ethtool_cmd_speed_set(ecmd, -1);
ecmd->duplex = -1;
}
@@ -382,9 +382,6 @@ static const struct ethtool_ops atl1e_ethtool_ops = {
.get_eeprom_len = atl1e_get_eeprom_len,
.get_eeprom = atl1e_get_eeprom,
.set_eeprom = atl1e_set_eeprom,
- .set_tx_csum = ethtool_op_set_tx_hw_csum,
- .set_sg = ethtool_op_set_sg,
- .set_tso = ethtool_op_set_tso,
};
void atl1e_set_ethtool_ops(struct net_device *netdev)
diff --git a/drivers/net/atl1e/atl1e_main.c b/drivers/net/atl1e/atl1e_main.c
index 1ff001a8270c..86a912283134 100644
--- a/drivers/net/atl1e/atl1e_main.c
+++ b/drivers/net/atl1e/atl1e_main.c
@@ -691,10 +691,8 @@ static void atl1e_cal_ring_size(struct atl1e_adapter *adapter, u32 *ring_size)
static void atl1e_init_ring_resources(struct atl1e_adapter *adapter)
{
- struct atl1e_tx_ring *tx_ring = NULL;
struct atl1e_rx_ring *rx_ring = NULL;
- tx_ring = &adapter->tx_ring;
rx_ring = &adapter->rx_ring;
rx_ring->real_page_size = adapter->rx_ring.page_size
@@ -1927,11 +1925,7 @@ void atl1e_down(struct atl1e_adapter *adapter)
* reschedule our watchdog timer */
set_bit(__AT_DOWN, &adapter->flags);
-#ifdef NETIF_F_LLTX
netif_stop_queue(netdev);
-#else
- netif_tx_disable(netdev);
-#endif
/* reset MAC to disable all RX/TX */
atl1e_reset_hw(&adapter->hw);
@@ -2223,10 +2217,10 @@ static int atl1e_init_netdev(struct net_device *netdev, struct pci_dev *pdev)
netdev->watchdog_timeo = AT_TX_WATCHDOG;
atl1e_set_ethtool_ops(netdev);
- netdev->features = NETIF_F_SG | NETIF_F_HW_CSUM |
- NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
- netdev->features |= NETIF_F_LLTX;
- netdev->features |= NETIF_F_TSO;
+ netdev->hw_features = NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_TSO |
+ NETIF_F_HW_VLAN_TX;
+ netdev->features = netdev->hw_features |
+ NETIF_F_HW_VLAN_RX | NETIF_F_LLTX;
return 0;
}
@@ -2509,7 +2503,7 @@ static struct pci_driver atl1e_driver = {
.id_table = atl1e_pci_tbl,
.probe = atl1e_probe,
.remove = __devexit_p(atl1e_remove),
- /* Power Managment Hooks */
+ /* Power Management Hooks */
#ifdef CONFIG_PM
.suspend = atl1e_suspend,
.resume = atl1e_resume,
diff --git a/drivers/net/atlx/atl1.c b/drivers/net/atlx/atl1.c
index 67f40b9c16ed..c5298d1ab744 100644
--- a/drivers/net/atlx/atl1.c
+++ b/drivers/net/atlx/atl1.c
@@ -2074,9 +2074,6 @@ static void atl1_intr_tx(struct atl1_adapter *adapter)
cmb_tpd_next_to_clean = le16_to_cpu(adapter->cmb.cmb->tpd_cons_idx);
while (cmb_tpd_next_to_clean != sw_tpd_next_to_clean) {
- struct tx_packet_desc *tpd;
-
- tpd = ATL1_TPD_DESC(tpd_ring, sw_tpd_next_to_clean);
buffer_info = &tpd_ring->buffer_info[sw_tpd_next_to_clean];
if (buffer_info->dma) {
pci_unmap_page(adapter->pdev, buffer_info->dma,
@@ -2572,7 +2569,7 @@ static s32 atl1_up(struct atl1_adapter *adapter)
{
struct net_device *netdev = adapter->netdev;
int err;
- int irq_flags = IRQF_SAMPLE_RANDOM;
+ int irq_flags = 0;
/* hardware has been reset, we need to reload some things */
atlx_set_multi(netdev);
@@ -2986,6 +2983,11 @@ static int __devinit atl1_probe(struct pci_dev *pdev,
netdev->features |= NETIF_F_SG;
netdev->features |= (NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX);
+ netdev->hw_features = NETIF_F_HW_CSUM | NETIF_F_SG | NETIF_F_TSO;
+
+ /* is this valid? see atl1_setup_mac_ctrl() */
+ netdev->features |= NETIF_F_RXCSUM;
+
/*
* patch for some L1 of old version,
* the final version of L1 may not need these
@@ -3229,13 +3231,13 @@ static int atl1_get_settings(struct net_device *netdev,
if (netif_carrier_ok(adapter->netdev)) {
u16 link_speed, link_duplex;
atl1_get_speed_and_duplex(hw, &link_speed, &link_duplex);
- ecmd->speed = link_speed;
+ ethtool_cmd_speed_set(ecmd, link_speed);
if (link_duplex == FULL_DUPLEX)
ecmd->duplex = DUPLEX_FULL;
else
ecmd->duplex = DUPLEX_HALF;
} else {
- ecmd->speed = -1;
+ ethtool_cmd_speed_set(ecmd, -1);
ecmd->duplex = -1;
}
if (hw->media_type == MEDIA_TYPE_AUTO_SENSOR ||
@@ -3266,7 +3268,8 @@ static int atl1_set_settings(struct net_device *netdev,
if (ecmd->autoneg == AUTONEG_ENABLE)
hw->media_type = MEDIA_TYPE_AUTO_SENSOR;
else {
- if (ecmd->speed == SPEED_1000) {
+ u32 speed = ethtool_cmd_speed(ecmd);
+ if (speed == SPEED_1000) {
if (ecmd->duplex != DUPLEX_FULL) {
if (netif_msg_link(adapter))
dev_warn(&adapter->pdev->dev,
@@ -3275,7 +3278,7 @@ static int atl1_set_settings(struct net_device *netdev,
goto exit_sset;
}
hw->media_type = MEDIA_TYPE_1000M_FULL;
- } else if (ecmd->speed == SPEED_100) {
+ } else if (speed == SPEED_100) {
if (ecmd->duplex == DUPLEX_FULL)
hw->media_type = MEDIA_TYPE_100M_FULL;
else
@@ -3595,12 +3598,6 @@ static int atl1_set_pauseparam(struct net_device *netdev,
return 0;
}
-/* FIXME: is this right? -- CHS */
-static u32 atl1_get_rx_csum(struct net_device *netdev)
-{
- return 1;
-}
-
static void atl1_get_strings(struct net_device *netdev, u32 stringset,
u8 *data)
{
@@ -3668,13 +3665,9 @@ static const struct ethtool_ops atl1_ethtool_ops = {
.set_ringparam = atl1_set_ringparam,
.get_pauseparam = atl1_get_pauseparam,
.set_pauseparam = atl1_set_pauseparam,
- .get_rx_csum = atl1_get_rx_csum,
- .set_tx_csum = ethtool_op_set_tx_hw_csum,
.get_link = ethtool_op_get_link,
- .set_sg = ethtool_op_set_sg,
.get_strings = atl1_get_strings,
.nway_reset = atl1_nway_reset,
.get_ethtool_stats = atl1_get_ethtool_stats,
.get_sset_count = atl1_get_sset_count,
- .set_tso = ethtool_op_set_tso,
};
diff --git a/drivers/net/atlx/atl2.c b/drivers/net/atlx/atl2.c
index 937ef1afa5db..16249e9b6b95 100644
--- a/drivers/net/atlx/atl2.c
+++ b/drivers/net/atlx/atl2.c
@@ -1411,9 +1411,8 @@ static int __devinit atl2_probe(struct pci_dev *pdev,
err = -EIO;
-#ifdef NETIF_F_HW_VLAN_TX
+ netdev->hw_features = NETIF_F_SG;
netdev->features |= (NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX);
-#endif
/* Init PHY as early as possible due to power saving issue */
atl2_phy_init(&adapter->hw);
@@ -1701,7 +1700,7 @@ static struct pci_driver atl2_driver = {
.id_table = atl2_pci_tbl,
.probe = atl2_probe,
.remove = __devexit_p(atl2_remove),
- /* Power Managment Hooks */
+ /* Power Management Hooks */
.suspend = atl2_suspend,
#ifdef CONFIG_PM
.resume = atl2_resume,
@@ -1770,13 +1769,13 @@ static int atl2_get_settings(struct net_device *netdev,
ecmd->transceiver = XCVR_INTERNAL;
if (adapter->link_speed != SPEED_0) {
- ecmd->speed = adapter->link_speed;
+ ethtool_cmd_speed_set(ecmd, adapter->link_speed);
if (adapter->link_duplex == FULL_DUPLEX)
ecmd->duplex = DUPLEX_FULL;
else
ecmd->duplex = DUPLEX_HALF;
} else {
- ecmd->speed = -1;
+ ethtool_cmd_speed_set(ecmd, -1);
ecmd->duplex = -1;
}
@@ -1840,11 +1839,6 @@ static int atl2_set_settings(struct net_device *netdev,
return 0;
}
-static u32 atl2_get_tx_csum(struct net_device *netdev)
-{
- return (netdev->features & NETIF_F_HW_CSUM) != 0;
-}
-
static u32 atl2_get_msglevel(struct net_device *netdev)
{
return 0;
@@ -2112,12 +2106,6 @@ static const struct ethtool_ops atl2_ethtool_ops = {
.get_eeprom_len = atl2_get_eeprom_len,
.get_eeprom = atl2_get_eeprom,
.set_eeprom = atl2_set_eeprom,
- .get_tx_csum = atl2_get_tx_csum,
- .get_sg = ethtool_op_get_sg,
- .set_sg = ethtool_op_set_sg,
-#ifdef NETIF_F_TSO
- .get_tso = ethtool_op_get_tso,
-#endif
};
static void atl2_set_ethtool_ops(struct net_device *netdev)
diff --git a/drivers/net/b44.c b/drivers/net/b44.c
index 2e2b76258ab4..a69331e06b8d 100644
--- a/drivers/net/b44.c
+++ b/drivers/net/b44.c
@@ -1807,8 +1807,8 @@ static int b44_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
if (bp->flags & B44_FLAG_ADV_100FULL)
cmd->advertising |= ADVERTISED_100baseT_Full;
cmd->advertising |= ADVERTISED_Pause | ADVERTISED_Asym_Pause;
- cmd->speed = (bp->flags & B44_FLAG_100_BASE_T) ?
- SPEED_100 : SPEED_10;
+ ethtool_cmd_speed_set(cmd, ((bp->flags & B44_FLAG_100_BASE_T) ?
+ SPEED_100 : SPEED_10));
cmd->duplex = (bp->flags & B44_FLAG_FULL_DUPLEX) ?
DUPLEX_FULL : DUPLEX_HALF;
cmd->port = 0;
@@ -1820,7 +1820,7 @@ static int b44_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
if (cmd->autoneg == AUTONEG_ENABLE)
cmd->advertising |= ADVERTISED_Autoneg;
if (!netif_running(dev)){
- cmd->speed = 0;
+ ethtool_cmd_speed_set(cmd, 0);
cmd->duplex = 0xff;
}
cmd->maxtxpkt = 0;
@@ -1831,6 +1831,7 @@ static int b44_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
static int b44_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
struct b44 *bp = netdev_priv(dev);
+ u32 speed = ethtool_cmd_speed(cmd);
/* We do not support gigabit. */
if (cmd->autoneg == AUTONEG_ENABLE) {
@@ -1838,8 +1839,8 @@ static int b44_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
(ADVERTISED_1000baseT_Half |
ADVERTISED_1000baseT_Full))
return -EINVAL;
- } else if ((cmd->speed != SPEED_100 &&
- cmd->speed != SPEED_10) ||
+ } else if ((speed != SPEED_100 &&
+ speed != SPEED_10) ||
(cmd->duplex != DUPLEX_HALF &&
cmd->duplex != DUPLEX_FULL)) {
return -EINVAL;
@@ -1873,7 +1874,7 @@ static int b44_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
} else {
bp->flags |= B44_FLAG_FORCE_LINK;
bp->flags &= ~(B44_FLAG_100_BASE_T | B44_FLAG_FULL_DUPLEX);
- if (cmd->speed == SPEED_100)
+ if (speed == SPEED_100)
bp->flags |= B44_FLAG_100_BASE_T;
if (cmd->duplex == DUPLEX_FULL)
bp->flags |= B44_FLAG_FULL_DUPLEX;
diff --git a/drivers/net/bcm63xx_enet.c b/drivers/net/bcm63xx_enet.c
index e94a966af418..f1573d492e90 100644
--- a/drivers/net/bcm63xx_enet.c
+++ b/drivers/net/bcm63xx_enet.c
@@ -597,7 +597,7 @@ static int bcm_enet_set_mac_address(struct net_device *dev, void *p)
}
/*
- * Change rx mode (promiscous/allmulti) and update multicast list
+ * Change rx mode (promiscuous/allmulti) and update multicast list
*/
static void bcm_enet_set_multicast_list(struct net_device *dev)
{
@@ -839,8 +839,8 @@ static int bcm_enet_open(struct net_device *dev)
if (ret)
goto out_phy_disconnect;
- ret = request_irq(priv->irq_rx, bcm_enet_isr_dma,
- IRQF_SAMPLE_RANDOM | IRQF_DISABLED, dev->name, dev);
+ ret = request_irq(priv->irq_rx, bcm_enet_isr_dma, IRQF_DISABLED,
+ dev->name, dev);
if (ret)
goto out_freeirq;
@@ -1346,7 +1346,8 @@ static int bcm_enet_get_settings(struct net_device *dev,
return phy_ethtool_gset(priv->phydev, cmd);
} else {
cmd->autoneg = 0;
- cmd->speed = (priv->force_speed_100) ? SPEED_100 : SPEED_10;
+ ethtool_cmd_speed_set(cmd, ((priv->force_speed_100)
+ ? SPEED_100 : SPEED_10));
cmd->duplex = (priv->force_duplex_full) ?
DUPLEX_FULL : DUPLEX_HALF;
cmd->supported = ADVERTISED_10baseT_Half |
diff --git a/drivers/net/benet/be.h b/drivers/net/benet/be.h
index f803c58b941d..41bbc32123f8 100644
--- a/drivers/net/benet/be.h
+++ b/drivers/net/benet/be.h
@@ -84,15 +84,14 @@ static inline char *nic_name(struct pci_dev *pdev)
#define MCC_CQ_LEN 256
#define MAX_RSS_QS 4 /* BE limit is 4 queues/port */
-#define BE_MAX_MSIX_VECTORS (MAX_RSS_QS + 1 + 1)/* RSS qs + 1 def Rx + Tx */
+#define MAX_RX_QS (MAX_RSS_QS + 1) /* RSS qs + 1 def Rx */
+#define BE_MAX_MSIX_VECTORS (MAX_RX_QS + 1)/* RX + TX */
#define BE_NAPI_WEIGHT 64
#define MAX_RX_POST BE_NAPI_WEIGHT /* Frags posted at a time */
#define RX_FRAGS_REFILL_WM (RX_Q_LEN - MAX_RX_POST)
#define FW_VER_LEN 32
-#define BE_MAX_VF 32
-
struct be_dma_mem {
void *va;
dma_addr_t dma;
@@ -154,7 +153,7 @@ struct be_eq_obj {
u16 min_eqd; /* in usecs */
u16 max_eqd; /* in usecs */
u16 cur_eqd; /* in usecs */
- u8 msix_vec_idx;
+ u8 eq_idx;
struct napi_struct napi;
};
@@ -213,7 +212,7 @@ struct be_rx_stats {
struct be_rx_compl_info {
u32 rss_hash;
- u16 vid;
+ u16 vlan_tag;
u16 pkt_size;
u16 rxq_idx;
u16 mac_id;
@@ -276,7 +275,7 @@ struct be_adapter {
spinlock_t mcc_cq_lock;
struct msix_entry msix_entries[BE_MAX_MSIX_VECTORS];
- bool msix_enabled;
+ u32 num_msix_vec;
bool isr_registered;
/* TX Rings */
@@ -287,11 +286,11 @@ struct be_adapter {
u32 cache_line_break[8];
/* Rx rings */
- struct be_rx_obj rx_obj[MAX_RSS_QS + 1]; /* one default non-rss Q */
+ struct be_rx_obj rx_obj[MAX_RX_QS];
u32 num_rx_qs;
u32 big_page_size; /* Compounded page size shared by rx wrbs */
- u8 msix_vec_next_idx;
+ u8 eq_next_idx;
struct be_drv_stats drv_stats;
struct vlan_group *vlan_grp;
@@ -308,10 +307,10 @@ struct be_adapter {
u16 work_counter;
/* Ethtool knobs and info */
- bool rx_csum; /* BE card must perform rx-checksumming */
char fw_ver[FW_VER_LEN];
u32 if_handle; /* Used to configure filtering */
u32 pmac_id; /* MAC addr handle used by BE card */
+ u32 beacon_state; /* for set_phys_id */
bool eeh_err;
bool link_up;
@@ -334,7 +333,7 @@ struct be_adapter {
bool be3_native;
bool sriov_enabled;
- struct be_vf_cfg vf_cfg[BE_MAX_VF];
+ struct be_vf_cfg *vf_cfg;
u8 is_virtfn;
u32 sli_family;
u8 hba_port_num;
@@ -351,6 +350,7 @@ struct be_adapter {
extern const struct ethtool_ops be_ethtool_ops;
+#define msix_enabled(adapter) (adapter->num_msix_vec > 0)
#define tx_stats(adapter) (&adapter->tx_stats)
#define rx_stats(rxo) (&rxo->stats)
@@ -455,18 +455,10 @@ static inline u8 is_udp_pkt(struct sk_buff *skb)
static inline void be_check_sriov_fn_type(struct be_adapter *adapter)
{
- u8 data;
u32 sli_intf;
- if (lancer_chip(adapter)) {
- pci_read_config_dword(adapter->pdev, SLI_INTF_REG_OFFSET,
- &sli_intf);
- adapter->is_virtfn = (sli_intf & SLI_INTF_FT_MASK) ? 1 : 0;
- } else {
- pci_write_config_byte(adapter->pdev, 0xFE, 0xAA);
- pci_read_config_byte(adapter->pdev, 0xFE, &data);
- adapter->is_virtfn = (data != 0xAA);
- }
+ pci_read_config_dword(adapter->pdev, SLI_INTF_REG_OFFSET, &sli_intf);
+ adapter->is_virtfn = (sli_intf & SLI_INTF_FT_MASK) ? 1 : 0;
}
static inline void be_vf_eth_addr_generate(struct be_adapter *adapter, u8 *mac)
@@ -482,6 +474,11 @@ static inline void be_vf_eth_addr_generate(struct be_adapter *adapter, u8 *mac)
memcpy(mac, adapter->netdev->dev_addr, 3);
}
+static inline bool be_multi_rxq(const struct be_adapter *adapter)
+{
+ return adapter->num_rx_qs > 1;
+}
+
extern void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm,
u16 num_popped);
extern void be_link_status_update(struct be_adapter *adapter, bool link_up);
diff --git a/drivers/net/benet/be_cmds.c b/drivers/net/benet/be_cmds.c
index 5a4a87e7c5ea..f2c90997fabd 100644
--- a/drivers/net/benet/be_cmds.c
+++ b/drivers/net/benet/be_cmds.c
@@ -78,7 +78,8 @@ static int be_mcc_compl_process(struct be_adapter *adapter,
}
if (compl_status == MCC_STATUS_SUCCESS) {
- if (compl->tag0 == OPCODE_ETH_GET_STATISTICS) {
+ if ((compl->tag0 == OPCODE_ETH_GET_STATISTICS) &&
+ (compl->tag1 == CMD_SUBSYSTEM_ETH)) {
struct be_cmd_resp_get_stats *resp =
adapter->stats_cmd.va;
be_dws_le_to_cpu(&resp->hw_stats,
@@ -132,7 +133,7 @@ static void be_async_grp5_pvid_state_process(struct be_adapter *adapter,
struct be_async_event_grp5_pvid_state *evt)
{
if (evt->enabled)
- adapter->pvid = evt->tag;
+ adapter->pvid = le16_to_cpu(evt->tag);
else
adapter->pvid = 0;
}
@@ -292,12 +293,12 @@ static int be_mbox_db_ready_wait(struct be_adapter *adapter, void __iomem *db)
if (msecs > 4000) {
dev_err(&adapter->pdev->dev, "mbox poll timed out\n");
- be_detect_dump_ue(adapter);
+ if (!lancer_chip(adapter))
+ be_detect_dump_ue(adapter);
return -1;
}
- set_current_state(TASK_INTERRUPTIBLE);
- schedule_timeout(msecs_to_jiffies(1));
+ msleep(1);
msecs++;
} while (true);
@@ -374,23 +375,25 @@ int be_cmd_POST(struct be_adapter *adapter)
{
u16 stage;
int status, timeout = 0;
+ struct device *dev = &adapter->pdev->dev;
do {
status = be_POST_stage_get(adapter, &stage);
if (status) {
- dev_err(&adapter->pdev->dev, "POST error; stage=0x%x\n",
- stage);
+ dev_err(dev, "POST error; stage=0x%x\n", stage);
return -1;
} else if (stage != POST_STAGE_ARMFW_RDY) {
- set_current_state(TASK_INTERRUPTIBLE);
- schedule_timeout(2 * HZ);
+ if (msleep_interruptible(2000)) {
+ dev_err(dev, "Waiting for POST aborted\n");
+ return -EINTR;
+ }
timeout += 2;
} else {
return 0;
}
} while (timeout < 40);
- dev_err(&adapter->pdev->dev, "POST timeout; stage=0x%x\n", stage);
+ dev_err(dev, "POST timeout; stage=0x%x\n", stage);
return -1;
}
@@ -728,8 +731,6 @@ int be_cmd_cq_create(struct be_adapter *adapter,
if (lancer_chip(adapter)) {
req->hdr.version = 2;
req->page_size = 1; /* 1 for 4K */
- AMAP_SET_BITS(struct amap_cq_context_lancer, coalescwm, ctxt,
- coalesce_wm);
AMAP_SET_BITS(struct amap_cq_context_lancer, nodelay, ctxt,
no_delay);
AMAP_SET_BITS(struct amap_cq_context_lancer, count, ctxt,
@@ -1096,6 +1097,7 @@ int be_cmd_get_stats(struct be_adapter *adapter, struct be_dma_mem *nonemb_cmd)
be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
OPCODE_ETH_GET_STATISTICS, sizeof(*req));
+ wrb->tag1 = CMD_SUBSYSTEM_ETH;
sge->pa_hi = cpu_to_le32(upper_32_bits(nonemb_cmd->dma));
sge->pa_lo = cpu_to_le32(nonemb_cmd->dma & 0xFFFFFFFF);
sge->len = cpu_to_le32(nonemb_cmd->size);
@@ -1110,7 +1112,7 @@ err:
/* Uses synchronous mcc */
int be_cmd_link_status_query(struct be_adapter *adapter,
- bool *link_up, u8 *mac_speed, u16 *link_speed)
+ bool *link_up, u8 *mac_speed, u16 *link_speed, u32 dom)
{
struct be_mcc_wrb *wrb;
struct be_cmd_req_link_status *req;
@@ -1186,6 +1188,116 @@ err:
return status;
}
+/* Uses synchronous mcc */
+int be_cmd_get_reg_len(struct be_adapter *adapter, u32 *log_size)
+{
+ struct be_mcc_wrb *wrb;
+ struct be_cmd_req_get_fat *req;
+ int status;
+
+ spin_lock_bh(&adapter->mcc_lock);
+
+ wrb = wrb_from_mccq(adapter);
+ if (!wrb) {
+ status = -EBUSY;
+ goto err;
+ }
+ req = embedded_payload(wrb);
+
+ be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
+ OPCODE_COMMON_MANAGE_FAT);
+
+ be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
+ OPCODE_COMMON_MANAGE_FAT, sizeof(*req));
+ req->fat_operation = cpu_to_le32(QUERY_FAT);
+ status = be_mcc_notify_wait(adapter);
+ if (!status) {
+ struct be_cmd_resp_get_fat *resp = embedded_payload(wrb);
+ if (log_size && resp->log_size)
+ *log_size = le32_to_cpu(resp->log_size) -
+ sizeof(u32);
+ }
+err:
+ spin_unlock_bh(&adapter->mcc_lock);
+ return status;
+}
+
+void be_cmd_get_regs(struct be_adapter *adapter, u32 buf_len, void *buf)
+{
+ struct be_dma_mem get_fat_cmd;
+ struct be_mcc_wrb *wrb;
+ struct be_cmd_req_get_fat *req;
+ struct be_sge *sge;
+ u32 offset = 0, total_size, buf_size,
+ log_offset = sizeof(u32), payload_len;
+ int status;
+
+ if (buf_len == 0)
+ return;
+
+ total_size = buf_len;
+
+ get_fat_cmd.size = sizeof(struct be_cmd_req_get_fat) + 60*1024;
+ get_fat_cmd.va = pci_alloc_consistent(adapter->pdev,
+ get_fat_cmd.size,
+ &get_fat_cmd.dma);
+ if (!get_fat_cmd.va) {
+ status = -ENOMEM;
+ dev_err(&adapter->pdev->dev,
+ "Memory allocation failure while retrieving FAT data\n");
+ return;
+ }
+
+ spin_lock_bh(&adapter->mcc_lock);
+
+ while (total_size) {
+ buf_size = min(total_size, (u32)60*1024);
+ total_size -= buf_size;
+
+ wrb = wrb_from_mccq(adapter);
+ if (!wrb) {
+ status = -EBUSY;
+ goto err;
+ }
+ req = get_fat_cmd.va;
+ sge = nonembedded_sgl(wrb);
+
+ payload_len = sizeof(struct be_cmd_req_get_fat) + buf_size;
+ be_wrb_hdr_prepare(wrb, payload_len, false, 1,
+ OPCODE_COMMON_MANAGE_FAT);
+
+ be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
+ OPCODE_COMMON_MANAGE_FAT, payload_len);
+
+ sge->pa_hi = cpu_to_le32(upper_32_bits(get_fat_cmd.dma));
+ sge->pa_lo = cpu_to_le32(get_fat_cmd.dma & 0xFFFFFFFF);
+ sge->len = cpu_to_le32(get_fat_cmd.size);
+
+ req->fat_operation = cpu_to_le32(RETRIEVE_FAT);
+ req->read_log_offset = cpu_to_le32(log_offset);
+ req->read_log_length = cpu_to_le32(buf_size);
+ req->data_buffer_size = cpu_to_le32(buf_size);
+
+ status = be_mcc_notify_wait(adapter);
+ if (!status) {
+ struct be_cmd_resp_get_fat *resp = get_fat_cmd.va;
+ memcpy(buf + offset,
+ resp->data_buffer,
+ resp->read_log_length);
+ } else {
+ dev_err(&adapter->pdev->dev, "FAT Table Retrieve error\n");
+ goto err;
+ }
+ offset += buf_size;
+ log_offset += buf_size;
+ }
+err:
+ pci_free_consistent(adapter->pdev, get_fat_cmd.size,
+ get_fat_cmd.va,
+ get_fat_cmd.dma);
+ spin_unlock_bh(&adapter->mcc_lock);
+}
+
/* Uses Mbox */
int be_cmd_get_fw_ver(struct be_adapter *adapter, char *fw_ver)
{
@@ -1293,12 +1405,24 @@ err:
/* Uses MCC for this command as it may be called in BH context
* Uses synchronous mcc
*/
-int be_cmd_promiscuous_config(struct be_adapter *adapter, u8 port_num, bool en)
+int be_cmd_promiscuous_config(struct be_adapter *adapter, bool en)
{
struct be_mcc_wrb *wrb;
- struct be_cmd_req_promiscuous_config *req;
+ struct be_cmd_req_rx_filter *req;
+ struct be_dma_mem promiscous_cmd;
+ struct be_sge *sge;
int status;
+ memset(&promiscous_cmd, 0, sizeof(struct be_dma_mem));
+ promiscous_cmd.size = sizeof(struct be_cmd_req_rx_filter);
+ promiscous_cmd.va = pci_alloc_consistent(adapter->pdev,
+ promiscous_cmd.size, &promiscous_cmd.dma);
+ if (!promiscous_cmd.va) {
+ dev_err(&adapter->pdev->dev,
+ "Memory allocation failure\n");
+ return -ENOMEM;
+ }
+
spin_lock_bh(&adapter->mcc_lock);
wrb = wrb_from_mccq(adapter);
@@ -1306,32 +1430,36 @@ int be_cmd_promiscuous_config(struct be_adapter *adapter, u8 port_num, bool en)
status = -EBUSY;
goto err;
}
- req = embedded_payload(wrb);
- be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0, OPCODE_ETH_PROMISCUOUS);
+ req = promiscous_cmd.va;
+ sge = nonembedded_sgl(wrb);
- be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
- OPCODE_ETH_PROMISCUOUS, sizeof(*req));
-
- /* In FW versions X.102.149/X.101.487 and later,
- * the port setting associated only with the
- * issuing pci function will take effect
- */
- if (port_num)
- req->port1_promiscuous = en;
- else
- req->port0_promiscuous = en;
+ be_wrb_hdr_prepare(wrb, sizeof(*req), false, 1,
+ OPCODE_COMMON_NTWK_RX_FILTER);
+ be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
+ OPCODE_COMMON_NTWK_RX_FILTER, sizeof(*req));
+
+ req->if_id = cpu_to_le32(adapter->if_handle);
+ req->if_flags_mask = cpu_to_le32(BE_IF_FLAGS_PROMISCUOUS);
+ if (en)
+ req->if_flags = cpu_to_le32(BE_IF_FLAGS_PROMISCUOUS);
+
+ sge->pa_hi = cpu_to_le32(upper_32_bits(promiscous_cmd.dma));
+ sge->pa_lo = cpu_to_le32(promiscous_cmd.dma & 0xFFFFFFFF);
+ sge->len = cpu_to_le32(promiscous_cmd.size);
status = be_mcc_notify_wait(adapter);
err:
spin_unlock_bh(&adapter->mcc_lock);
+ pci_free_consistent(adapter->pdev, promiscous_cmd.size,
+ promiscous_cmd.va, promiscous_cmd.dma);
return status;
}
/*
* Uses MCC for this command as it may be called in BH context
- * (mc == NULL) => multicast promiscous
+ * (mc == NULL) => multicast promiscuous
*/
int be_cmd_multicast_set(struct be_adapter *adapter, u32 if_id,
struct net_device *netdev, struct be_dma_mem *mem)
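
be_cmd_get_regs() above retrieves the firmware FAT log in 60KB slices: each MCC round trip posts one non-embedded DMA buffer, so the driver advances read_log_offset until buf_len bytes have been copied out. Reduced to the chunking arithmetic, the loop is roughly the following (a sketch; read_chunk() is a stand-in for the WRB/MCC round trip, not a real be2net helper):

/* Hypothetical helper: copy 'len' log bytes starting at 'off' into 'dst'. */
int read_chunk(void *dst, u32 off, u32 len);

static void get_log(void *buf, u32 buf_len)
{
	u32 offset = 0;			/* write position in the caller's buffer */
	u32 log_offset = sizeof(u32);	/* skip the leading log-size word        */
	u32 total = buf_len;

	while (total) {
		u32 chunk = min(total, (u32)60 * 1024);

		if (read_chunk(buf + offset, log_offset, chunk))
			break;		/* stop on a firmware error */

		offset += chunk;
		log_offset += chunk;
		total -= chunk;
	}
}
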
diff --git a/drivers/net/benet/be_cmds.h b/drivers/net/benet/be_cmds.h
index 4f254cfaabe2..78256b65cb03 100644
--- a/drivers/net/benet/be_cmds.h
+++ b/drivers/net/benet/be_cmds.h
@@ -186,6 +186,7 @@ struct be_mcc_mailbox {
#define OPCODE_COMMON_NTWK_PMAC_ADD 59
#define OPCODE_COMMON_NTWK_PMAC_DEL 60
#define OPCODE_COMMON_FUNCTION_RESET 61
+#define OPCODE_COMMON_MANAGE_FAT 68
#define OPCODE_COMMON_ENABLE_DISABLE_BEACON 69
#define OPCODE_COMMON_GET_BEACON_STATE 70
#define OPCODE_COMMON_READ_TRANSRECV_DATA 73
@@ -380,6 +381,24 @@ struct be_cmd_resp_cq_create {
u16 rsvd0;
} __packed;
+struct be_cmd_req_get_fat {
+ struct be_cmd_req_hdr hdr;
+ u32 fat_operation;
+ u32 read_log_offset;
+ u32 read_log_length;
+ u32 data_buffer_size;
+ u32 data_buffer[1];
+} __packed;
+
+struct be_cmd_resp_get_fat {
+ struct be_cmd_resp_hdr hdr;
+ u32 log_size;
+ u32 read_log_length;
+ u32 rsvd[2];
+ u32 data_buffer[1];
+} __packed;
+
+
/******************** Create MCCQ ***************************/
/* Pseudo amap definition in which each bit of the actual structure is defined
* as a byte: used to calculate offset/shift/mask of each field */
@@ -695,13 +714,6 @@ struct be_cmd_req_vlan_config {
u16 normal_vlan[64];
} __packed;
-struct be_cmd_req_promiscuous_config {
- struct be_cmd_req_hdr hdr;
- u8 port0_promiscuous;
- u8 port1_promiscuous;
- u16 rsvd0;
-} __packed;
-
/******************** Multicast MAC Config *******************/
#define BE_MAX_MC 64 /* set mcast promisc if > 64 */
struct macaddr {
@@ -722,6 +734,20 @@ hw_stats_from_cmd(struct be_cmd_resp_get_stats *cmd)
return &cmd->hw_stats;
}
+
+/******************* RX FILTER ******************************/
+struct be_cmd_req_rx_filter {
+ struct be_cmd_req_hdr hdr;
+ u32 global_flags_mask;
+ u32 global_flags;
+ u32 if_flags_mask;
+ u32 if_flags;
+ u32 if_id;
+ u32 multicast_num;
+ struct macaddr mac[BE_MAX_MC];
+};
+
+
/******************** Link Status Query *******************/
struct be_cmd_req_link_status {
struct be_cmd_req_hdr hdr;
@@ -1093,7 +1119,7 @@ extern int be_cmd_rxq_create(struct be_adapter *adapter,
extern int be_cmd_q_destroy(struct be_adapter *adapter, struct be_queue_info *q,
int type);
extern int be_cmd_link_status_query(struct be_adapter *adapter,
- bool *link_up, u8 *mac_speed, u16 *link_speed);
+ bool *link_up, u8 *mac_speed, u16 *link_speed, u32 dom);
extern int be_cmd_reset(struct be_adapter *adapter);
extern int be_cmd_get_stats(struct be_adapter *adapter,
struct be_dma_mem *nonemb_cmd);
@@ -1103,8 +1129,7 @@ extern int be_cmd_modify_eqd(struct be_adapter *adapter, u32 eq_id, u32 eqd);
extern int be_cmd_vlan_config(struct be_adapter *adapter, u32 if_id,
u16 *vtag_array, u32 num, bool untagged,
bool promiscuous);
-extern int be_cmd_promiscuous_config(struct be_adapter *adapter,
- u8 port_num, bool en);
+extern int be_cmd_promiscuous_config(struct be_adapter *adapter, bool en);
extern int be_cmd_multicast_set(struct be_adapter *adapter, u32 if_id,
struct net_device *netdev, struct be_dma_mem *mem);
extern int be_cmd_set_flow_control(struct be_adapter *adapter,
@@ -1148,4 +1173,6 @@ extern void be_detect_dump_ue(struct be_adapter *adapter);
extern int be_cmd_get_die_temperature(struct be_adapter *adapter);
extern int be_cmd_get_cntl_attributes(struct be_adapter *adapter);
extern int be_cmd_check_native_mode(struct be_adapter *adapter);
+extern int be_cmd_get_reg_len(struct be_adapter *adapter, u32 *log_size);
+extern void be_cmd_get_regs(struct be_adapter *adapter, u32 buf_len, void *buf);
diff --git a/drivers/net/benet/be_ethtool.c b/drivers/net/benet/be_ethtool.c
index aac248fbd18b..8e770e8275df 100644
--- a/drivers/net/benet/be_ethtool.c
+++ b/drivers/net/benet/be_ethtool.c
@@ -156,6 +156,29 @@ be_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo)
}
static int
+be_get_reg_len(struct net_device *netdev)
+{
+ struct be_adapter *adapter = netdev_priv(netdev);
+ u32 log_size = 0;
+
+ if (be_physfn(adapter))
+ be_cmd_get_reg_len(adapter, &log_size);
+
+ return log_size;
+}
+
+static void
+be_get_regs(struct net_device *netdev, struct ethtool_regs *regs, void *buf)
+{
+ struct be_adapter *adapter = netdev_priv(netdev);
+
+ if (be_physfn(adapter)) {
+ memset(buf, 0, regs->len);
+ be_cmd_get_regs(adapter, regs->len, buf);
+ }
+}
+
+static int
be_get_coalesce(struct net_device *netdev, struct ethtool_coalesce *coalesce)
{
struct be_adapter *adapter = netdev_priv(netdev);
@@ -186,9 +209,9 @@ be_set_coalesce(struct net_device *netdev, struct ethtool_coalesce *coalesce)
struct be_rx_obj *rxo;
struct be_eq_obj *rx_eq;
struct be_eq_obj *tx_eq = &adapter->tx_eq;
- u32 tx_max, tx_min, tx_cur;
u32 rx_max, rx_min, rx_cur;
int status = 0, i;
+ u32 tx_cur;
if (coalesce->use_adaptive_tx_coalesce == 1)
return -EINVAL;
@@ -227,8 +250,6 @@ be_set_coalesce(struct net_device *netdev, struct ethtool_coalesce *coalesce)
}
}
- tx_max = coalesce->tx_coalesce_usecs_high;
- tx_min = coalesce->tx_coalesce_usecs_low;
tx_cur = coalesce->tx_coalesce_usecs;
if (tx_cur > BE_MAX_EQD)
@@ -242,25 +263,6 @@ be_set_coalesce(struct net_device *netdev, struct ethtool_coalesce *coalesce)
return 0;
}
-static u32 be_get_rx_csum(struct net_device *netdev)
-{
- struct be_adapter *adapter = netdev_priv(netdev);
-
- return adapter->rx_csum;
-}
-
-static int be_set_rx_csum(struct net_device *netdev, uint32_t data)
-{
- struct be_adapter *adapter = netdev_priv(netdev);
-
- if (data)
- adapter->rx_csum = true;
- else
- adapter->rx_csum = false;
-
- return 0;
-}
-
static void
be_get_ethtool_stats(struct net_device *netdev,
struct ethtool_stats *stats, uint64_t *data)
@@ -374,19 +376,28 @@ static int be_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
if ((adapter->link_speed < 0) || (!(netdev->flags & IFF_UP))) {
status = be_cmd_link_status_query(adapter, &link_up,
- &mac_speed, &link_speed);
+ &mac_speed, &link_speed, 0);
be_link_status_update(adapter, link_up);
/* link_speed is in units of 10 Mbps */
if (link_speed) {
- ecmd->speed = link_speed*10;
+ ethtool_cmd_speed_set(ecmd, link_speed*10);
} else {
switch (mac_speed) {
+ case PHY_LINK_SPEED_10MBPS:
+ ethtool_cmd_speed_set(ecmd, SPEED_10);
+ break;
+ case PHY_LINK_SPEED_100MBPS:
+ ethtool_cmd_speed_set(ecmd, SPEED_100);
+ break;
case PHY_LINK_SPEED_1GBPS:
- ecmd->speed = SPEED_1000;
+ ethtool_cmd_speed_set(ecmd, SPEED_1000);
break;
case PHY_LINK_SPEED_10GBPS:
- ecmd->speed = SPEED_10000;
+ ethtool_cmd_speed_set(ecmd, SPEED_10000);
+ break;
+ case PHY_LINK_SPEED_ZERO:
+ ethtool_cmd_speed_set(ecmd, 0);
break;
}
}
@@ -429,14 +440,14 @@ static int be_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
}
/* Save for future use */
- adapter->link_speed = ecmd->speed;
+ adapter->link_speed = ethtool_cmd_speed(ecmd);
adapter->port_type = ecmd->port;
adapter->transceiver = ecmd->transceiver;
adapter->autoneg = ecmd->autoneg;
dma_free_coherent(&adapter->pdev->dev, phy_cmd.size, phy_cmd.va,
phy_cmd.dma);
} else {
- ecmd->speed = adapter->link_speed;
+ ethtool_cmd_speed_set(ecmd, adapter->link_speed);
ecmd->port = adapter->port_type;
ecmd->transceiver = adapter->transceiver;
ecmd->autoneg = adapter->autoneg;
@@ -507,29 +518,33 @@ be_set_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *ecmd)
}
static int
-be_phys_id(struct net_device *netdev, u32 data)
+be_set_phys_id(struct net_device *netdev,
+ enum ethtool_phys_id_state state)
{
struct be_adapter *adapter = netdev_priv(netdev);
- int status;
- u32 cur;
- be_cmd_get_beacon_state(adapter, adapter->hba_port_num, &cur);
+ switch (state) {
+ case ETHTOOL_ID_ACTIVE:
+ be_cmd_get_beacon_state(adapter, adapter->hba_port_num,
+ &adapter->beacon_state);
+ return 1; /* cycle on/off once per second */
- if (cur == BEACON_STATE_ENABLED)
- return 0;
-
- if (data < 2)
- data = 2;
+ case ETHTOOL_ID_ON:
+ be_cmd_set_beacon_state(adapter, adapter->hba_port_num, 0, 0,
+ BEACON_STATE_ENABLED);
+ break;
- status = be_cmd_set_beacon_state(adapter, adapter->hba_port_num, 0, 0,
- BEACON_STATE_ENABLED);
- set_current_state(TASK_INTERRUPTIBLE);
- schedule_timeout(data*HZ);
+ case ETHTOOL_ID_OFF:
+ be_cmd_set_beacon_state(adapter, adapter->hba_port_num, 0, 0,
+ BEACON_STATE_DISABLED);
+ break;
- status = be_cmd_set_beacon_state(adapter, adapter->hba_port_num, 0, 0,
- BEACON_STATE_DISABLED);
+ case ETHTOOL_ID_INACTIVE:
+ be_cmd_set_beacon_state(adapter, adapter->hba_port_num, 0, 0,
+ adapter->beacon_state);
+ }
- return status;
+ return 0;
}
static bool
@@ -646,7 +661,7 @@ be_self_test(struct net_device *netdev, struct ethtool_test *test, u64 *data)
}
if (be_cmd_link_status_query(adapter, &link_up, &mac_speed,
- &qos_link_speed) != 0) {
+ &qos_link_speed, 0) != 0) {
test->flags |= ETH_TEST_FL_FAILED;
data[4] = -1;
} else if (!mac_speed) {
@@ -660,11 +675,9 @@ be_do_flash(struct net_device *netdev, struct ethtool_flash *efl)
{
struct be_adapter *adapter = netdev_priv(netdev);
char file_name[ETHTOOL_FLASH_MAX_FILENAME];
- u32 region;
file_name[ETHTOOL_FLASH_MAX_FILENAME - 1] = 0;
strcpy(file_name, efl->data);
- region = efl->region;
return be_load_fw(adapter, file_name);
}
@@ -725,18 +738,12 @@ const struct ethtool_ops be_ethtool_ops = {
.get_ringparam = be_get_ringparam,
.get_pauseparam = be_get_pauseparam,
.set_pauseparam = be_set_pauseparam,
- .get_rx_csum = be_get_rx_csum,
- .set_rx_csum = be_set_rx_csum,
- .get_tx_csum = ethtool_op_get_tx_csum,
- .set_tx_csum = ethtool_op_set_tx_hw_csum,
- .get_sg = ethtool_op_get_sg,
- .set_sg = ethtool_op_set_sg,
- .get_tso = ethtool_op_get_tso,
- .set_tso = ethtool_op_set_tso,
.get_strings = be_get_stat_strings,
- .phys_id = be_phys_id,
+ .set_phys_id = be_set_phys_id,
.get_sset_count = be_get_sset_count,
.get_ethtool_stats = be_get_ethtool_stats,
+ .get_regs_len = be_get_reg_len,
+ .get_regs = be_get_regs,
.flash_device = be_do_flash,
.self_test = be_self_test,
};
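
The be_phys_id() to be_set_phys_id() conversion follows the ethtool set_phys_id contract: on ETHTOOL_ID_ACTIVE the driver either starts asynchronous blinking and returns 0, or returns a positive frequency (here 1, i.e. one on/off cycle per second) so the core drives ETHTOOL_ID_ON/ETHTOOL_ID_OFF itself; ETHTOOL_ID_INACTIVE restores the saved state. A generic sketch of that shape (my_priv, led_read(), led_on(), led_off() and led_restore() are placeholders, not be2net code):

static int my_set_phys_id(struct net_device *dev,
			  enum ethtool_phys_id_state state)
{
	struct my_priv *priv = netdev_priv(dev);

	switch (state) {
	case ETHTOOL_ID_ACTIVE:
		priv->saved_led = led_read(priv);	/* remember current state */
		return 1;	/* let the core toggle once per second */

	case ETHTOOL_ID_ON:
		led_on(priv);
		break;

	case ETHTOOL_ID_OFF:
		led_off(priv);
		break;

	case ETHTOOL_ID_INACTIVE:
		led_restore(priv, priv->saved_led);	/* blinking finished */
		break;
	}

	return 0;
}
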
diff --git a/drivers/net/benet/be_hw.h b/drivers/net/benet/be_hw.h
index d4344a06090b..53d658afea2a 100644
--- a/drivers/net/benet/be_hw.h
+++ b/drivers/net/benet/be_hw.h
@@ -155,6 +155,10 @@
/********** SRIOV VF PCICFG OFFSET ********/
#define SRIOV_VF_PCICFG_OFFSET (4096)
+/********** FAT TABLE ********/
+#define RETRIEVE_FAT 0
+#define QUERY_FAT 1
+
/* Flashrom related descriptors */
#define IMAGE_TYPE_FIRMWARE 160
#define IMAGE_TYPE_BOOTCODE 224
diff --git a/drivers/net/benet/be_main.c b/drivers/net/benet/be_main.c
index a71163f1e34b..243172bedfa6 100644
--- a/drivers/net/benet/be_main.c
+++ b/drivers/net/benet/be_main.c
@@ -116,11 +116,6 @@ static char *ue_status_hi_desc[] = {
"Unknown"
};
-static inline bool be_multi_rxq(struct be_adapter *adapter)
-{
- return (adapter->num_rx_qs > 1);
-}
-
static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
{
struct be_dma_mem *mem = &q->dma_mem;
@@ -703,15 +698,15 @@ static void be_set_multicast_list(struct net_device *netdev)
struct be_adapter *adapter = netdev_priv(netdev);
if (netdev->flags & IFF_PROMISC) {
- be_cmd_promiscuous_config(adapter, adapter->port_num, 1);
+ be_cmd_promiscuous_config(adapter, true);
adapter->promiscuous = true;
goto done;
}
- /* BE was previously in promiscous mode; disable it */
+ /* BE was previously in promiscuous mode; disable it */
if (adapter->promiscuous) {
adapter->promiscuous = false;
- be_cmd_promiscuous_config(adapter, adapter->port_num, 0);
+ be_cmd_promiscuous_config(adapter, false);
}
/* Enable multicast promisc if num configured exceeds what we support */
@@ -993,9 +988,10 @@ static void be_rx_compl_process(struct be_adapter *adapter,
struct be_rx_obj *rxo,
struct be_rx_compl_info *rxcp)
{
+ struct net_device *netdev = adapter->netdev;
struct sk_buff *skb;
- skb = netdev_alloc_skb_ip_align(adapter->netdev, BE_HDR_LEN);
+ skb = netdev_alloc_skb_ip_align(netdev, BE_HDR_LEN);
if (unlikely(!skb)) {
if (net_ratelimit())
dev_warn(&adapter->pdev->dev, "skb alloc failed\n");
@@ -1005,20 +1001,24 @@ static void be_rx_compl_process(struct be_adapter *adapter,
skb_fill_rx_data(adapter, rxo, skb, rxcp);
- if (likely(adapter->rx_csum && csum_passed(rxcp)))
+ if (likely((netdev->features & NETIF_F_RXCSUM) && csum_passed(rxcp)))
skb->ip_summed = CHECKSUM_UNNECESSARY;
else
skb_checksum_none_assert(skb);
skb->truesize = skb->len + sizeof(struct sk_buff);
- skb->protocol = eth_type_trans(skb, adapter->netdev);
+ skb->protocol = eth_type_trans(skb, netdev);
+ if (adapter->netdev->features & NETIF_F_RXHASH)
+ skb->rxhash = rxcp->rss_hash;
+
if (unlikely(rxcp->vlanf)) {
if (!adapter->vlan_grp || adapter->vlans_added == 0) {
kfree_skb(skb);
return;
}
- vlan_hwaccel_receive_skb(skb, adapter->vlan_grp, rxcp->vid);
+ vlan_hwaccel_receive_skb(skb, adapter->vlan_grp,
+ rxcp->vlan_tag);
} else {
netif_receive_skb(skb);
}
@@ -1072,11 +1072,14 @@ static void be_rx_compl_process_gro(struct be_adapter *adapter,
skb->data_len = rxcp->pkt_size;
skb->truesize += rxcp->pkt_size;
skb->ip_summed = CHECKSUM_UNNECESSARY;
+ if (adapter->netdev->features & NETIF_F_RXHASH)
+ skb->rxhash = rxcp->rss_hash;
if (likely(!rxcp->vlanf))
napi_gro_frags(&eq_obj->napi);
else
- vlan_gro_frags(&eq_obj->napi, adapter->vlan_grp, rxcp->vid);
+ vlan_gro_frags(&eq_obj->napi, adapter->vlan_grp,
+ rxcp->vlan_tag);
}
static void be_parse_rx_compl_v1(struct be_adapter *adapter,
@@ -1101,8 +1104,14 @@ static void be_parse_rx_compl_v1(struct be_adapter *adapter,
AMAP_GET_BITS(struct amap_eth_rx_compl_v1, numfrags, compl);
rxcp->pkt_type =
AMAP_GET_BITS(struct amap_eth_rx_compl_v1, cast_enc, compl);
- rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtm, compl);
- rxcp->vid = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vlan_tag, compl);
+ rxcp->rss_hash =
+ AMAP_GET_BITS(struct amap_eth_rx_compl_v1, rsshash, rxcp);
+ if (rxcp->vlanf) {
+ rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtm,
+ compl);
+ rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vlan_tag,
+ compl);
+ }
}
static void be_parse_rx_compl_v0(struct be_adapter *adapter,
@@ -1127,8 +1136,14 @@ static void be_parse_rx_compl_v0(struct be_adapter *adapter,
AMAP_GET_BITS(struct amap_eth_rx_compl_v0, numfrags, compl);
rxcp->pkt_type =
AMAP_GET_BITS(struct amap_eth_rx_compl_v0, cast_enc, compl);
- rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtm, compl);
- rxcp->vid = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vlan_tag, compl);
+ rxcp->rss_hash =
+ AMAP_GET_BITS(struct amap_eth_rx_compl_v0, rsshash, rxcp);
+ if (rxcp->vlanf) {
+ rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtm,
+ compl);
+ rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vlan_tag,
+ compl);
+ }
}
static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
@@ -1150,15 +1165,20 @@ static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
else
be_parse_rx_compl_v0(adapter, compl, rxcp);
- /* vlanf could be wrongly set in some cards. ignore if vtm is not set */
- if ((adapter->function_mode & 0x400) && !rxcp->vtm)
- rxcp->vlanf = 0;
+ if (rxcp->vlanf) {
+ /* vlanf could be wrongly set in some cards.
+ * ignore if vtm is not set */
+ if ((adapter->function_mode & 0x400) && !rxcp->vtm)
+ rxcp->vlanf = 0;
- if (!lancer_chip(adapter))
- rxcp->vid = swab16(rxcp->vid);
+ if (!lancer_chip(adapter))
+ rxcp->vlan_tag = swab16(rxcp->vlan_tag);
- if ((adapter->pvid == rxcp->vid) && !adapter->vlan_tag[rxcp->vid])
- rxcp->vlanf = 0;
+ if (((adapter->pvid & VLAN_VID_MASK) ==
+ (rxcp->vlan_tag & VLAN_VID_MASK)) &&
+ !adapter->vlan_tag[rxcp->vlan_tag])
+ rxcp->vlanf = 0;
+ }
/* As the compl has been parsed, reset it; we wont touch it again */
compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] = 0;
@@ -1255,7 +1275,7 @@ static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq)
return txcp;
}
-static void be_tx_compl_process(struct be_adapter *adapter, u16 last_index)
+static u16 be_tx_compl_process(struct be_adapter *adapter, u16 last_index)
{
struct be_queue_info *txq = &adapter->tx_obj.q;
struct be_eth_wrb *wrb;
@@ -1282,9 +1302,8 @@ static void be_tx_compl_process(struct be_adapter *adapter, u16 last_index)
queue_tail_inc(txq);
} while (cur_index != last_index);
- atomic_sub(num_wrbs, &txq->used);
-
kfree_skb(sent_skb);
+ return num_wrbs;
}
static inline struct be_eq_entry *event_get(struct be_eq_obj *eq_obj)
@@ -1367,7 +1386,7 @@ static void be_tx_compl_clean(struct be_adapter *adapter)
struct be_queue_info *tx_cq = &adapter->tx_obj.cq;
struct be_queue_info *txq = &adapter->tx_obj.q;
struct be_eth_tx_compl *txcp;
- u16 end_idx, cmpl = 0, timeo = 0;
+ u16 end_idx, cmpl = 0, timeo = 0, num_wrbs = 0;
struct sk_buff **sent_skbs = adapter->tx_obj.sent_skb_list;
struct sk_buff *sent_skb;
bool dummy_wrb;
@@ -1377,12 +1396,14 @@ static void be_tx_compl_clean(struct be_adapter *adapter)
while ((txcp = be_tx_compl_get(tx_cq))) {
end_idx = AMAP_GET_BITS(struct amap_eth_tx_compl,
wrb_index, txcp);
- be_tx_compl_process(adapter, end_idx);
+ num_wrbs += be_tx_compl_process(adapter, end_idx);
cmpl++;
}
if (cmpl) {
be_cq_notify(adapter, tx_cq->id, false, cmpl);
+ atomic_sub(num_wrbs, &txq->used);
cmpl = 0;
+ num_wrbs = 0;
}
if (atomic_read(&txq->used) == 0 || ++timeo > 200)
@@ -1402,7 +1423,8 @@ static void be_tx_compl_clean(struct be_adapter *adapter)
index_adv(&end_idx,
wrb_cnt_for_skb(adapter, sent_skb, &dummy_wrb) - 1,
txq->len);
- be_tx_compl_process(adapter, end_idx);
+ num_wrbs = be_tx_compl_process(adapter, end_idx);
+ atomic_sub(num_wrbs, &txq->used);
}
}
@@ -1497,7 +1519,7 @@ static int be_tx_queues_create(struct be_adapter *adapter)
if (be_cmd_eq_create(adapter, eq, adapter->tx_eq.cur_eqd))
goto tx_eq_free;
- adapter->tx_eq.msix_vec_idx = adapter->msix_vec_next_idx++;
+ adapter->tx_eq.eq_idx = adapter->eq_next_idx++;
/* Alloc TX eth compl queue */
@@ -1567,12 +1589,31 @@ static void be_rx_queues_destroy(struct be_adapter *adapter)
}
}
+static u32 be_num_rxqs_want(struct be_adapter *adapter)
+{
+ if (multi_rxq && (adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
+ !adapter->sriov_enabled && !(adapter->function_mode & 0x400)) {
+ return 1 + MAX_RSS_QS; /* one default non-RSS queue */
+ } else {
+ dev_warn(&adapter->pdev->dev,
+ "No support for multiple RX queues\n");
+ return 1;
+ }
+}
+
static int be_rx_queues_create(struct be_adapter *adapter)
{
struct be_queue_info *eq, *q, *cq;
struct be_rx_obj *rxo;
int rc, i;
+ adapter->num_rx_qs = min(be_num_rxqs_want(adapter),
+ msix_enabled(adapter) ?
+ adapter->num_msix_vec - 1 : 1);
+ if (adapter->num_rx_qs != MAX_RX_QS)
+ dev_warn(&adapter->pdev->dev,
+ "Can create only %d RX queues", adapter->num_rx_qs);
+
adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
for_all_rx_queues(adapter, rxo, i) {
rxo->adapter = adapter;
@@ -1590,7 +1631,7 @@ static int be_rx_queues_create(struct be_adapter *adapter)
if (rc)
goto err;
- rxo->rx_eq.msix_vec_idx = adapter->msix_vec_next_idx++;
+ rxo->rx_eq.eq_idx = adapter->eq_next_idx++;
/* CQ */
cq = &rxo->cq;
@@ -1666,11 +1707,11 @@ static irqreturn_t be_intx(int irq, void *dev)
if (!isr)
return IRQ_NONE;
- if ((1 << adapter->tx_eq.msix_vec_idx & isr))
+ if ((1 << adapter->tx_eq.eq_idx & isr))
event_handle(adapter, &adapter->tx_eq);
for_all_rx_queues(adapter, rxo, i) {
- if ((1 << rxo->rx_eq.msix_vec_idx & isr))
+ if ((1 << rxo->rx_eq.eq_idx & isr))
event_handle(adapter, &rxo->rx_eq);
}
}
@@ -1718,12 +1759,15 @@ static int be_poll_rx(struct napi_struct *napi, int budget)
break;
/* Ignore flush completions */
- if (rxcp->num_rcvd) {
+ if (rxcp->num_rcvd && rxcp->pkt_size) {
if (do_gro(rxcp))
be_rx_compl_process_gro(adapter, rxo, rxcp);
else
be_rx_compl_process(adapter, rxo, rxcp);
+ } else if (rxcp->pkt_size == 0) {
+ be_rx_compl_discard(adapter, rxo, rxcp);
}
+
be_rx_stats_update(rxo, rxcp);
}
@@ -1754,12 +1798,12 @@ static int be_poll_tx_mcc(struct napi_struct *napi, int budget)
struct be_queue_info *tx_cq = &adapter->tx_obj.cq;
struct be_eth_tx_compl *txcp;
int tx_compl = 0, mcc_compl, status = 0;
- u16 end_idx;
+ u16 end_idx, num_wrbs = 0;
while ((txcp = be_tx_compl_get(tx_cq))) {
end_idx = AMAP_GET_BITS(struct amap_eth_tx_compl,
wrb_index, txcp);
- be_tx_compl_process(adapter, end_idx);
+ num_wrbs += be_tx_compl_process(adapter, end_idx);
tx_compl++;
}
@@ -1775,6 +1819,8 @@ static int be_poll_tx_mcc(struct napi_struct *napi, int budget)
if (tx_compl) {
be_cq_notify(adapter, adapter->tx_obj.cq.id, true, tx_compl);
+ atomic_sub(num_wrbs, &txq->used);
+
/* As Tx wrbs have been freed up, wake up netdev queue if
* it was stopped due to lack of tx wrbs.
*/
@@ -1837,6 +1883,9 @@ static void be_worker(struct work_struct *work)
struct be_rx_obj *rxo;
int i;
+ if (!adapter->ue_detected && !lancer_chip(adapter))
+ be_detect_dump_ue(adapter);
+
/* when interrupts are not yet enabled, just reap any pending
* mcc completions */
if (!netif_running(adapter->netdev)) {
@@ -1849,9 +1898,6 @@ static void be_worker(struct work_struct *work)
be_cq_notify(adapter, mcc_obj->cq.id, false, mcc_compl);
}
- if (!adapter->ue_detected && !lancer_chip(adapter))
- be_detect_dump_ue(adapter);
-
goto reschedule;
}
@@ -1869,60 +1915,43 @@ static void be_worker(struct work_struct *work)
be_post_rx_frags(rxo, GFP_KERNEL);
}
}
- if (!adapter->ue_detected && !lancer_chip(adapter))
- be_detect_dump_ue(adapter);
reschedule:
+ adapter->work_counter++;
schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
}
static void be_msix_disable(struct be_adapter *adapter)
{
- if (adapter->msix_enabled) {
+ if (msix_enabled(adapter)) {
pci_disable_msix(adapter->pdev);
- adapter->msix_enabled = false;
- }
-}
-
-static int be_num_rxqs_get(struct be_adapter *adapter)
-{
- if (multi_rxq && (adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
- !adapter->sriov_enabled && !(adapter->function_mode & 0x400)) {
- return 1 + MAX_RSS_QS; /* one default non-RSS queue */
- } else {
- dev_warn(&adapter->pdev->dev,
- "No support for multiple RX queues\n");
- return 1;
+ adapter->num_msix_vec = 0;
}
}
static void be_msix_enable(struct be_adapter *adapter)
{
#define BE_MIN_MSIX_VECTORS (1 + 1) /* Rx + Tx */
- int i, status;
+ int i, status, num_vec;
- adapter->num_rx_qs = be_num_rxqs_get(adapter);
+ num_vec = be_num_rxqs_want(adapter) + 1;
- for (i = 0; i < (adapter->num_rx_qs + 1); i++)
+ for (i = 0; i < num_vec; i++)
adapter->msix_entries[i].entry = i;
- status = pci_enable_msix(adapter->pdev, adapter->msix_entries,
- adapter->num_rx_qs + 1);
+ status = pci_enable_msix(adapter->pdev, adapter->msix_entries, num_vec);
if (status == 0) {
goto done;
} else if (status >= BE_MIN_MSIX_VECTORS) {
+ num_vec = status;
if (pci_enable_msix(adapter->pdev, adapter->msix_entries,
- status) == 0) {
- adapter->num_rx_qs = status - 1;
- dev_warn(&adapter->pdev->dev,
- "Could alloc only %d MSIx vectors. "
- "Using %d RX Qs\n", status, adapter->num_rx_qs);
+ num_vec) == 0)
goto done;
- }
}
return;
done:
- adapter->msix_enabled = true;
+ adapter->num_msix_vec = num_vec;
+ return;
}
static void be_sriov_enable(struct be_adapter *adapter)
@@ -1930,7 +1959,20 @@ static void be_sriov_enable(struct be_adapter *adapter)
be_check_sriov_fn_type(adapter);
#ifdef CONFIG_PCI_IOV
if (be_physfn(adapter) && num_vfs) {
- int status;
+ int status, pos;
+ u16 nvfs;
+
+ pos = pci_find_ext_capability(adapter->pdev,
+ PCI_EXT_CAP_ID_SRIOV);
+ pci_read_config_word(adapter->pdev,
+ pos + PCI_SRIOV_TOTAL_VF, &nvfs);
+
+ if (num_vfs > nvfs) {
+ dev_info(&adapter->pdev->dev,
+ "Device supports %d VFs and not %d\n",
+ nvfs, num_vfs);
+ num_vfs = nvfs;
+ }
status = pci_enable_sriov(adapter->pdev, num_vfs);
adapter->sriov_enabled = status ? false : true;
@@ -1951,7 +1993,7 @@ static void be_sriov_disable(struct be_adapter *adapter)
static inline int be_msix_vec_get(struct be_adapter *adapter,
struct be_eq_obj *eq_obj)
{
- return adapter->msix_entries[eq_obj->msix_vec_idx].vector;
+ return adapter->msix_entries[eq_obj->eq_idx].vector;
}
static int be_request_irq(struct be_adapter *adapter,
@@ -2003,8 +2045,7 @@ err_msix:
err:
dev_warn(&adapter->pdev->dev,
"MSIX Request IRQ failed - err %d\n", status);
- pci_disable_msix(adapter->pdev);
- adapter->msix_enabled = false;
+ be_msix_disable(adapter);
return status;
}
@@ -2013,7 +2054,7 @@ static int be_irq_register(struct be_adapter *adapter)
struct net_device *netdev = adapter->netdev;
int status;
- if (adapter->msix_enabled) {
+ if (msix_enabled(adapter)) {
status = be_msix_register(adapter);
if (status == 0)
goto done;
@@ -2046,7 +2087,7 @@ static void be_irq_unregister(struct be_adapter *adapter)
return;
/* INTx */
- if (!adapter->msix_enabled) {
+ if (!msix_enabled(adapter)) {
free_irq(netdev->irq, adapter);
goto done;
}
@@ -2088,7 +2129,7 @@ static int be_close(struct net_device *netdev)
be_cq_notify(adapter, rxo->cq.id, false, 0);
}
- if (adapter->msix_enabled) {
+ if (msix_enabled(adapter)) {
vec = be_msix_vec_get(adapter, tx_eq);
synchronize_irq(vec);
@@ -2141,7 +2182,7 @@ static int be_open(struct net_device *netdev)
be_async_mcc_enable(adapter);
status = be_cmd_link_status_query(adapter, &link_up, &mac_speed,
- &link_speed);
+ &link_speed, 0);
if (status)
goto err;
be_link_status_update(adapter, link_up);
@@ -2261,7 +2302,7 @@ static int be_setup(struct be_adapter *adapter)
BE_IF_FLAGS_PASS_L3L4_ERRORS;
en_flags |= BE_IF_FLAGS_PASS_L3L4_ERRORS;
- if (be_multi_rxq(adapter)) {
+ if (adapter->function_caps & BE_FUNCTION_CAPS_RSS) {
cap_flags |= BE_IF_FLAGS_RSS;
en_flags |= BE_IF_FLAGS_RSS;
}
@@ -2318,7 +2359,6 @@ static int be_setup(struct be_adapter *adapter)
return 0;
- be_mcc_queues_destroy(adapter);
rx_qs_destroy:
be_rx_queues_destroy(adapter);
tx_qs_destroy:
@@ -2345,6 +2385,7 @@ static int be_clear(struct be_adapter *adapter)
be_mcc_queues_destroy(adapter);
be_rx_queues_destroy(adapter);
be_tx_queues_destroy(adapter);
+ adapter->eq_next_idx = 0;
if (be_physfn(adapter) && adapter->sriov_enabled)
for (vf = 0; vf < num_vfs; vf++)
@@ -2598,10 +2639,14 @@ static void be_netdev_init(struct net_device *netdev)
struct be_rx_obj *rxo;
int i;
- netdev->features |= NETIF_F_SG | NETIF_F_HW_VLAN_RX | NETIF_F_TSO |
- NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_FILTER |
- NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
- NETIF_F_GRO | NETIF_F_TSO6;
+ netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
+ NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
+ NETIF_F_HW_VLAN_TX;
+ if (be_multi_rxq(adapter))
+ netdev->hw_features |= NETIF_F_RXHASH;
+
+ netdev->features |= netdev->hw_features |
+ NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER;
netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO |
NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
@@ -2611,8 +2656,6 @@ static void be_netdev_init(struct net_device *netdev)
netdev->flags |= IFF_MULTICAST;
- adapter->rx_csum = true;
-
/* Default settings for Rx and Tx flow control */
adapter->rx_fc = true;
adapter->tx_fc = true;
@@ -2806,6 +2849,7 @@ static void __devexit be_remove(struct pci_dev *pdev)
be_ctrl_cleanup(adapter);
+ kfree(adapter->vf_cfg);
be_sriov_disable(adapter);
be_msix_disable(adapter);
@@ -2990,16 +3034,23 @@ static int __devinit be_probe(struct pci_dev *pdev,
}
be_sriov_enable(adapter);
+ if (adapter->sriov_enabled) {
+ adapter->vf_cfg = kcalloc(num_vfs,
+ sizeof(struct be_vf_cfg), GFP_KERNEL);
+
+ if (!adapter->vf_cfg)
+ goto free_netdev;
+ }
status = be_ctrl_init(adapter);
if (status)
- goto free_netdev;
+ goto free_vf_cfg;
if (lancer_chip(adapter)) {
status = lancer_test_and_set_rdy_state(adapter);
if (status) {
dev_err(&pdev->dev, "Adapter in non recoverable error\n");
- goto free_netdev;
+ goto ctrl_clean;
}
}
@@ -3042,9 +3093,22 @@ static int __devinit be_probe(struct pci_dev *pdev,
netif_carrier_off(netdev);
if (be_physfn(adapter) && adapter->sriov_enabled) {
+ u8 mac_speed;
+ bool link_up;
+ u16 vf, lnk_speed;
+
status = be_vf_eth_addr_config(adapter);
if (status)
goto unreg_netdev;
+
+ for (vf = 0; vf < num_vfs; vf++) {
+ status = be_cmd_link_status_query(adapter, &link_up,
+ &mac_speed, &lnk_speed, vf + 1);
+ if (!status)
+ adapter->vf_cfg[vf].vf_tx_rate = lnk_speed * 10;
+ else
+ goto unreg_netdev;
+ }
}
dev_info(&pdev->dev, "%s port %d\n", nic_name(pdev), adapter->port_num);
@@ -3061,6 +3125,8 @@ stats_clean:
be_stats_cleanup(adapter);
ctrl_clean:
be_ctrl_cleanup(adapter);
+free_vf_cfg:
+ kfree(adapter->vf_cfg);
free_netdev:
be_sriov_disable(adapter);
free_netdev(netdev);
@@ -3141,18 +3207,19 @@ static int be_resume(struct pci_dev *pdev)
static void be_shutdown(struct pci_dev *pdev)
{
struct be_adapter *adapter = pci_get_drvdata(pdev);
- struct net_device *netdev = adapter->netdev;
- if (netif_running(netdev))
- cancel_delayed_work_sync(&adapter->work);
+ if (!adapter)
+ return;
- netif_device_detach(netdev);
+ cancel_delayed_work_sync(&adapter->work);
- be_cmd_reset_function(adapter);
+ netif_device_detach(adapter->netdev);
if (adapter->wol)
be_setup_wol(adapter, true);
+ be_cmd_reset_function(adapter);
+
pci_disable_device(pdev);
}
@@ -3264,13 +3331,6 @@ static int __init be_init_module(void)
rx_frag_size = 2048;
}
- if (num_vfs > 32) {
- printk(KERN_WARNING DRV_NAME
- " : Module param num_vfs must not be greater than 32."
- "Using 32\n");
- num_vfs = 32;
- }
-
return pci_register_driver(&be_driver);
}
module_init(be_init_module);
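
The be_netdev_init() rework above, like the matching bnad and bnx2 hunks later in this diff, moves the user-toggleable offloads into netdev->hw_features so the ethtool core can flip bits such as NETIF_F_RXCSUM without per-driver get/set_rx_csum, set_tso or set_sg hooks; the RX path then tests netdev->features & NETIF_F_RXCSUM instead of a private rx_csum flag. The general pattern is roughly (a sketch, not driver-specific code):

static void my_netdev_init(struct net_device *netdev)
{
	/* Offloads the user may toggle via ethtool -K */
	netdev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
			      NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_RXCSUM;

	/* Enabled by default, plus bits that are not user-toggleable here */
	netdev->features = netdev->hw_features |
			   NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER;
}
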
diff --git a/drivers/net/bna/bfa_ioc.c b/drivers/net/bna/bfa_ioc.c
index 34933cb9569f..fcb9bb3169e0 100644
--- a/drivers/net/bna/bfa_ioc.c
+++ b/drivers/net/bna/bfa_ioc.c
@@ -38,6 +38,8 @@
#define bfa_ioc_map_port(__ioc) ((__ioc)->ioc_hwif->ioc_map_port(__ioc))
#define bfa_ioc_notify_fail(__ioc) \
((__ioc)->ioc_hwif->ioc_notify_fail(__ioc))
+#define bfa_ioc_sync_start(__ioc) \
+ ((__ioc)->ioc_hwif->ioc_sync_start(__ioc))
#define bfa_ioc_sync_join(__ioc) \
((__ioc)->ioc_hwif->ioc_sync_join(__ioc))
#define bfa_ioc_sync_leave(__ioc) \
@@ -80,7 +82,6 @@ static void bfa_ioc_pf_fwmismatch(struct bfa_ioc *ioc);
static void bfa_ioc_boot(struct bfa_ioc *ioc, u32 boot_type,
u32 boot_param);
static u32 bfa_ioc_smem_pgnum(struct bfa_ioc *ioc, u32 fmaddr);
-static u32 bfa_ioc_smem_pgoff(struct bfa_ioc *ioc, u32 fmaddr);
static void bfa_ioc_get_adapter_serial_num(struct bfa_ioc *ioc,
char *serial_num);
static void bfa_ioc_get_adapter_fw_ver(struct bfa_ioc *ioc,
@@ -602,7 +603,7 @@ bfa_iocpf_sm_fwcheck(struct bfa_iocpf *iocpf, enum iocpf_event event)
switch (event) {
case IOCPF_E_SEMLOCKED:
if (bfa_ioc_firmware_lock(ioc)) {
- if (bfa_ioc_sync_complete(ioc)) {
+ if (bfa_ioc_sync_start(ioc)) {
iocpf->retry_count = 0;
bfa_ioc_sync_join(ioc);
bfa_fsm_set_state(iocpf, bfa_iocpf_sm_hwinit);
@@ -1272,13 +1273,12 @@ bfa_ioc_lpu_stop(struct bfa_ioc *ioc)
void
bfa_nw_ioc_fwver_get(struct bfa_ioc *ioc, struct bfi_ioc_image_hdr *fwhdr)
{
- u32 pgnum, pgoff;
+ u32 pgnum;
u32 loff = 0;
int i;
u32 *fwsig = (u32 *) fwhdr;
pgnum = bfa_ioc_smem_pgnum(ioc, loff);
- pgoff = bfa_ioc_smem_pgoff(ioc, loff);
writel(pgnum, ioc->ioc_regs.host_page_num_fn);
for (i = 0; i < (sizeof(struct bfi_ioc_image_hdr) / sizeof(u32));
@@ -1314,7 +1314,7 @@ bfa_nw_ioc_fwver_cmp(struct bfa_ioc *ioc, struct bfi_ioc_image_hdr *fwhdr)
* execution context (driver/bios) must match.
*/
static bool
-bfa_ioc_fwver_valid(struct bfa_ioc *ioc)
+bfa_ioc_fwver_valid(struct bfa_ioc *ioc, u32 boot_env)
{
struct bfi_ioc_image_hdr fwhdr, *drv_fwhdr;
@@ -1325,7 +1325,7 @@ bfa_ioc_fwver_valid(struct bfa_ioc *ioc)
if (fwhdr.signature != drv_fwhdr->signature)
return false;
- if (fwhdr.exec != drv_fwhdr->exec)
+ if (swab32(fwhdr.param) != boot_env)
return false;
return bfa_nw_ioc_fwver_cmp(ioc, &fwhdr);
@@ -1352,9 +1352,12 @@ bfa_ioc_hwinit(struct bfa_ioc *ioc, bool force)
{
enum bfi_ioc_state ioc_fwstate;
bool fwvalid;
+ u32 boot_env;
ioc_fwstate = readl(ioc->ioc_regs.ioc_fwstate);
+ boot_env = BFI_BOOT_LOADER_OS;
+
if (force)
ioc_fwstate = BFI_IOC_UNINIT;
@@ -1362,10 +1365,10 @@ bfa_ioc_hwinit(struct bfa_ioc *ioc, bool force)
* check if firmware is valid
*/
fwvalid = (ioc_fwstate == BFI_IOC_UNINIT) ?
- false : bfa_ioc_fwver_valid(ioc);
+ false : bfa_ioc_fwver_valid(ioc, boot_env);
if (!fwvalid) {
- bfa_ioc_boot(ioc, BFI_BOOT_TYPE_NORMAL, ioc->pcidev.device_id);
+ bfa_ioc_boot(ioc, BFI_BOOT_TYPE_NORMAL, boot_env);
return;
}
@@ -1396,7 +1399,7 @@ bfa_ioc_hwinit(struct bfa_ioc *ioc, bool force)
/**
* Initialize the h/w for any other states.
*/
- bfa_ioc_boot(ioc, BFI_BOOT_TYPE_NORMAL, ioc->pcidev.device_id);
+ bfa_ioc_boot(ioc, BFI_BOOT_TYPE_NORMAL, boot_env);
}
void
@@ -1506,10 +1509,10 @@ bfa_ioc_hb_stop(struct bfa_ioc *ioc)
*/
static void
bfa_ioc_download_fw(struct bfa_ioc *ioc, u32 boot_type,
- u32 boot_param)
+ u32 boot_env)
{
u32 *fwimg;
- u32 pgnum, pgoff;
+ u32 pgnum;
u32 loff = 0;
u32 chunkno = 0;
u32 i;
@@ -1522,7 +1525,6 @@ bfa_ioc_download_fw(struct bfa_ioc *ioc, u32 boot_type,
fwimg = bfa_cb_image_get_chunk(BFA_IOC_FWIMG_TYPE(ioc), chunkno);
pgnum = bfa_ioc_smem_pgnum(ioc, loff);
- pgoff = bfa_ioc_smem_pgoff(ioc, loff);
writel(pgnum, ioc->ioc_regs.host_page_num_fn);
@@ -1558,10 +1560,10 @@ bfa_ioc_download_fw(struct bfa_ioc *ioc, u32 boot_type,
/*
* Set boot type and boot param at the end.
*/
- writel((swab32(swab32(boot_type))), ((ioc->ioc_regs.smem_page_start)
+ writel(boot_type, ((ioc->ioc_regs.smem_page_start)
+ (BFI_BOOT_TYPE_OFF)));
- writel((swab32(swab32(boot_param))), ((ioc->ioc_regs.smem_page_start)
- + (BFI_BOOT_PARAM_OFF)));
+ writel(boot_env, ((ioc->ioc_regs.smem_page_start)
+ + (BFI_BOOT_LOADER_OFF)));
}
static void
@@ -1721,7 +1723,7 @@ bfa_ioc_pll_init(struct bfa_ioc *ioc)
* as the entry vector.
*/
static void
-bfa_ioc_boot(struct bfa_ioc *ioc, u32 boot_type, u32 boot_param)
+bfa_ioc_boot(struct bfa_ioc *ioc, u32 boot_type, u32 boot_env)
{
void __iomem *rb;
@@ -1734,7 +1736,7 @@ bfa_ioc_boot(struct bfa_ioc *ioc, u32 boot_type, u32 boot_param)
* Initialize IOC state of all functions on a chip reset.
*/
rb = ioc->pcidev.pci_bar_kva;
- if (boot_param == BFI_BOOT_TYPE_MEMTEST) {
+ if (boot_type == BFI_BOOT_TYPE_MEMTEST) {
writel(BFI_IOC_MEMTEST, (rb + BFA_IOC0_STATE_REG));
writel(BFI_IOC_MEMTEST, (rb + BFA_IOC1_STATE_REG));
} else {
@@ -1743,7 +1745,7 @@ bfa_ioc_boot(struct bfa_ioc *ioc, u32 boot_type, u32 boot_param)
}
bfa_ioc_msgflush(ioc);
- bfa_ioc_download_fw(ioc, boot_type, boot_param);
+ bfa_ioc_download_fw(ioc, boot_type, boot_env);
/**
* Enable interrupts just before starting LPU
@@ -1920,12 +1922,6 @@ bfa_ioc_smem_pgnum(struct bfa_ioc *ioc, u32 fmaddr)
return PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, fmaddr);
}
-static u32
-bfa_ioc_smem_pgoff(struct bfa_ioc *ioc, u32 fmaddr)
-{
- return PSS_SMEM_PGOFF(fmaddr);
-}
-
/**
* Register mailbox message handler function, to be called by common modules
*/
@@ -2219,13 +2215,9 @@ bfa_nw_ioc_get_mac(struct bfa_ioc *ioc)
static void
bfa_ioc_recover(struct bfa_ioc *ioc)
{
- u16 bdf;
-
- bdf = (ioc->pcidev.pci_slot << 8 | ioc->pcidev.pci_func << 3 |
- ioc->pcidev.device_id);
-
- pr_crit("Firmware heartbeat failure at %d", bdf);
- BUG_ON(1);
+ pr_crit("Heart Beat of IOC has failed\n");
+ bfa_ioc_stats(ioc, ioc_hbfails);
+ bfa_fsm_send_event(ioc, IOC_E_HBFAIL);
}
static void
diff --git a/drivers/net/bna/bfa_ioc.h b/drivers/net/bna/bfa_ioc.h
index e4974bc24ef6..bd48abee781f 100644
--- a/drivers/net/bna/bfa_ioc.h
+++ b/drivers/net/bna/bfa_ioc.h
@@ -194,6 +194,7 @@ struct bfa_ioc_hwif {
bool msix);
void (*ioc_notify_fail) (struct bfa_ioc *ioc);
void (*ioc_ownership_reset) (struct bfa_ioc *ioc);
+ bool (*ioc_sync_start) (struct bfa_ioc *ioc);
void (*ioc_sync_join) (struct bfa_ioc *ioc);
void (*ioc_sync_leave) (struct bfa_ioc *ioc);
void (*ioc_sync_ack) (struct bfa_ioc *ioc);
diff --git a/drivers/net/bna/bfa_ioc_ct.c b/drivers/net/bna/bfa_ioc_ct.c
index 469997c4ffd1..87aecdf22cf9 100644
--- a/drivers/net/bna/bfa_ioc_ct.c
+++ b/drivers/net/bna/bfa_ioc_ct.c
@@ -41,6 +41,7 @@ static void bfa_ioc_ct_map_port(struct bfa_ioc *ioc);
static void bfa_ioc_ct_isr_mode_set(struct bfa_ioc *ioc, bool msix);
static void bfa_ioc_ct_notify_fail(struct bfa_ioc *ioc);
static void bfa_ioc_ct_ownership_reset(struct bfa_ioc *ioc);
+static bool bfa_ioc_ct_sync_start(struct bfa_ioc *ioc);
static void bfa_ioc_ct_sync_join(struct bfa_ioc *ioc);
static void bfa_ioc_ct_sync_leave(struct bfa_ioc *ioc);
static void bfa_ioc_ct_sync_ack(struct bfa_ioc *ioc);
@@ -63,6 +64,7 @@ bfa_nw_ioc_set_ct_hwif(struct bfa_ioc *ioc)
nw_hwif_ct.ioc_isr_mode_set = bfa_ioc_ct_isr_mode_set;
nw_hwif_ct.ioc_notify_fail = bfa_ioc_ct_notify_fail;
nw_hwif_ct.ioc_ownership_reset = bfa_ioc_ct_ownership_reset;
+ nw_hwif_ct.ioc_sync_start = bfa_ioc_ct_sync_start;
nw_hwif_ct.ioc_sync_join = bfa_ioc_ct_sync_join;
nw_hwif_ct.ioc_sync_leave = bfa_ioc_ct_sync_leave;
nw_hwif_ct.ioc_sync_ack = bfa_ioc_ct_sync_ack;
@@ -345,6 +347,32 @@ bfa_ioc_ct_ownership_reset(struct bfa_ioc *ioc)
/**
* Synchronized IOC failure processing routines
*/
+static bool
+bfa_ioc_ct_sync_start(struct bfa_ioc *ioc)
+{
+ u32 r32 = readl(ioc->ioc_regs.ioc_fail_sync);
+ u32 sync_reqd = bfa_ioc_ct_get_sync_reqd(r32);
+
+ /*
+ * Driver load time. If the sync required bit for this PCI fn
+ * is set, it is due to an unclean exit by the driver for this
+ * PCI fn in the previous incarnation. Whoever comes here first
+ * should clean it up, no matter which PCI fn.
+ */
+
+ if (sync_reqd & bfa_ioc_ct_sync_pos(ioc)) {
+ writel(0, ioc->ioc_regs.ioc_fail_sync);
+ writel(1, ioc->ioc_regs.ioc_usage_reg);
+ writel(BFI_IOC_UNINIT, ioc->ioc_regs.ioc_fwstate);
+ writel(BFI_IOC_UNINIT, ioc->ioc_regs.alt_ioc_fwstate);
+ return true;
+ }
+
+ return bfa_ioc_ct_sync_complete(ioc);
+}
+/**
+ * Synchronized IOC failure processing routines
+ */
static void
bfa_ioc_ct_sync_join(struct bfa_ioc *ioc)
{
diff --git a/drivers/net/bna/bfi.h b/drivers/net/bna/bfi.h
index a97396811050..6050379526f7 100644
--- a/drivers/net/bna/bfi.h
+++ b/drivers/net/bna/bfi.h
@@ -184,12 +184,14 @@ enum bfi_mclass {
#define BFI_IOC_MSGLEN_MAX 32 /* 32 bytes */
#define BFI_BOOT_TYPE_OFF 8
-#define BFI_BOOT_PARAM_OFF 12
+#define BFI_BOOT_LOADER_OFF 12
-#define BFI_BOOT_TYPE_NORMAL 0 /* param is device id */
+#define BFI_BOOT_TYPE_NORMAL 0
#define BFI_BOOT_TYPE_FLASH 1
#define BFI_BOOT_TYPE_MEMTEST 2
+#define BFI_BOOT_LOADER_OS 0
+
#define BFI_BOOT_MEMTEST_RES_ADDR 0x900
#define BFI_BOOT_MEMTEST_RES_SIG 0xA0A1A2A3
diff --git a/drivers/net/bna/bna_ctrl.c b/drivers/net/bna/bna_ctrl.c
index e1527472b961..53b14169e363 100644
--- a/drivers/net/bna/bna_ctrl.c
+++ b/drivers/net/bna/bna_ctrl.c
@@ -246,7 +246,6 @@ static void
bna_mbox_flush_q(struct bna *bna, struct list_head *q)
{
struct bna_mbox_qe *mb_qe = NULL;
- struct bfi_mhdr *cmd_h;
struct list_head *mb_q;
void (*cbfn)(void *arg, int status);
void *cbarg;
@@ -260,7 +259,6 @@ bna_mbox_flush_q(struct bna *bna, struct list_head *q)
bfa_q_qe_init(mb_qe);
bna->mbox_mod.msg_pending--;
- cmd_h = (struct bfi_mhdr *)(&mb_qe->cmd.msg[0]);
if (cbfn)
cbfn(cbarg, BNA_CB_NOT_EXEC);
}
@@ -2774,23 +2772,6 @@ bna_rit_mod_init(struct bna_rit_mod *rit_mod,
}
}
-static void
-bna_rit_mod_uninit(struct bna_rit_mod *rit_mod)
-{
- struct bna_rit_segment *rit_segment;
- struct list_head *qe;
- int i;
- int j;
-
- for (i = 0; i < BFI_RIT_SEG_TOTAL_POOLS; i++) {
- j = 0;
- list_for_each(qe, &rit_mod->rit_seg_pool[i]) {
- rit_segment = (struct bna_rit_segment *)qe;
- j++;
- }
- }
-}
-
/*
* Public functions
*/
@@ -2977,8 +2958,6 @@ bna_uninit(struct bna *bna)
bna_ucam_mod_uninit(&bna->ucam_mod);
- bna_rit_mod_uninit(&bna->rit_mod);
-
bna_ib_mod_uninit(&bna->ib_mod);
bna_rx_mod_uninit(&bna->rx_mod);
diff --git a/drivers/net/bna/bna_hw.h b/drivers/net/bna/bna_hw.h
index 806b224a4c63..6cb89692f5c1 100644
--- a/drivers/net/bna/bna_hw.h
+++ b/drivers/net/bna/bna_hw.h
@@ -897,7 +897,7 @@ static struct bna_ritseg_pool_cfg name[BFI_RIT_SEG_TOTAL_POOLS] = \
* Catapult RSS Table Base Offset Address
*
* Exists in RAD memory space.
- * Each entry is 352 bits, but alligned on
+ * Each entry is 352 bits, but aligned on
* 64 byte (512 bit) boundary. Accessed
* 4 byte words, the whole entry can be
* broken into 11 word accesses.
diff --git a/drivers/net/bna/bna_txrx.c b/drivers/net/bna/bna_txrx.c
index 58c7664040dc..380085cc3088 100644
--- a/drivers/net/bna/bna_txrx.c
+++ b/drivers/net/bna/bna_txrx.c
@@ -2229,14 +2229,11 @@ void
bna_rit_create(struct bna_rx *rx)
{
struct list_head *qe_rxp;
- struct bna *bna;
struct bna_rxp *rxp;
struct bna_rxq *q0 = NULL;
struct bna_rxq *q1 = NULL;
int offset;
- bna = rx->bna;
-
offset = 0;
list_for_each(qe_rxp, &rx->rxp_q) {
rxp = (struct bna_rxp *)qe_rxp;
@@ -2830,7 +2827,7 @@ bna_rx_create(struct bna *bna, struct bnad *bnad,
struct bna_mem_descr *dsqpt_mem; /* s/w qpt for data */
struct bna_mem_descr *hpage_mem; /* hdr page mem */
struct bna_mem_descr *dpage_mem; /* data page mem */
- int i, cpage_idx = 0, dpage_idx = 0, hpage_idx = 0, ret;
+ int i, cpage_idx = 0, dpage_idx = 0, hpage_idx = 0;
int dpage_count, hpage_count, rcb_idx;
struct bna_ib_config ibcfg;
/* Fail if we don't have enough RXPs, RXQs */
@@ -2924,7 +2921,7 @@ bna_rx_create(struct bna *bna, struct bnad *bnad,
ibcfg.interpkt_timeo = BFI_RX_INTERPKT_TIMEO;
ibcfg.ctrl_flags = BFI_IB_CF_INT_ENABLE;
- ret = bna_ib_config(rxp->cq.ib, &ibcfg);
+ bna_ib_config(rxp->cq.ib, &ibcfg);
/* Link rxqs to rxp */
_rxp_add_rxqs(rxp, q0, q1);
diff --git a/drivers/net/bna/bnad.c b/drivers/net/bna/bnad.c
index 9f356d5d0f33..e588511f47fb 100644
--- a/drivers/net/bna/bnad.c
+++ b/drivers/net/bna/bnad.c
@@ -501,7 +501,7 @@ bnad_poll_cq(struct bnad *bnad, struct bna_ccb *ccb, int budget)
skb_put(skb, ntohs(cmpl->length));
if (likely
- (bnad->rx_csum &&
+ ((bnad->netdev->features & NETIF_F_RXCSUM) &&
(((flags & BNA_CQ_EF_IPV4) &&
(flags & BNA_CQ_EF_L3_CKSUM_OK)) ||
(flags & BNA_CQ_EF_IPV6)) &&
@@ -1837,7 +1837,6 @@ bnad_setup_rx(struct bnad *bnad, uint rx_id)
/* Initialize the Rx event handlers */
rx_cbfn.rcb_setup_cbfn = bnad_cb_rcb_setup;
rx_cbfn.rcb_destroy_cbfn = bnad_cb_rcb_destroy;
- rx_cbfn.rcb_destroy_cbfn = NULL;
rx_cbfn.ccb_setup_cbfn = bnad_cb_ccb_setup;
rx_cbfn.ccb_destroy_cbfn = bnad_cb_ccb_destroy;
rx_cbfn.rx_cleanup_cbfn = bnad_cb_rx_cleanup;
@@ -2903,23 +2902,20 @@ bnad_netdev_init(struct bnad *bnad, bool using_dac)
{
struct net_device *netdev = bnad->netdev;
- netdev->features |= NETIF_F_IPV6_CSUM;
- netdev->features |= NETIF_F_TSO;
- netdev->features |= NETIF_F_TSO6;
+ netdev->hw_features = NETIF_F_SG | NETIF_F_RXCSUM |
+ NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
+ NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_HW_VLAN_TX;
- netdev->features |= NETIF_F_GRO;
- pr_warn("bna: GRO enabled, using kernel stack GRO\n");
+ netdev->vlan_features = NETIF_F_SG | NETIF_F_HIGHDMA |
+ NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
+ NETIF_F_TSO | NETIF_F_TSO6;
- netdev->features |= NETIF_F_SG | NETIF_F_IP_CSUM;
+ netdev->features |= netdev->hw_features |
+ NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER;
if (using_dac)
netdev->features |= NETIF_F_HIGHDMA;
- netdev->features |=
- NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX |
- NETIF_F_HW_VLAN_FILTER;
-
- netdev->vlan_features = netdev->features;
netdev->mem_start = bnad->mmio_start;
netdev->mem_end = bnad->mmio_start + bnad->mmio_len - 1;
@@ -2970,7 +2966,6 @@ bnad_init(struct bnad *bnad,
bnad->txq_depth = BNAD_TXQ_DEPTH;
bnad->rxq_depth = BNAD_RXQ_DEPTH;
- bnad->rx_csum = true;
bnad->tx_coalescing_timeo = BFI_TX_COALESCING_TIMEO;
bnad->rx_coalescing_timeo = BFI_RX_COALESCING_TIMEO;
diff --git a/drivers/net/bna/bnad.h b/drivers/net/bna/bnad.h
index a89117fa4970..ccdabad0a40c 100644
--- a/drivers/net/bna/bnad.h
+++ b/drivers/net/bna/bnad.h
@@ -237,8 +237,6 @@ struct bnad {
struct bna_rx_config rx_config[BNAD_MAX_RXS];
struct bna_tx_config tx_config[BNAD_MAX_TXS];
- u32 rx_csum;
-
void __iomem *bar0; /* BAR0 address */
struct bna bna;
diff --git a/drivers/net/bna/bnad_ethtool.c b/drivers/net/bna/bnad_ethtool.c
index 142d6047da27..3330cd78da2c 100644
--- a/drivers/net/bna/bnad_ethtool.c
+++ b/drivers/net/bna/bnad_ethtool.c
@@ -237,10 +237,10 @@ bnad_get_settings(struct net_device *netdev, struct ethtool_cmd *cmd)
cmd->phy_address = 0;
if (netif_carrier_ok(netdev)) {
- cmd->speed = SPEED_10000;
+ ethtool_cmd_speed_set(cmd, SPEED_10000);
cmd->duplex = DUPLEX_FULL;
} else {
- cmd->speed = -1;
+ ethtool_cmd_speed_set(cmd, -1);
cmd->duplex = -1;
}
cmd->transceiver = XCVR_EXTERNAL;
@@ -256,7 +256,8 @@ bnad_set_settings(struct net_device *netdev, struct ethtool_cmd *cmd)
/* 10G full duplex setting supported only */
if (cmd->autoneg == AUTONEG_ENABLE)
return -EOPNOTSUPP; else {
- if ((cmd->speed == SPEED_10000) && (cmd->duplex == DUPLEX_FULL))
+ if ((ethtool_cmd_speed(cmd) == SPEED_10000)
+ && (cmd->duplex == DUPLEX_FULL))
return 0;
}
@@ -806,61 +807,6 @@ bnad_set_pauseparam(struct net_device *netdev,
return 0;
}
-static u32
-bnad_get_rx_csum(struct net_device *netdev)
-{
- u32 rx_csum;
- struct bnad *bnad = netdev_priv(netdev);
-
- rx_csum = bnad->rx_csum;
- return rx_csum;
-}
-
-static int
-bnad_set_rx_csum(struct net_device *netdev, u32 rx_csum)
-{
- struct bnad *bnad = netdev_priv(netdev);
-
- mutex_lock(&bnad->conf_mutex);
- bnad->rx_csum = rx_csum;
- mutex_unlock(&bnad->conf_mutex);
- return 0;
-}
-
-static int
-bnad_set_tx_csum(struct net_device *netdev, u32 tx_csum)
-{
- struct bnad *bnad = netdev_priv(netdev);
-
- mutex_lock(&bnad->conf_mutex);
- if (tx_csum) {
- netdev->features |= NETIF_F_IP_CSUM;
- netdev->features |= NETIF_F_IPV6_CSUM;
- } else {
- netdev->features &= ~NETIF_F_IP_CSUM;
- netdev->features &= ~NETIF_F_IPV6_CSUM;
- }
- mutex_unlock(&bnad->conf_mutex);
- return 0;
-}
-
-static int
-bnad_set_tso(struct net_device *netdev, u32 tso)
-{
- struct bnad *bnad = netdev_priv(netdev);
-
- mutex_lock(&bnad->conf_mutex);
- if (tso) {
- netdev->features |= NETIF_F_TSO;
- netdev->features |= NETIF_F_TSO6;
- } else {
- netdev->features &= ~NETIF_F_TSO;
- netdev->features &= ~NETIF_F_TSO6;
- }
- mutex_unlock(&bnad->conf_mutex);
- return 0;
-}
-
static void
bnad_get_strings(struct net_device *netdev, u32 stringset, u8 * string)
{
@@ -1256,14 +1202,6 @@ static struct ethtool_ops bnad_ethtool_ops = {
.set_ringparam = bnad_set_ringparam,
.get_pauseparam = bnad_get_pauseparam,
.set_pauseparam = bnad_set_pauseparam,
- .get_rx_csum = bnad_get_rx_csum,
- .set_rx_csum = bnad_set_rx_csum,
- .get_tx_csum = ethtool_op_get_tx_csum,
- .set_tx_csum = bnad_set_tx_csum,
- .get_sg = ethtool_op_get_sg,
- .set_sg = ethtool_op_set_sg,
- .get_tso = ethtool_op_get_tso,
- .set_tso = bnad_set_tso,
.get_strings = bnad_get_strings,
.get_ethtool_stats = bnad_get_ethtool_stats,
.get_sset_count = bnad_get_sset_count
diff --git a/drivers/net/bnx2.c b/drivers/net/bnx2.c
index 8e6d618b5305..57d3293c65bd 100644
--- a/drivers/net/bnx2.c
+++ b/drivers/net/bnx2.c
@@ -3174,7 +3174,7 @@ bnx2_rx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
}
skb_checksum_none_assert(skb);
- if (bp->rx_csum &&
+ if ((bp->dev->features & NETIF_F_RXCSUM) &&
(status & (L2_FHDR_STATUS_TCP_SEGMENT |
L2_FHDR_STATUS_UDP_DATAGRAM))) {
@@ -6696,17 +6696,16 @@ bnx2_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
if (bp->autoneg & AUTONEG_SPEED) {
cmd->autoneg = AUTONEG_ENABLE;
- }
- else {
+ } else {
cmd->autoneg = AUTONEG_DISABLE;
}
if (netif_carrier_ok(dev)) {
- cmd->speed = bp->line_speed;
+ ethtool_cmd_speed_set(cmd, bp->line_speed);
cmd->duplex = bp->duplex;
}
else {
- cmd->speed = -1;
+ ethtool_cmd_speed_set(cmd, -1);
cmd->duplex = -1;
}
spin_unlock_bh(&bp->phy_lock);
@@ -6758,21 +6757,21 @@ bnx2_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
advertising |= ADVERTISED_Autoneg;
}
else {
+ u32 speed = ethtool_cmd_speed(cmd);
if (cmd->port == PORT_FIBRE) {
- if ((cmd->speed != SPEED_1000 &&
- cmd->speed != SPEED_2500) ||
+ if ((speed != SPEED_1000 &&
+ speed != SPEED_2500) ||
(cmd->duplex != DUPLEX_FULL))
goto err_out_unlock;
- if (cmd->speed == SPEED_2500 &&
+ if (speed == SPEED_2500 &&
!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
goto err_out_unlock;
- }
- else if (cmd->speed == SPEED_1000 || cmd->speed == SPEED_2500)
+ } else if (speed == SPEED_1000 || speed == SPEED_2500)
goto err_out_unlock;
autoneg &= ~AUTONEG_SPEED;
- req_line_speed = cmd->speed;
+ req_line_speed = speed;
req_duplex = cmd->duplex;
advertising = 0;
}
@@ -7189,38 +7188,6 @@ bnx2_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
return 0;
}
-static u32
-bnx2_get_rx_csum(struct net_device *dev)
-{
- struct bnx2 *bp = netdev_priv(dev);
-
- return bp->rx_csum;
-}
-
-static int
-bnx2_set_rx_csum(struct net_device *dev, u32 data)
-{
- struct bnx2 *bp = netdev_priv(dev);
-
- bp->rx_csum = data;
- return 0;
-}
-
-static int
-bnx2_set_tso(struct net_device *dev, u32 data)
-{
- struct bnx2 *bp = netdev_priv(dev);
-
- if (data) {
- dev->features |= NETIF_F_TSO | NETIF_F_TSO_ECN;
- if (CHIP_NUM(bp) == CHIP_NUM_5709)
- dev->features |= NETIF_F_TSO6;
- } else
- dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO6 |
- NETIF_F_TSO_ECN);
- return 0;
-}
-
static struct {
char string[ETH_GSTRING_LEN];
} bnx2_stats_str_arr[] = {
@@ -7495,82 +7462,74 @@ bnx2_get_ethtool_stats(struct net_device *dev,
}
static int
-bnx2_phys_id(struct net_device *dev, u32 data)
+bnx2_set_phys_id(struct net_device *dev, enum ethtool_phys_id_state state)
{
struct bnx2 *bp = netdev_priv(dev);
- int i;
- u32 save;
- bnx2_set_power_state(bp, PCI_D0);
+ switch (state) {
+ case ETHTOOL_ID_ACTIVE:
+ bnx2_set_power_state(bp, PCI_D0);
+
+ bp->leds_save = REG_RD(bp, BNX2_MISC_CFG);
+ REG_WR(bp, BNX2_MISC_CFG, BNX2_MISC_CFG_LEDMODE_MAC);
+ return 1; /* cycle on/off once per second */
+
+ case ETHTOOL_ID_ON:
+ REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE |
+ BNX2_EMAC_LED_1000MB_OVERRIDE |
+ BNX2_EMAC_LED_100MB_OVERRIDE |
+ BNX2_EMAC_LED_10MB_OVERRIDE |
+ BNX2_EMAC_LED_TRAFFIC_OVERRIDE |
+ BNX2_EMAC_LED_TRAFFIC);
+ break;
- if (data == 0)
- data = 2;
+ case ETHTOOL_ID_OFF:
+ REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE);
+ break;
- save = REG_RD(bp, BNX2_MISC_CFG);
- REG_WR(bp, BNX2_MISC_CFG, BNX2_MISC_CFG_LEDMODE_MAC);
+ case ETHTOOL_ID_INACTIVE:
+ REG_WR(bp, BNX2_EMAC_LED, 0);
+ REG_WR(bp, BNX2_MISC_CFG, bp->leds_save);
- for (i = 0; i < (data * 2); i++) {
- if ((i % 2) == 0) {
- REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE);
- }
- else {
- REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE |
- BNX2_EMAC_LED_1000MB_OVERRIDE |
- BNX2_EMAC_LED_100MB_OVERRIDE |
- BNX2_EMAC_LED_10MB_OVERRIDE |
- BNX2_EMAC_LED_TRAFFIC_OVERRIDE |
- BNX2_EMAC_LED_TRAFFIC);
- }
- msleep_interruptible(500);
- if (signal_pending(current))
- break;
+ if (!netif_running(dev))
+ bnx2_set_power_state(bp, PCI_D3hot);
+ break;
}
- REG_WR(bp, BNX2_EMAC_LED, 0);
- REG_WR(bp, BNX2_MISC_CFG, save);
-
- if (!netif_running(dev))
- bnx2_set_power_state(bp, PCI_D3hot);
return 0;
}
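/* For context, a rough sketch (simplified; not the literal ethtool core code)
 * of the calling sequence a .set_phys_id implementation can expect.  The
 * helper name and loop shape are illustrative assumptions.
 */
static void phys_id_sequence_sketch(struct net_device *dev,
				    const struct ethtool_ops *ops,
				    unsigned int seconds)
{
	/* ETHTOOL_ID_ACTIVE returns the blink frequency in cycles per second
	 * (bnx2 returns 1 above) or 0 if the driver blinks on its own.
	 */
	int rate = ops->set_phys_id(dev, ETHTOOL_ID_ACTIVE);
	unsigned int i;

	if (rate < 0)
		return;			/* not supported */

	for (i = 0; rate && i < seconds * rate; i++) {
		ops->set_phys_id(dev, ETHTOOL_ID_ON);
		msleep(500 / rate);
		ops->set_phys_id(dev, ETHTOOL_ID_OFF);
		msleep(500 / rate);
	}

	ops->set_phys_id(dev, ETHTOOL_ID_INACTIVE);
}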
-static int
-bnx2_set_tx_csum(struct net_device *dev, u32 data)
+static u32
+bnx2_fix_features(struct net_device *dev, u32 features)
{
struct bnx2 *bp = netdev_priv(dev);
- if (CHIP_NUM(bp) == CHIP_NUM_5709)
- return ethtool_op_set_tx_ipv6_csum(dev, data);
- else
- return ethtool_op_set_tx_csum(dev, data);
+ if (!(bp->flags & BNX2_FLAG_CAN_KEEP_VLAN))
+ features |= NETIF_F_HW_VLAN_RX;
+
+ return features;
}
static int
-bnx2_set_flags(struct net_device *dev, u32 data)
+bnx2_set_features(struct net_device *dev, u32 features)
{
struct bnx2 *bp = netdev_priv(dev);
- int rc;
-
- if (!(bp->flags & BNX2_FLAG_CAN_KEEP_VLAN) &&
- !(data & ETH_FLAG_RXVLAN))
- return -EINVAL;
/* TSO with VLAN tag won't work with current firmware */
- if (!(data & ETH_FLAG_TXVLAN))
- return -EINVAL;
-
- rc = ethtool_op_set_flags(dev, data, ETH_FLAG_RXHASH | ETH_FLAG_RXVLAN |
- ETH_FLAG_TXVLAN);
- if (rc)
- return rc;
+ if (features & NETIF_F_HW_VLAN_TX)
+ dev->vlan_features |= (dev->hw_features & NETIF_F_ALL_TSO);
+ else
+ dev->vlan_features &= ~NETIF_F_ALL_TSO;
- if ((!!(data & ETH_FLAG_RXVLAN) !=
+ if ((!!(features & NETIF_F_HW_VLAN_RX) !=
!!(bp->rx_mode & BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG)) &&
netif_running(dev)) {
bnx2_netif_stop(bp, false);
+ dev->features = features;
bnx2_set_rx_mode(dev);
bnx2_fw_sync(bp, BNX2_DRV_MSG_CODE_KEEP_VLAN_UPDATE, 0, 1);
bnx2_netif_start(bp, false);
+ return 1;
}
return 0;
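/* For reference, a sketch of the assumed (simplified) core flow through the
 * two hooks above when a feature change is requested via ethtool; the helper
 * below is illustrative only and is not taken from this patch.
 */
static void features_update_sketch(struct net_device *dev, u32 wanted)
{
	const struct net_device_ops *ops = dev->netdev_ops;
	u32 features = wanted;
	int err = 0;

	if (ops->ndo_fix_features)
		features = ops->ndo_fix_features(dev, features);

	if (features == dev->features)
		return;				/* nothing to do */

	if (ops->ndo_set_features)
		err = ops->ndo_set_features(dev, features);

	if (err == 0)
		dev->features = features;	/* core commits the new mask */
	/* err > 0 (as bnx2_set_features() returns above): the driver already
	 * committed dev->features itself; err < 0: the change was rejected.
	 */
}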
@@ -7595,18 +7554,11 @@ static const struct ethtool_ops bnx2_ethtool_ops = {
.set_ringparam = bnx2_set_ringparam,
.get_pauseparam = bnx2_get_pauseparam,
.set_pauseparam = bnx2_set_pauseparam,
- .get_rx_csum = bnx2_get_rx_csum,
- .set_rx_csum = bnx2_set_rx_csum,
- .set_tx_csum = bnx2_set_tx_csum,
- .set_sg = ethtool_op_set_sg,
- .set_tso = bnx2_set_tso,
.self_test = bnx2_self_test,
.get_strings = bnx2_get_strings,
- .phys_id = bnx2_phys_id,
+ .set_phys_id = bnx2_set_phys_id,
.get_ethtool_stats = bnx2_get_ethtool_stats,
.get_sset_count = bnx2_get_sset_count,
- .set_flags = bnx2_set_flags,
- .get_flags = ethtool_op_get_flags,
};
/* Called with rtnl_lock */
@@ -8118,8 +8070,6 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
bp->tx_ring_size = MAX_TX_DESC_CNT;
bnx2_set_rx_ring_size(bp, 255);
- bp->rx_csum = 1;
-
bp->tx_quick_cons_trip_int = 2;
bp->tx_quick_cons_trip = 20;
bp->tx_ticks_int = 18;
@@ -8311,17 +8261,14 @@ static const struct net_device_ops bnx2_netdev_ops = {
.ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = bnx2_change_mac_addr,
.ndo_change_mtu = bnx2_change_mtu,
+ .ndo_fix_features = bnx2_fix_features,
+ .ndo_set_features = bnx2_set_features,
.ndo_tx_timeout = bnx2_tx_timeout,
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = poll_bnx2,
#endif
};
-static inline void vlan_features_add(struct net_device *dev, u32 flags)
-{
- dev->vlan_features |= flags;
-}
-
static int __devinit
bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
@@ -8361,20 +8308,17 @@ bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
memcpy(dev->dev_addr, bp->mac_addr, 6);
memcpy(dev->perm_addr, bp->mac_addr, 6);
- dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG | NETIF_F_GRO |
- NETIF_F_RXHASH;
- vlan_features_add(dev, NETIF_F_IP_CSUM | NETIF_F_SG);
- if (CHIP_NUM(bp) == CHIP_NUM_5709) {
- dev->features |= NETIF_F_IPV6_CSUM;
- vlan_features_add(dev, NETIF_F_IPV6_CSUM);
- }
- dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
- dev->features |= NETIF_F_TSO | NETIF_F_TSO_ECN;
- vlan_features_add(dev, NETIF_F_TSO | NETIF_F_TSO_ECN);
- if (CHIP_NUM(bp) == CHIP_NUM_5709) {
- dev->features |= NETIF_F_TSO6;
- vlan_features_add(dev, NETIF_F_TSO6);
- }
+ dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_SG |
+ NETIF_F_TSO | NETIF_F_TSO_ECN |
+ NETIF_F_RXHASH | NETIF_F_RXCSUM;
+
+ if (CHIP_NUM(bp) == CHIP_NUM_5709)
+ dev->hw_features |= NETIF_F_IPV6_CSUM | NETIF_F_TSO6;
+
+ dev->vlan_features = dev->hw_features;
+ dev->hw_features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
+ dev->features |= dev->hw_features;
+
if ((rc = register_netdev(dev))) {
dev_err(&pdev->dev, "Cannot register net device\n");
goto error;
@@ -8413,6 +8357,8 @@ bnx2_remove_one(struct pci_dev *pdev)
unregister_netdev(dev);
+ del_timer_sync(&bp->timer);
+
if (bp->mips_firmware)
release_firmware(bp->mips_firmware);
if (bp->rv2p_firmware)
diff --git a/drivers/net/bnx2.h b/drivers/net/bnx2.h
index 68020451dc4f..bf371f6fe154 100644
--- a/drivers/net/bnx2.h
+++ b/drivers/net/bnx2.h
@@ -6754,8 +6754,6 @@ struct bnx2 {
u32 rx_max_ring_idx;
u32 rx_max_pg_ring_idx;
- u32 rx_csum;
-
/* TX constants */
int tx_ring_size;
u32 tx_wake_thresh;
@@ -6922,6 +6920,7 @@ struct bnx2 {
u8 num_tx_rings;
u8 num_rx_rings;
+ u32 leds_save;
u32 idle_chk_status_idx;
#ifdef BCM_CNIC
diff --git a/drivers/net/bnx2x/bnx2x.h b/drivers/net/bnx2x/bnx2x.h
index b7ff87b35fbb..16a76f074df5 100644
--- a/drivers/net/bnx2x/bnx2x.h
+++ b/drivers/net/bnx2x/bnx2x.h
@@ -1,6 +1,6 @@
/* bnx2x.h: Broadcom Everest network driver.
*
- * Copyright (c) 2007-2010 Broadcom Corporation
+ * Copyright (c) 2007-2011 Broadcom Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -22,8 +22,8 @@
* (you will need to reboot afterwards) */
/* #define BNX2X_STOP_ON_ERROR */
-#define DRV_MODULE_VERSION "1.62.11-0"
-#define DRV_MODULE_RELDATE "2011/01/31"
+#define DRV_MODULE_VERSION "1.62.12-0"
+#define DRV_MODULE_RELDATE "2011/03/20"
#define BNX2X_BC_VER 0x040200
#define BNX2X_MULTI_QUEUE
@@ -473,7 +473,8 @@ struct bnx2x_fastpath {
#define NUM_RX_BD (RX_DESC_CNT * NUM_RX_RINGS)
#define MAX_RX_BD (NUM_RX_BD - 1)
#define MAX_RX_AVAIL (MAX_RX_DESC_CNT * NUM_RX_RINGS - 2)
-#define MIN_RX_AVAIL 128
+#define MIN_RX_SIZE_TPA 72
+#define MIN_RX_SIZE_NONTPA 10
#define INIT_JUMBO_RX_RING_SIZE MAX_RX_AVAIL
#define INIT_RX_RING_SIZE MAX_RX_AVAIL
#define NEXT_RX_IDX(x) ((((x) & RX_DESC_MASK) == \
@@ -893,6 +894,22 @@ typedef enum {
(&bp->def_status_blk->sp_sb.\
index_values[HC_SP_INDEX_EQ_CONS])
+/* This is the data that will be used to create a link report message.
+ * We will keep the data used for the last link report in order
+ * to prevent reporting the same link parameters twice.
+ */
+struct bnx2x_link_report_data {
+ u16 line_speed; /* Effective line speed */
+ unsigned long link_report_flags;/* BNX2X_LINK_REPORT_XXX flags */
+};
+
+enum {
+ BNX2X_LINK_REPORT_FD, /* Full DUPLEX */
+ BNX2X_LINK_REPORT_LINK_DOWN,
+ BNX2X_LINK_REPORT_RX_FC_ON,
+ BNX2X_LINK_REPORT_TX_FC_ON,
+};
+
struct bnx2x {
/* Fields used in the tx and intr/napi performance paths
* are grouped together in the beginning of the structure
@@ -918,7 +935,6 @@ struct bnx2x {
int tx_ring_size;
- u32 rx_csum;
/* L2 header size + 2*VLANs (8 bytes) + LLC SNAP (8 bytes) */
#define ETH_OVREHEAD (ETH_HLEN + 8 + 8)
#define ETH_MIN_PACKET_SIZE 60
@@ -1026,6 +1042,9 @@ struct bnx2x {
struct link_params link_params;
struct link_vars link_vars;
+ u32 link_cnt;
+ struct bnx2x_link_report_data last_reported_link;
+
struct mdio_if_info mdio;
struct bnx2x_common common;
@@ -1220,7 +1239,7 @@ struct bnx2x {
struct bnx2x_dcbx_port_params dcbx_port_params;
int dcb_version;
- /* DCBX Negotation results */
+ /* DCBX Negotiation results */
struct dcbx_features dcbx_local_feat;
u32 dcbx_error;
u32 pending_max;
@@ -1442,6 +1461,8 @@ struct bnx2x_func_init_params {
#define WAIT_RAMROD_POLL 0x01
#define WAIT_RAMROD_COMMON 0x02
+void bnx2x_read_mf_cfg(struct bnx2x *bp);
+
/* dmae */
void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32);
void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
diff --git a/drivers/net/bnx2x/bnx2x_cmn.c b/drivers/net/bnx2x/bnx2x_cmn.c
index e83ac6dd6fc0..6ee6601b5176 100644
--- a/drivers/net/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/bnx2x/bnx2x_cmn.c
@@ -1,6 +1,6 @@
/* bnx2x_cmn.c: Broadcom Everest network driver.
*
- * Copyright (c) 2007-2010 Broadcom Corporation
+ * Copyright (c) 2007-2011 Broadcom Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -27,6 +27,49 @@
static int bnx2x_setup_irqs(struct bnx2x *bp);
+/**
+ * bnx2x_bz_fp - zero content of the fastpath structure.
+ *
+ * @bp: driver handle
+ * @index: fastpath index to be zeroed
+ *
+ * Makes sure the contents of bp->fp[index].napi are kept
+ * intact.
+ */
+static inline void bnx2x_bz_fp(struct bnx2x *bp, int index)
+{
+ struct bnx2x_fastpath *fp = &bp->fp[index];
+ struct napi_struct orig_napi = fp->napi;
+ /* bzero bnx2x_fastpath contents */
+ memset(fp, 0, sizeof(*fp));
+
+ /* Restore the NAPI object as it has been already initialized */
+ fp->napi = orig_napi;
+}
+
+/**
+ * bnx2x_move_fp - move content of the fastpath structure.
+ *
+ * @bp: driver handle
+ * @from: source FP index
+ * @to: destination FP index
+ *
+ * Makes sure the contents of bp->fp[to].napi are kept
+ * intact.
+ */
+static inline void bnx2x_move_fp(struct bnx2x *bp, int from, int to)
+{
+ struct bnx2x_fastpath *from_fp = &bp->fp[from];
+ struct bnx2x_fastpath *to_fp = &bp->fp[to];
+ struct napi_struct orig_napi = to_fp->napi;
+ /* Move bnx2x_fastpath contents */
+ memcpy(to_fp, from_fp, sizeof(*to_fp));
+ to_fp->index = to;
+
+ /* Restore the NAPI object as it has been already initialized */
+ to_fp->napi = orig_napi;
+}
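/* A generic illustration of the pattern the two helpers above implement
 * (the structure and function below are invented for the example):
 * netif_napi_add() is done only once at probe time, so the napi member must
 * survive every reload-time memset()/memcpy() of the fastpath structure.
 */
struct wiped_ctx {
	struct napi_struct napi;	/* initialised once, must be preserved */
	int reload_state;		/* reinitialised on every load */
};

static void wiped_ctx_reset(struct wiped_ctx *ctx)
{
	struct napi_struct saved = ctx->napi;

	memset(ctx, 0, sizeof(*ctx));
	ctx->napi = saved;		/* restore the long-lived member */
}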
+
/* free skb in the packet ring at pos idx
* return idx of last bd freed
*/
@@ -265,13 +308,15 @@ static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
*/
#define TPA_TSTAMP_OPT_LEN 12
/**
- * Calculate the approximate value of the MSS for this
- * aggregation using the first packet of it.
+ * bnx2x_set_lro_mss - calculate the approximate value of the MSS
*
- * @param bp
- * @param parsing_flags Parsing flags from the START CQE
- * @param len_on_bd Total length of the first packet for the
- * aggregation.
+ * @bp: driver handle
+ * @parsing_flags: parsing flags from the START CQE
+ * @len_on_bd: total length of the first packet for the
+ * aggregation.
+ *
+ * Approximate value of the MSS for this aggregation calculated using
+ * the first packet of it.
*/
static inline u16 bnx2x_set_lro_mss(struct bnx2x *bp, u16 parsing_flags,
u16 len_on_bd)
@@ -640,7 +685,7 @@ reuse_rx:
skb_checksum_none_assert(skb);
- if (bp->rx_csum) {
+ if (bp->dev->features & NETIF_F_RXCSUM) {
if (likely(BNX2X_RX_CSUM_OK(cqe)))
skb->ip_summed = CHECKSUM_UNNECESSARY;
else
@@ -758,35 +803,119 @@ u16 bnx2x_get_mf_speed(struct bnx2x *bp)
return line_speed;
}
+/**
+ * bnx2x_fill_report_data - fill link report data to report
+ *
+ * @bp: driver handle
+ * @data: link state to update
+ *
+ * It uses non-atomic bit operations because it is called under the mutex.
+ */
+static inline void bnx2x_fill_report_data(struct bnx2x *bp,
+ struct bnx2x_link_report_data *data)
+{
+ u16 line_speed = bnx2x_get_mf_speed(bp);
+
+ memset(data, 0, sizeof(*data));
+
+ /* Fill the report data: effective line speed */
+ data->line_speed = line_speed;
+
+ /* Link is down */
+ if (!bp->link_vars.link_up || (bp->flags & MF_FUNC_DIS))
+ __set_bit(BNX2X_LINK_REPORT_LINK_DOWN,
+ &data->link_report_flags);
+
+ /* Full DUPLEX */
+ if (bp->link_vars.duplex == DUPLEX_FULL)
+ __set_bit(BNX2X_LINK_REPORT_FD, &data->link_report_flags);
+
+ /* Rx Flow Control is ON */
+ if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX)
+ __set_bit(BNX2X_LINK_REPORT_RX_FC_ON, &data->link_report_flags);
+
+ /* Tx Flow Control is ON */
+ if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
+ __set_bit(BNX2X_LINK_REPORT_TX_FC_ON, &data->link_report_flags);
+}
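/* Side note on the __set_bit() calls above: the double-underscore bitops are
 * the non-atomic variants and are safe here only because every reader and
 * writer of link_report_flags runs under the PHY lock.  A hedged contrast,
 * for illustration only:
 */
static void bitops_contrast_sketch(unsigned long *flags)
{
	__set_bit(BNX2X_LINK_REPORT_FD, flags);	/* non-atomic: lock must be held */
	set_bit(BNX2X_LINK_REPORT_FD, flags);	/* atomic: safe without the lock */
}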
+
+/**
+ * bnx2x_link_report - report link status to OS.
+ *
+ * @bp: driver handle
+ *
+ * Calls __bnx2x_link_report() under the same locking scheme
+ * as the link/PHY state managing code to ensure consistent
+ * link reporting.
+ */
+
void bnx2x_link_report(struct bnx2x *bp)
{
- if (bp->flags & MF_FUNC_DIS) {
- netif_carrier_off(bp->dev);
- netdev_err(bp->dev, "NIC Link is Down\n");
- return;
- }
+ bnx2x_acquire_phy_lock(bp);
+ __bnx2x_link_report(bp);
+ bnx2x_release_phy_lock(bp);
+}
- if (bp->link_vars.link_up) {
- u16 line_speed;
+/**
+ * __bnx2x_link_report - report link status to OS.
+ *
+ * @bp: driver handle
+ *
+ * Non-atomic implementation.
+ * Should be called under the phy_lock.
+ */
+void __bnx2x_link_report(struct bnx2x *bp)
+{
+ struct bnx2x_link_report_data cur_data;
- if (bp->state == BNX2X_STATE_OPEN)
- netif_carrier_on(bp->dev);
- netdev_info(bp->dev, "NIC Link is Up, ");
+ /* reread mf_cfg */
+ if (!CHIP_IS_E1(bp))
+ bnx2x_read_mf_cfg(bp);
+
+ /* Read the current link report info */
+ bnx2x_fill_report_data(bp, &cur_data);
+
+ /* Don't report the same link parameters twice and don't re-report link down */
+ if (!memcmp(&cur_data, &bp->last_reported_link, sizeof(cur_data)) ||
+ (test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
+ &bp->last_reported_link.link_report_flags) &&
+ test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
+ &cur_data.link_report_flags)))
+ return;
- line_speed = bnx2x_get_mf_speed(bp);
+ bp->link_cnt++;
- pr_cont("%d Mbps ", line_speed);
+ /* We are going to report new link parameters now -
+ * remember the current data for the next time.
+ */
+ memcpy(&bp->last_reported_link, &cur_data, sizeof(cur_data));
+
+ if (test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
+ &cur_data.link_report_flags)) {
+ netif_carrier_off(bp->dev);
+ netdev_err(bp->dev, "NIC Link is Down\n");
+ return;
+ } else {
+ netif_carrier_on(bp->dev);
+ netdev_info(bp->dev, "NIC Link is Up, ");
+ pr_cont("%d Mbps ", cur_data.line_speed);
- if (bp->link_vars.duplex == DUPLEX_FULL)
+ if (test_and_clear_bit(BNX2X_LINK_REPORT_FD,
+ &cur_data.link_report_flags))
pr_cont("full duplex");
else
pr_cont("half duplex");
- if (bp->link_vars.flow_ctrl != BNX2X_FLOW_CTRL_NONE) {
- if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) {
+ /* Handle the FC flags last so that, at this point, only they
+ * can still be set; a non-zero link_report_flags then means
+ * some flow control is enabled.
+ */
+ if (cur_data.link_report_flags) {
+ if (test_bit(BNX2X_LINK_REPORT_RX_FC_ON,
+ &cur_data.link_report_flags)) {
pr_cont(", receive ");
- if (bp->link_vars.flow_ctrl &
- BNX2X_FLOW_CTRL_TX)
+ if (test_bit(BNX2X_LINK_REPORT_TX_FC_ON,
+ &cur_data.link_report_flags))
pr_cont("& transmit ");
} else {
pr_cont(", transmit ");
@@ -794,62 +923,9 @@ void bnx2x_link_report(struct bnx2x *bp)
pr_cont("flow control ON");
}
pr_cont("\n");
-
- } else { /* link_down */
- netif_carrier_off(bp->dev);
- netdev_err(bp->dev, "NIC Link is Down\n");
}
}
-/* Returns the number of actually allocated BDs */
-static inline int bnx2x_alloc_rx_bds(struct bnx2x_fastpath *fp,
- int rx_ring_size)
-{
- struct bnx2x *bp = fp->bp;
- u16 ring_prod, cqe_ring_prod;
- int i;
-
- fp->rx_comp_cons = 0;
- cqe_ring_prod = ring_prod = 0;
- for (i = 0; i < rx_ring_size; i++) {
- if (bnx2x_alloc_rx_skb(bp, fp, ring_prod) < 0) {
- BNX2X_ERR("was only able to allocate "
- "%d rx skbs on queue[%d]\n", i, fp->index);
- fp->eth_q_stats.rx_skb_alloc_failed++;
- break;
- }
- ring_prod = NEXT_RX_IDX(ring_prod);
- cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod);
- WARN_ON(ring_prod <= i);
- }
-
- fp->rx_bd_prod = ring_prod;
- /* Limit the CQE producer by the CQE ring size */
- fp->rx_comp_prod = min_t(u16, NUM_RCQ_RINGS*RCQ_DESC_CNT,
- cqe_ring_prod);
- fp->rx_pkt = fp->rx_calls = 0;
-
- return i;
-}
-
-static inline void bnx2x_alloc_rx_bd_ring(struct bnx2x_fastpath *fp)
-{
- struct bnx2x *bp = fp->bp;
- int rx_ring_size = bp->rx_ring_size ? bp->rx_ring_size :
- MAX_RX_AVAIL/bp->num_queues;
-
- rx_ring_size = max_t(int, MIN_RX_AVAIL, rx_ring_size);
-
- bnx2x_alloc_rx_bds(fp, rx_ring_size);
-
- /* Warning!
- * this will generate an interrupt (to the TSTORM)
- * must only be done after chip is initialized
- */
- bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod,
- fp->rx_sge_prod);
-}
-
void bnx2x_init_rx_rings(struct bnx2x *bp)
{
int func = BP_FUNC(bp);
@@ -858,6 +934,7 @@ void bnx2x_init_rx_rings(struct bnx2x *bp)
u16 ring_prod;
int i, j;
+ /* Allocate TPA resources */
for_each_rx_queue(bp, j) {
struct bnx2x_fastpath *fp = &bp->fp[j];
@@ -865,6 +942,7 @@ void bnx2x_init_rx_rings(struct bnx2x *bp)
"mtu %d rx_buf_size %d\n", bp->dev->mtu, fp->rx_buf_size);
if (!fp->disable_tpa) {
+ /* Fill the per-aggregation pool */
for (i = 0; i < max_agg_queues; i++) {
fp->tpa_pool[i].skb =
netdev_alloc_skb(bp->dev, fp->rx_buf_size);
@@ -919,13 +997,13 @@ void bnx2x_init_rx_rings(struct bnx2x *bp)
fp->rx_bd_cons = 0;
- bnx2x_set_next_page_rx_bd(fp);
-
- /* CQ ring */
- bnx2x_set_next_page_rx_cq(fp);
-
- /* Allocate BDs and initialize BD ring */
- bnx2x_alloc_rx_bd_ring(fp);
+ /* Activate BD ring */
+ /* Warning!
+ * this will generate an interrupt (to the TSTORM)
+ * must only be done after chip is initialized
+ */
+ bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod,
+ fp->rx_sge_prod);
if (j != 0)
continue;
@@ -959,27 +1037,40 @@ static void bnx2x_free_tx_skbs(struct bnx2x *bp)
}
}
+static void bnx2x_free_rx_bds(struct bnx2x_fastpath *fp)
+{
+ struct bnx2x *bp = fp->bp;
+ int i;
+
+ /* ring wasn't allocated */
+ if (fp->rx_buf_ring == NULL)
+ return;
+
+ for (i = 0; i < NUM_RX_BD; i++) {
+ struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i];
+ struct sk_buff *skb = rx_buf->skb;
+
+ if (skb == NULL)
+ continue;
+
+ dma_unmap_single(&bp->pdev->dev,
+ dma_unmap_addr(rx_buf, mapping),
+ fp->rx_buf_size, DMA_FROM_DEVICE);
+
+ rx_buf->skb = NULL;
+ dev_kfree_skb(skb);
+ }
+}
+
static void bnx2x_free_rx_skbs(struct bnx2x *bp)
{
- int i, j;
+ int j;
for_each_rx_queue(bp, j) {
struct bnx2x_fastpath *fp = &bp->fp[j];
- for (i = 0; i < NUM_RX_BD; i++) {
- struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i];
- struct sk_buff *skb = rx_buf->skb;
-
- if (skb == NULL)
- continue;
-
- dma_unmap_single(&bp->pdev->dev,
- dma_unmap_addr(rx_buf, mapping),
- fp->rx_buf_size, DMA_FROM_DEVICE);
+ bnx2x_free_rx_bds(fp);
- rx_buf->skb = NULL;
- dev_kfree_skb(skb);
- }
if (!fp->disable_tpa)
bnx2x_free_tpa_pool(bp, fp, CHIP_IS_E1(bp) ?
ETH_MAX_AGGREGATION_QUEUES_E1 :
@@ -1345,29 +1436,47 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;
+ /* Set the initial link reported state to link down */
+ bnx2x_acquire_phy_lock(bp);
+ memset(&bp->last_reported_link, 0, sizeof(bp->last_reported_link));
+ __set_bit(BNX2X_LINK_REPORT_LINK_DOWN,
+ &bp->last_reported_link.link_report_flags);
+ bnx2x_release_phy_lock(bp);
+
/* must be called before memory allocation and HW init */
bnx2x_ilt_set_info(bp);
+ /* Zero fastpath structures while preserving invariants like napi,
+ * which is allocated only once
+ */
+ for_each_queue(bp, i)
+ bnx2x_bz_fp(bp, i);
+
/* Set the receive queues buffer size */
bnx2x_set_rx_buf_size(bp);
+ for_each_queue(bp, i)
+ bnx2x_fp(bp, i, disable_tpa) =
+ ((bp->flags & TPA_ENABLE_FLAG) == 0);
+
+#ifdef BCM_CNIC
+ /* We don't want TPA on FCoE L2 ring */
+ bnx2x_fcoe(bp, disable_tpa) = 1;
+#endif
+
if (bnx2x_alloc_mem(bp))
return -ENOMEM;
+ /* Since bnx2x_alloc_mem() may update bp->num_queues,
+ * bnx2x_set_real_num_queues() should always come after it.
+ */
rc = bnx2x_set_real_num_queues(bp);
if (rc) {
BNX2X_ERR("Unable to set real_num_queues\n");
goto load_error0;
}
- for_each_queue(bp, i)
- bnx2x_fp(bp, i, disable_tpa) =
- ((bp->flags & TPA_ENABLE_FLAG) == 0);
-
-#ifdef BCM_CNIC
- /* We don't want TPA on FCoE L2 ring */
- bnx2x_fcoe(bp, disable_tpa) = 1;
-#endif
bnx2x_napi_enable(bp);
/* Send LOAD_REQUEST command to MCP
@@ -1976,12 +2085,11 @@ static inline void bnx2x_set_pbd_gso_e2(struct sk_buff *skb, u32 *parsing_data,
}
/**
- * Update PBD in GSO case.
+ * bnx2x_set_pbd_gso - update PBD in GSO case.
*
- * @param skb
- * @param tx_start_bd
- * @param pbd
- * @param xmit_type
+ * @skb: packet skb
+ * @pbd: parse BD
+ * @xmit_type: xmit flags
*/
static inline void bnx2x_set_pbd_gso(struct sk_buff *skb,
struct eth_tx_parse_bd_e1x *pbd,
@@ -2008,42 +2116,50 @@ static inline void bnx2x_set_pbd_gso(struct sk_buff *skb,
}
/**
+ * bnx2x_set_pbd_csum_e2 - update PBD with checksum and return header length
*
- * @param skb
- * @param tx_start_bd
- * @param pbd_e2
- * @param xmit_type
+ * @bp: driver handle
+ * @skb: packet skb
+ * @parsing_data: data to be updated
+ * @xmit_type: xmit flags
*
- * @return header len
+ * 57712 related
*/
static inline u8 bnx2x_set_pbd_csum_e2(struct bnx2x *bp, struct sk_buff *skb,
u32 *parsing_data, u32 xmit_type)
{
- *parsing_data |= ((tcp_hdrlen(skb)/4) <<
- ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW_SHIFT) &
- ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW;
+ *parsing_data |=
+ ((((u8 *)skb_transport_header(skb) - skb->data) >> 1) <<
+ ETH_TX_PARSE_BD_E2_TCP_HDR_START_OFFSET_W_SHIFT) &
+ ETH_TX_PARSE_BD_E2_TCP_HDR_START_OFFSET_W;
- *parsing_data |= ((((u8 *)tcp_hdr(skb) - skb->data) / 2) <<
- ETH_TX_PARSE_BD_E2_TCP_HDR_START_OFFSET_W_SHIFT) &
- ETH_TX_PARSE_BD_E2_TCP_HDR_START_OFFSET_W;
+ if (xmit_type & XMIT_CSUM_TCP) {
+ *parsing_data |= ((tcp_hdrlen(skb) / 4) <<
+ ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW_SHIFT) &
+ ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW;
- return skb_transport_header(skb) + tcp_hdrlen(skb) - skb->data;
+ return skb_transport_header(skb) + tcp_hdrlen(skb) - skb->data;
+ } else
+ /* We support checksum offload for TCP and UDP only.
+ * No need to pass the UDP header length - it's a constant.
+ */
+ return skb_transport_header(skb) +
+ sizeof(struct udphdr) - skb->data;
}
/**
+ * bnx2x_set_pbd_csum - update PBD with checksum and return header length
*
- * @param skb
- * @param tx_start_bd
- * @param pbd
- * @param xmit_type
- *
- * @return Header length
+ * @bp: driver handle
+ * @skb: packet skb
+ * @pbd: parse BD to be updated
+ * @xmit_type: xmit flags
*/
static inline u8 bnx2x_set_pbd_csum(struct bnx2x *bp, struct sk_buff *skb,
struct eth_tx_parse_bd_e1x *pbd,
u32 xmit_type)
{
- u8 hlen = (skb_network_header(skb) - skb->data) / 2;
+ u8 hlen = (skb_network_header(skb) - skb->data) >> 1;
/* for now NS flag is not used in Linux */
pbd->global_data =
@@ -2051,9 +2167,15 @@ static inline u8 bnx2x_set_pbd_csum(struct bnx2x *bp, struct sk_buff *skb,
ETH_TX_PARSE_BD_E1X_LLC_SNAP_EN_SHIFT));
pbd->ip_hlen_w = (skb_transport_header(skb) -
- skb_network_header(skb)) / 2;
+ skb_network_header(skb)) >> 1;
- hlen += pbd->ip_hlen_w + tcp_hdrlen(skb) / 2;
+ hlen += pbd->ip_hlen_w;
+
+ /* We support checksum offload for TCP and UDP only */
+ if (xmit_type & XMIT_CSUM_TCP)
+ hlen += tcp_hdrlen(skb) / 2;
+ else
+ hlen += sizeof(struct udphdr) / 2;
pbd->total_hlen_w = cpu_to_le16(hlen);
hlen = hlen*2;
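/* Worked example for the word arithmetic above (illustrative numbers for an
 * untagged TCP/IPv4 frame):
 *   Ethernet header 14 bytes -> hlen         = 14 >> 1 = 7 words
 *   IPv4 header     20 bytes -> ip_hlen_w    = 20 >> 1 = 10 words
 *   TCP header      20 bytes -> tcp_hdrlen/2 = 10 words
 * so total_hlen_w = 27 words and hlen * 2 = 54 bytes of headers.
 */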
@@ -2379,6 +2501,232 @@ int bnx2x_change_mac_addr(struct net_device *dev, void *p)
return 0;
}
+static void bnx2x_free_fp_mem_at(struct bnx2x *bp, int fp_index)
+{
+ union host_hc_status_block *sb = &bnx2x_fp(bp, fp_index, status_blk);
+ struct bnx2x_fastpath *fp = &bp->fp[fp_index];
+
+ /* Common */
+#ifdef BCM_CNIC
+ if (IS_FCOE_IDX(fp_index)) {
+ memset(sb, 0, sizeof(union host_hc_status_block));
+ fp->status_blk_mapping = 0;
+
+ } else {
+#endif
+ /* status blocks */
+ if (CHIP_IS_E2(bp))
+ BNX2X_PCI_FREE(sb->e2_sb,
+ bnx2x_fp(bp, fp_index,
+ status_blk_mapping),
+ sizeof(struct host_hc_status_block_e2));
+ else
+ BNX2X_PCI_FREE(sb->e1x_sb,
+ bnx2x_fp(bp, fp_index,
+ status_blk_mapping),
+ sizeof(struct host_hc_status_block_e1x));
+#ifdef BCM_CNIC
+ }
+#endif
+ /* Rx */
+ if (!skip_rx_queue(bp, fp_index)) {
+ bnx2x_free_rx_bds(fp);
+
+ /* fastpath rx rings: rx_buf rx_desc rx_comp */
+ BNX2X_FREE(bnx2x_fp(bp, fp_index, rx_buf_ring));
+ BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_desc_ring),
+ bnx2x_fp(bp, fp_index, rx_desc_mapping),
+ sizeof(struct eth_rx_bd) * NUM_RX_BD);
+
+ BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_comp_ring),
+ bnx2x_fp(bp, fp_index, rx_comp_mapping),
+ sizeof(struct eth_fast_path_rx_cqe) *
+ NUM_RCQ_BD);
+
+ /* SGE ring */
+ BNX2X_FREE(bnx2x_fp(bp, fp_index, rx_page_ring));
+ BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_sge_ring),
+ bnx2x_fp(bp, fp_index, rx_sge_mapping),
+ BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
+ }
+
+ /* Tx */
+ if (!skip_tx_queue(bp, fp_index)) {
+ /* fastpath tx rings: tx_buf tx_desc */
+ BNX2X_FREE(bnx2x_fp(bp, fp_index, tx_buf_ring));
+ BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, tx_desc_ring),
+ bnx2x_fp(bp, fp_index, tx_desc_mapping),
+ sizeof(union eth_tx_bd_types) * NUM_TX_BD);
+ }
+ /* end of fastpath */
+}
+
+void bnx2x_free_fp_mem(struct bnx2x *bp)
+{
+ int i;
+ for_each_queue(bp, i)
+ bnx2x_free_fp_mem_at(bp, i);
+}
+
+static inline void set_sb_shortcuts(struct bnx2x *bp, int index)
+{
+ union host_hc_status_block status_blk = bnx2x_fp(bp, index, status_blk);
+ if (CHIP_IS_E2(bp)) {
+ bnx2x_fp(bp, index, sb_index_values) =
+ (__le16 *)status_blk.e2_sb->sb.index_values;
+ bnx2x_fp(bp, index, sb_running_index) =
+ (__le16 *)status_blk.e2_sb->sb.running_index;
+ } else {
+ bnx2x_fp(bp, index, sb_index_values) =
+ (__le16 *)status_blk.e1x_sb->sb.index_values;
+ bnx2x_fp(bp, index, sb_running_index) =
+ (__le16 *)status_blk.e1x_sb->sb.running_index;
+ }
+}
+
+static int bnx2x_alloc_fp_mem_at(struct bnx2x *bp, int index)
+{
+ union host_hc_status_block *sb;
+ struct bnx2x_fastpath *fp = &bp->fp[index];
+ int ring_size = 0;
+
+ /* if rx_ring_size specified - use it */
+ int rx_ring_size = bp->rx_ring_size ? bp->rx_ring_size :
+ MAX_RX_AVAIL/bp->num_queues;
+
+ /* allocate at least number of buffers required by FW */
+ rx_ring_size = max_t(int, fp->disable_tpa ? MIN_RX_SIZE_NONTPA :
+ MIN_RX_SIZE_TPA,
+ rx_ring_size);
+
+ bnx2x_fp(bp, index, bp) = bp;
+ bnx2x_fp(bp, index, index) = index;
+
+ /* Common */
+ sb = &bnx2x_fp(bp, index, status_blk);
+#ifdef BCM_CNIC
+ if (!IS_FCOE_IDX(index)) {
+#endif
+ /* status blocks */
+ if (CHIP_IS_E2(bp))
+ BNX2X_PCI_ALLOC(sb->e2_sb,
+ &bnx2x_fp(bp, index, status_blk_mapping),
+ sizeof(struct host_hc_status_block_e2));
+ else
+ BNX2X_PCI_ALLOC(sb->e1x_sb,
+ &bnx2x_fp(bp, index, status_blk_mapping),
+ sizeof(struct host_hc_status_block_e1x));
+#ifdef BCM_CNIC
+ }
+#endif
+ set_sb_shortcuts(bp, index);
+
+ /* Tx */
+ if (!skip_tx_queue(bp, index)) {
+ /* fastpath tx rings: tx_buf tx_desc */
+ BNX2X_ALLOC(bnx2x_fp(bp, index, tx_buf_ring),
+ sizeof(struct sw_tx_bd) * NUM_TX_BD);
+ BNX2X_PCI_ALLOC(bnx2x_fp(bp, index, tx_desc_ring),
+ &bnx2x_fp(bp, index, tx_desc_mapping),
+ sizeof(union eth_tx_bd_types) * NUM_TX_BD);
+ }
+
+ /* Rx */
+ if (!skip_rx_queue(bp, index)) {
+ /* fastpath rx rings: rx_buf rx_desc rx_comp */
+ BNX2X_ALLOC(bnx2x_fp(bp, index, rx_buf_ring),
+ sizeof(struct sw_rx_bd) * NUM_RX_BD);
+ BNX2X_PCI_ALLOC(bnx2x_fp(bp, index, rx_desc_ring),
+ &bnx2x_fp(bp, index, rx_desc_mapping),
+ sizeof(struct eth_rx_bd) * NUM_RX_BD);
+
+ BNX2X_PCI_ALLOC(bnx2x_fp(bp, index, rx_comp_ring),
+ &bnx2x_fp(bp, index, rx_comp_mapping),
+ sizeof(struct eth_fast_path_rx_cqe) *
+ NUM_RCQ_BD);
+
+ /* SGE ring */
+ BNX2X_ALLOC(bnx2x_fp(bp, index, rx_page_ring),
+ sizeof(struct sw_rx_page) * NUM_RX_SGE);
+ BNX2X_PCI_ALLOC(bnx2x_fp(bp, index, rx_sge_ring),
+ &bnx2x_fp(bp, index, rx_sge_mapping),
+ BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
+ /* RX BD ring */
+ bnx2x_set_next_page_rx_bd(fp);
+
+ /* CQ ring */
+ bnx2x_set_next_page_rx_cq(fp);
+
+ /* BDs */
+ ring_size = bnx2x_alloc_rx_bds(fp, rx_ring_size);
+ if (ring_size < rx_ring_size)
+ goto alloc_mem_err;
+ }
+
+ return 0;
+
+/* handles low memory cases */
+alloc_mem_err:
+ BNX2X_ERR("Unable to allocate full memory for queue %d (size %d)\n",
+ index, ring_size);
+ /* FW will drop all packets if the queue is not big enough.
+ * In that case we disable the queue.
+ * The minimum size differs for TPA and non-TPA queues.
+ */
+ if (ring_size < (fp->disable_tpa ?
+ MIN_RX_SIZE_TPA : MIN_RX_SIZE_NONTPA)) {
+ /* release memory allocated for this queue */
+ bnx2x_free_fp_mem_at(bp, index);
+ return -ENOMEM;
+ }
+ return 0;
+}
+
+int bnx2x_alloc_fp_mem(struct bnx2x *bp)
+{
+ int i;
+
+ /**
+ * 1. Allocate FP for leading - fatal if error
+ * 2. {CNIC} Allocate FCoE FP - fatal if error
+ * 3. Allocate RSS - fix number of queues if error
+ */
+
+ /* leading */
+ if (bnx2x_alloc_fp_mem_at(bp, 0))
+ return -ENOMEM;
+#ifdef BCM_CNIC
+ /* FCoE */
+ if (bnx2x_alloc_fp_mem_at(bp, FCOE_IDX))
+ return -ENOMEM;
+#endif
+ /* RSS */
+ for_each_nondefault_eth_queue(bp, i)
+ if (bnx2x_alloc_fp_mem_at(bp, i))
+ break;
+
+ /* handle memory failures */
+ if (i != BNX2X_NUM_ETH_QUEUES(bp)) {
+ int delta = BNX2X_NUM_ETH_QUEUES(bp) - i;
+
+ WARN_ON(delta < 0);
+#ifdef BCM_CNIC
+ /**
+ * move non eth FPs next to last eth FP
+ * must be done in that order
+ * FCOE_IDX < FWD_IDX < OOO_IDX
+ */
+
+ /* move FCoE fp */
+ bnx2x_move_fp(bp, FCOE_IDX, FCOE_IDX - delta);
+#endif
+ bp->num_queues -= delta;
+ BNX2X_ERR("Adjusted num of queues from %d to %d\n",
+ bp->num_queues + delta, bp->num_queues);
+ }
+
+ return 0;
+}
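/* Worked example of the fallback above (hypothetical numbers): with
 * BNX2X_NUM_ETH_QUEUES(bp) == 8 and the first failing allocation at i == 5,
 * delta == 3: the FCoE fastpath is moved from FCOE_IDX to FCOE_IDX - 3 so it
 * stays adjacent to the last ethernet queue, and bp->num_queues is reduced
 * by 3.
 */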
static int bnx2x_setup_irqs(struct bnx2x *bp)
{
@@ -2443,11 +2791,21 @@ alloc_err:
}
+static int bnx2x_reload_if_running(struct net_device *dev)
+{
+ struct bnx2x *bp = netdev_priv(dev);
+
+ if (unlikely(!netif_running(dev)))
+ return 0;
+
+ bnx2x_nic_unload(bp, UNLOAD_NORMAL);
+ return bnx2x_nic_load(bp, LOAD_NORMAL);
+}
+
/* called with rtnl_lock */
int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
{
struct bnx2x *bp = netdev_priv(dev);
- int rc = 0;
if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
printk(KERN_ERR "Handling parity error recovery. Try again later\n");
@@ -2464,12 +2822,39 @@ int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
*/
dev->mtu = new_mtu;
- if (netif_running(dev)) {
- bnx2x_nic_unload(bp, UNLOAD_NORMAL);
- rc = bnx2x_nic_load(bp, LOAD_NORMAL);
+ return bnx2x_reload_if_running(dev);
+}
+
+u32 bnx2x_fix_features(struct net_device *dev, u32 features)
+{
+ struct bnx2x *bp = netdev_priv(dev);
+
+ /* TPA requires Rx CSUM offloading */
+ if (!(features & NETIF_F_RXCSUM) || bp->disable_tpa)
+ features &= ~NETIF_F_LRO;
+
+ return features;
+}
+
+int bnx2x_set_features(struct net_device *dev, u32 features)
+{
+ struct bnx2x *bp = netdev_priv(dev);
+ u32 flags = bp->flags;
+
+ if (features & NETIF_F_LRO)
+ flags |= TPA_ENABLE_FLAG;
+ else
+ flags &= ~TPA_ENABLE_FLAG;
+
+ if (flags ^ bp->flags) {
+ bp->flags = flags;
+
+ if (bp->recovery_state == BNX2X_RECOVERY_DONE)
+ return bnx2x_reload_if_running(dev);
+ /* else: bnx2x_nic_load() will be called at end of recovery */
}
- return rc;
+ return 0;
}
void bnx2x_tx_timeout(struct net_device *dev)
diff --git a/drivers/net/bnx2x/bnx2x_cmn.h b/drivers/net/bnx2x/bnx2x_cmn.h
index ef37b98d6146..fab161e8030d 100644
--- a/drivers/net/bnx2x/bnx2x_cmn.h
+++ b/drivers/net/bnx2x/bnx2x_cmn.h
@@ -1,6 +1,6 @@
/* bnx2x_cmn.h: Broadcom Everest network driver.
*
- * Copyright (c) 2007-2010 Broadcom Corporation
+ * Copyright (c) 2007-2011 Broadcom Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -25,260 +25,277 @@
extern int num_queues;
+/************************ Macros ********************************/
+#define BNX2X_PCI_FREE(x, y, size) \
+ do { \
+ if (x) { \
+ dma_free_coherent(&bp->pdev->dev, size, (void *)x, y); \
+ x = NULL; \
+ y = 0; \
+ } \
+ } while (0)
+
+#define BNX2X_FREE(x) \
+ do { \
+ if (x) { \
+ kfree((void *)x); \
+ x = NULL; \
+ } \
+ } while (0)
+
+#define BNX2X_PCI_ALLOC(x, y, size) \
+ do { \
+ x = dma_alloc_coherent(&bp->pdev->dev, size, y, GFP_KERNEL); \
+ if (x == NULL) \
+ goto alloc_mem_err; \
+ memset((void *)x, 0, size); \
+ } while (0)
+
+#define BNX2X_ALLOC(x, size) \
+ do { \
+ x = kzalloc(size, GFP_KERNEL); \
+ if (x == NULL) \
+ goto alloc_mem_err; \
+ } while (0)
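/* Hedged usage sketch for the helpers above (the function and the exact
 * fields are made up for illustration): both allocation macros jump to a
 * caller-provided alloc_mem_err label on failure, and the FREE macros are
 * safe to call on pointers that were never allocated.
 */
static int example_alloc(struct bnx2x *bp)
{
	BNX2X_ALLOC(bp->fp, sizeof(struct bnx2x_fastpath) * bp->num_queues);
	BNX2X_PCI_ALLOC(bp->spq, &bp->spq_mapping, BCM_PAGE_SIZE);
	return 0;

alloc_mem_err:
	BNX2X_PCI_FREE(bp->spq, bp->spq_mapping, BCM_PAGE_SIZE);
	BNX2X_FREE(bp->fp);
	return -ENOMEM;
}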
+
/*********************** Interfaces ****************************
* Functions that need to be implemented by each driver version
*/
/**
- * Initialize link parameters structure variables.
- *
- * @param bp
- * @param load_mode
+ * bnx2x_initial_phy_init - initialize link parameters structure variables.
*
- * @return u8
+ * @bp: driver handle
+ * @load_mode: current mode
*/
u8 bnx2x_initial_phy_init(struct bnx2x *bp, int load_mode);
/**
- * Configure hw according to link parameters structure.
+ * bnx2x_link_set - configure hw according to link parameters structure.
*
- * @param bp
+ * @bp: driver handle
*/
void bnx2x_link_set(struct bnx2x *bp);
/**
- * Query link status
+ * bnx2x_link_test - query link status.
*
- * @param bp
- * @param is_serdes
+ * @bp: driver handle
+ * @is_serdes: bool
*
- * @return 0 - link is UP
+ * Returns 0 if link is UP.
*/
u8 bnx2x_link_test(struct bnx2x *bp, u8 is_serdes);
/**
- * Handles link status change
+ * bnx2x__link_status_update - handles link status change.
*
- * @param bp
+ * @bp: driver handle
*/
void bnx2x__link_status_update(struct bnx2x *bp);
/**
- * Report link status to upper layer
- *
- * @param bp
+ * bnx2x_link_report - report link status to upper layer.
*
- * @return int
+ * @bp: driver handle
*/
void bnx2x_link_report(struct bnx2x *bp);
+/* Non-atomic version of bnx2x_link_report() */
+void __bnx2x_link_report(struct bnx2x *bp);
+
/**
- * calculates MF speed according to current linespeed and MF
- * configuration
+ * bnx2x_get_mf_speed - calculate MF speed.
*
- * @param bp
+ * @bp: driver handle
*
- * @return u16
+ * Takes into account current linespeed and MF configuration.
*/
u16 bnx2x_get_mf_speed(struct bnx2x *bp);
/**
- * MSI-X slowpath interrupt handler
- *
- * @param irq
- * @param dev_instance
+ * bnx2x_msix_sp_int - MSI-X slowpath interrupt handler
*
- * @return irqreturn_t
+ * @irq: irq number
+ * @dev_instance: private instance
*/
irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance);
/**
- * non MSI-X interrupt handler
+ * bnx2x_interrupt - non MSI-X interrupt handler
*
- * @param irq
- * @param dev_instance
- *
- * @return irqreturn_t
+ * @irq: irq number
+ * @dev_instance: private instance
*/
irqreturn_t bnx2x_interrupt(int irq, void *dev_instance);
#ifdef BCM_CNIC
/**
- * Send command to cnic driver
+ * bnx2x_cnic_notify - send command to cnic driver
*
- * @param bp
- * @param cmd
+ * @bp: driver handle
+ * @cmd: command
*/
int bnx2x_cnic_notify(struct bnx2x *bp, int cmd);
/**
- * Provides cnic information for proper interrupt handling
+ * bnx2x_setup_cnic_irq_info - provides cnic with IRQ information
*
- * @param bp
+ * @bp: driver handle
*/
void bnx2x_setup_cnic_irq_info(struct bnx2x *bp);
#endif
/**
- * Enable HW interrupts.
+ * bnx2x_int_enable - enable HW interrupts.
*
- * @param bp
+ * @bp: driver handle
*/
void bnx2x_int_enable(struct bnx2x *bp);
/**
- * Disable interrupts. This function ensures that there are no
- * ISRs or SP DPCs (sp_task) are running after it returns.
+ * bnx2x_int_disable_sync - disable interrupts.
*
- * @param bp
- * @param disable_hw if true, disable HW interrupts.
+ * @bp: driver handle
+ * @disable_hw: true, disable HW interrupts.
+ *
+ * This function ensures that there are no
+ * ISRs or SP DPCs (sp_task) are running after it returns.
*/
void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw);
/**
- * Loads device firmware
+ * bnx2x_init_firmware - loads device firmware
*
- * @param bp
- *
- * @return int
+ * @bp: driver handle
*/
int bnx2x_init_firmware(struct bnx2x *bp);
/**
- * Init HW blocks according to current initialization stage:
- * COMMON, PORT or FUNCTION.
- *
- * @param bp
- * @param load_code: COMMON, PORT or FUNCTION
+ * bnx2x_init_hw - init HW blocks according to current initialization stage.
*
- * @return int
+ * @bp: driver handle
+ * @load_code: COMMON, PORT or FUNCTION
*/
int bnx2x_init_hw(struct bnx2x *bp, u32 load_code);
/**
- * Init driver internals:
+ * bnx2x_nic_init - init driver internals.
+ *
+ * @bp: driver handle
+ * @load_code: COMMON, PORT or FUNCTION
+ *
+ * Initializes:
* - rings
* - status blocks
* - etc.
- *
- * @param bp
- * @param load_code COMMON, PORT or FUNCTION
*/
void bnx2x_nic_init(struct bnx2x *bp, u32 load_code);
/**
- * Allocate driver's memory.
- *
- * @param bp
+ * bnx2x_alloc_mem - allocate driver's memory.
*
- * @return int
+ * @bp: driver handle
*/
int bnx2x_alloc_mem(struct bnx2x *bp);
/**
- * Release driver's memory.
+ * bnx2x_free_mem - release driver's memory.
*
- * @param bp
+ * @bp: driver handle
*/
void bnx2x_free_mem(struct bnx2x *bp);
/**
- * Setup eth Client.
+ * bnx2x_setup_client - setup eth client.
*
- * @param bp
- * @param fp
- * @param is_leading
- *
- * @return int
+ * @bp: driver handle
+ * @fp: pointer to fastpath structure
+ * @is_leading: boolean
*/
int bnx2x_setup_client(struct bnx2x *bp, struct bnx2x_fastpath *fp,
int is_leading);
/**
- * Set number of queues according to mode
- *
- * @param bp
+ * bnx2x_set_num_queues - set number of queues according to mode.
*
+ * @bp: driver handle
*/
void bnx2x_set_num_queues(struct bnx2x *bp);
/**
- * Cleanup chip internals:
+ * bnx2x_chip_cleanup - cleanup chip internals.
+ *
+ * @bp: driver handle
+ * @unload_mode: COMMON, PORT, FUNCTION
+ *
* - Cleanup MAC configuration.
- * - Close clients.
+ * - Closes clients.
* - etc.
- *
- * @param bp
- * @param unload_mode
*/
void bnx2x_chip_cleanup(struct bnx2x *bp, int unload_mode);
/**
- * Acquire HW lock.
+ * bnx2x_acquire_hw_lock - acquire HW lock.
*
- * @param bp
- * @param resource Resource bit which was locked
- *
- * @return int
+ * @bp: driver handle
+ * @resource: resource bit which was locked
*/
int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource);
/**
- * Release HW lock.
- *
- * @param bp driver handle
- * @param resource Resource bit which was locked
+ * bnx2x_release_hw_lock - release HW lock.
*
- * @return int
+ * @bp: driver handle
+ * @resource: resource bit which was locked
*/
int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource);
/**
- * Configure eth MAC address in the HW according to the value in
- * netdev->dev_addr.
+ * bnx2x_set_eth_mac - configure eth MAC address in the HW
*
- * @param bp driver handle
- * @param set
+ * @bp: driver handle
+ * @set: set or clear
+ *
+ * Configures according to the value in netdev->dev_addr.
*/
void bnx2x_set_eth_mac(struct bnx2x *bp, int set);
#ifdef BCM_CNIC
/**
- * Set/Clear FIP MAC(s) at the next enties in the CAM after the ETH
- * MAC(s). This function will wait until the ramdord completion
- * returns.
+ * bnx2x_set_fip_eth_mac_addr - Set/Clear FIP MAC(s)
*
- * @param bp driver handle
- * @param set set or clear the CAM entry
+ * @bp: driver handle
+ * @set: set or clear the CAM entry
*
- * @return 0 if cussess, -ENODEV if ramrod doesn't return.
+ * Uses the next entries in the CAM after the ETH MAC(s).
+ * This function will wait until the ramrod completion returns.
+ * Returns 0 on success, -ENODEV if the ramrod doesn't return.
*/
int bnx2x_set_fip_eth_mac_addr(struct bnx2x *bp, int set);
/**
- * Set/Clear ALL_ENODE mcast MAC.
- *
- * @param bp
- * @param set
+ * bnx2x_set_all_enode_macs - Set/Clear ALL_ENODE mcast MAC.
*
- * @return int
+ * @bp: driver handle
+ * @set: set or clear
*/
int bnx2x_set_all_enode_macs(struct bnx2x *bp, int set);
#endif
/**
- * Set MAC filtering configurations.
+ * bnx2x_set_rx_mode - set MAC filtering configurations.
*
- * @remarks called with netif_tx_lock from dev_mcast.c
+ * @dev: netdevice
*
- * @param dev net_device
+ * called with netif_tx_lock from dev_mcast.c
*/
void bnx2x_set_rx_mode(struct net_device *dev);
/**
- * Configure MAC filtering rules in a FW.
+ * bnx2x_set_storm_rx_mode - configure MAC filtering rules in a FW.
*
- * @param bp driver handle
+ * @bp: driver handle
*/
void bnx2x_set_storm_rx_mode(struct bnx2x *bp);
@@ -290,63 +307,59 @@ bool bnx2x_reset_is_done(struct bnx2x *bp);
void bnx2x_disable_close_the_gate(struct bnx2x *bp);
/**
- * Perform statistics handling according to event
+ * bnx2x_stats_handle - perform statistics handling according to event.
*
- * @param bp driver handle
- * @param event bnx2x_stats_event
+ * @bp: driver handle
+ * @event: bnx2x_stats_event
*/
void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event);
/**
- * Handle ramrods completion
+ * bnx2x_sp_event - handle ramrods completion.
*
- * @param fp fastpath handle for the event
- * @param rr_cqe eth_rx_cqe
+ * @fp: fastpath handle for the event
+ * @rr_cqe: eth_rx_cqe
*/
void bnx2x_sp_event(struct bnx2x_fastpath *fp, union eth_rx_cqe *rr_cqe);
/**
- * Init/halt function before/after sending
- * CLIENT_SETUP/CFC_DEL for the first/last client.
+ * bnx2x_func_start - init function
*
- * @param bp
+ * @bp: driver handle
*
- * @return int
+ * Must be called before sending CLIENT_SETUP for the first client.
*/
int bnx2x_func_start(struct bnx2x *bp);
/**
- * Prepare ILT configurations according to current driver
- * parameters.
+ * bnx2x_ilt_set_info - prepare ILT configurations.
*
- * @param bp
+ * @bp: driver handle
*/
void bnx2x_ilt_set_info(struct bnx2x *bp);
/**
- * Inintialize dcbx protocol
+ * bnx2x_dcbx_init - initialize dcbx protocol.
*
- * @param bp
+ * @bp: driver handle
*/
void bnx2x_dcbx_init(struct bnx2x *bp);
/**
- * Set power state to the requested value. Currently only D0 and
- * D3hot are supported.
+ * bnx2x_set_power_state - set power state to the requested value.
*
- * @param bp
- * @param state D0 or D3hot
+ * @bp: driver handle
+ * @state: required state D0 or D3hot
*
- * @return int
+ * Currently only D0 and D3hot are supported.
*/
int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state);
/**
- * Updates MAX part of MF configuration in HW
- * (if required)
+ * bnx2x_update_max_mf_config - update MAX part of MF configuration in HW.
*
- * @param bp
- * @param value
+ * @bp: driver handle
+ * @value: new value
*/
void bnx2x_update_max_mf_config(struct bnx2x *bp, u32 value);
@@ -377,83 +390,72 @@ int bnx2x_resume(struct pci_dev *pdev);
/* Release IRQ vectors */
void bnx2x_free_irq(struct bnx2x *bp);
+void bnx2x_free_fp_mem(struct bnx2x *bp);
+int bnx2x_alloc_fp_mem(struct bnx2x *bp);
+
void bnx2x_init_rx_rings(struct bnx2x *bp);
void bnx2x_free_skbs(struct bnx2x *bp);
void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw);
void bnx2x_netif_start(struct bnx2x *bp);
/**
- * Fill msix_table, request vectors, update num_queues according
- * to number of available vectors
+ * bnx2x_enable_msix - set msix configuration.
*
- * @param bp
+ * @bp: driver handle
*
- * @return int
+ * fills msix_table, requests vectors, updates num_queues
+ * according to number of available vectors.
*/
int bnx2x_enable_msix(struct bnx2x *bp);
/**
- * Request msi mode from OS, updated internals accordingly
- *
- * @param bp
+ * bnx2x_enable_msi - request msi mode from OS, update internals accordingly
*
- * @return int
+ * @bp: driver handle
*/
int bnx2x_enable_msi(struct bnx2x *bp);
/**
- * NAPI callback
+ * bnx2x_poll - NAPI callback
*
- * @param napi
- * @param budget
+ * @napi: napi structure
+ * @budget: NAPI poll budget
*
- * @return int
*/
int bnx2x_poll(struct napi_struct *napi, int budget);
/**
- * Allocate/release memories outsize main driver structure
+ * bnx2x_alloc_mem_bp - allocate memories outside the main driver structure
*
- * @param bp
- *
- * @return int
+ * @bp: driver handle
*/
int __devinit bnx2x_alloc_mem_bp(struct bnx2x *bp);
-void bnx2x_free_mem_bp(struct bnx2x *bp);
/**
- * Change mtu netdev callback
+ * bnx2x_free_mem_bp - release memories outside the main driver structure
*
- * @param dev
- * @param new_mtu
- *
- * @return int
+ * @bp: driver handle
*/
-int bnx2x_change_mtu(struct net_device *dev, int new_mtu);
+void bnx2x_free_mem_bp(struct bnx2x *bp);
/**
- * tx timeout netdev callback
+ * bnx2x_change_mtu - change mtu netdev callback
*
- * @param dev
- * @param new_mtu
+ * @dev: net device
+ * @new_mtu: requested mtu
*
- * @return int
*/
-void bnx2x_tx_timeout(struct net_device *dev);
+int bnx2x_change_mtu(struct net_device *dev, int new_mtu);
+
+u32 bnx2x_fix_features(struct net_device *dev, u32 features);
+int bnx2x_set_features(struct net_device *dev, u32 features);
-#ifdef BCM_VLAN
/**
- * vlan rx register netdev callback
+ * bnx2x_tx_timeout - tx timeout netdev callback
*
- * @param dev
- * @param new_mtu
- *
- * @return int
+ * @dev: net device
*/
-void bnx2x_vlan_rx_register(struct net_device *dev,
- struct vlan_group *vlgrp);
-
-#endif
+void bnx2x_tx_timeout(struct net_device *dev);
static inline void bnx2x_update_fpsb_idx(struct bnx2x_fastpath *fp)
{
@@ -705,7 +707,7 @@ static inline int bnx2x_has_rx_work(struct bnx2x_fastpath *fp)
/**
* disables tx from stack point of view
*
- * @param bp
+ * @bp: driver handle
*/
static inline void bnx2x_tx_disable(struct bnx2x *bp)
{
@@ -880,6 +882,9 @@ static inline void bnx2x_free_rx_sge_range(struct bnx2x *bp,
{
int i;
+ if (fp->disable_tpa)
+ return;
+
for (i = 0; i < last; i++)
bnx2x_free_rx_sge(bp, fp, i);
}
@@ -908,36 +913,39 @@ static inline void bnx2x_free_tpa_pool(struct bnx2x *bp,
}
}
-
-static inline void bnx2x_init_tx_rings(struct bnx2x *bp)
+static inline void bnx2x_init_tx_ring_one(struct bnx2x_fastpath *fp)
{
- int i, j;
+ int i;
- for_each_tx_queue(bp, j) {
- struct bnx2x_fastpath *fp = &bp->fp[j];
+ for (i = 1; i <= NUM_TX_RINGS; i++) {
+ struct eth_tx_next_bd *tx_next_bd =
+ &fp->tx_desc_ring[TX_DESC_CNT * i - 1].next_bd;
- for (i = 1; i <= NUM_TX_RINGS; i++) {
- struct eth_tx_next_bd *tx_next_bd =
- &fp->tx_desc_ring[TX_DESC_CNT * i - 1].next_bd;
+ tx_next_bd->addr_hi =
+ cpu_to_le32(U64_HI(fp->tx_desc_mapping +
+ BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
+ tx_next_bd->addr_lo =
+ cpu_to_le32(U64_LO(fp->tx_desc_mapping +
+ BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
+ }
- tx_next_bd->addr_hi =
- cpu_to_le32(U64_HI(fp->tx_desc_mapping +
- BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
- tx_next_bd->addr_lo =
- cpu_to_le32(U64_LO(fp->tx_desc_mapping +
- BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
- }
+ SET_FLAG(fp->tx_db.data.header.header, DOORBELL_HDR_DB_TYPE, 1);
+ fp->tx_db.data.zero_fill1 = 0;
+ fp->tx_db.data.prod = 0;
- SET_FLAG(fp->tx_db.data.header.header, DOORBELL_HDR_DB_TYPE, 1);
- fp->tx_db.data.zero_fill1 = 0;
- fp->tx_db.data.prod = 0;
+ fp->tx_pkt_prod = 0;
+ fp->tx_pkt_cons = 0;
+ fp->tx_bd_prod = 0;
+ fp->tx_bd_cons = 0;
+ fp->tx_pkt = 0;
+}
- fp->tx_pkt_prod = 0;
- fp->tx_pkt_cons = 0;
- fp->tx_bd_prod = 0;
- fp->tx_bd_cons = 0;
- fp->tx_pkt = 0;
- }
+static inline void bnx2x_init_tx_rings(struct bnx2x *bp)
+{
+ int i;
+
+ for_each_tx_queue(bp, i)
+ bnx2x_init_tx_ring_one(&bp->fp[i]);
}
static inline void bnx2x_set_next_page_rx_bd(struct bnx2x_fastpath *fp)
@@ -992,6 +1000,44 @@ static inline void bnx2x_set_next_page_rx_cq(struct bnx2x_fastpath *fp)
}
}
+/* Returns the number of actually allocated BDs */
+static inline int bnx2x_alloc_rx_bds(struct bnx2x_fastpath *fp,
+ int rx_ring_size)
+{
+ struct bnx2x *bp = fp->bp;
+ u16 ring_prod, cqe_ring_prod;
+ int i;
+
+ fp->rx_comp_cons = 0;
+ cqe_ring_prod = ring_prod = 0;
+
+ /* This routine is called only during init, so
+ * fp->eth_q_stats.rx_skb_alloc_failed = 0
+ */
+ for (i = 0; i < rx_ring_size; i++) {
+ if (bnx2x_alloc_rx_skb(bp, fp, ring_prod) < 0) {
+ fp->eth_q_stats.rx_skb_alloc_failed++;
+ continue;
+ }
+ ring_prod = NEXT_RX_IDX(ring_prod);
+ cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod);
+ WARN_ON(ring_prod <= (i - fp->eth_q_stats.rx_skb_alloc_failed));
+ }
+
+ if (fp->eth_q_stats.rx_skb_alloc_failed)
+ BNX2X_ERR("was only able to allocate "
+ "%d rx skbs on queue[%d]\n",
+ (i - fp->eth_q_stats.rx_skb_alloc_failed), fp->index);
+
+ fp->rx_bd_prod = ring_prod;
+ /* Limit the CQE producer by the CQE ring size */
+ fp->rx_comp_prod = min_t(u16, NUM_RCQ_RINGS*RCQ_DESC_CNT,
+ cqe_ring_prod);
+ fp->rx_pkt = fp->rx_calls = 0;
+
+ return i - fp->eth_q_stats.rx_skb_alloc_failed;
+}
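/* Worked example for the allocator above (hypothetical numbers): with
 * rx_ring_size == 4 and one skb allocation failing, the loop still iterates
 * four times but advances ring_prod/cqe_ring_prod only three times, so the
 * function returns 4 - 1 = 3 actually allocated BDs, which
 * bnx2x_alloc_fp_mem_at() then compares against the requested ring size.
 */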
+
#ifdef BCM_CNIC
static inline void bnx2x_init_fcoe_fp(struct bnx2x *bp)
{
@@ -1041,12 +1087,23 @@ static inline void storm_memset_cmng(struct bnx2x *bp,
struct cmng_struct_per_port *cmng,
u8 port)
{
- size_t size = sizeof(struct cmng_struct_per_port);
+ size_t size =
+ sizeof(struct rate_shaping_vars_per_port) +
+ sizeof(struct fairness_vars_per_port) +
+ sizeof(struct safc_struct_per_port) +
+ sizeof(struct pfc_struct_per_port);
u32 addr = BAR_XSTRORM_INTMEM +
XSTORM_CMNG_PER_PORT_VARS_OFFSET(port);
__storm_memset_struct(bp, addr, size, (u32 *)cmng);
+
+ addr += size + 4 /* SKIP DCB+LLFC */;
+ size = sizeof(struct cmng_struct_per_port) -
+ size /* written */ - 4 /*skipped*/;
+
+ __storm_memset_struct(bp, addr, size,
+ (u32 *)(cmng->traffic_type_to_priority_cos));
}
/* HW Lock for shared dual port PHYs */
@@ -1054,12 +1111,11 @@ void bnx2x_acquire_phy_lock(struct bnx2x *bp);
void bnx2x_release_phy_lock(struct bnx2x *bp);
/**
- * Extracts MAX BW part from MF configuration.
+ * bnx2x_extract_max_cfg - extract MAX BW part from MF configuration.
*
- * @param bp
- * @param mf_cfg
+ * @bp: driver handle
+ * @mf_cfg: MF configuration
*
- * @return u16
*/
static inline u16 bnx2x_extract_max_cfg(struct bnx2x *bp, u32 mf_cfg)
{
diff --git a/drivers/net/bnx2x/bnx2x_dcb.c b/drivers/net/bnx2x/bnx2x_dcb.c
index 9a24d79c71d9..0f8309233ff2 100644
--- a/drivers/net/bnx2x/bnx2x_dcb.c
+++ b/drivers/net/bnx2x/bnx2x_dcb.c
@@ -1,6 +1,6 @@
/* bnx2x_dcb.c: Broadcom Everest network driver.
*
- * Copyright 2009-2010 Broadcom Corporation
+ * Copyright 2009-2011 Broadcom Corporation
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
@@ -571,6 +571,28 @@ void bnx2x_dcbx_set_params(struct bnx2x *bp, u32 state)
{
switch (state) {
case BNX2X_DCBX_STATE_NEG_RECEIVED:
+#ifdef BCM_CNIC
+ if (bp->state != BNX2X_STATE_OPENING_WAIT4_LOAD) {
+ struct cnic_ops *c_ops;
+ struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
+ bp->flags |= NO_ISCSI_OOO_FLAG | NO_ISCSI_FLAG;
+ cp->drv_state |= CNIC_DRV_STATE_NO_ISCSI_OOO;
+ cp->drv_state |= CNIC_DRV_STATE_NO_ISCSI;
+
+ rcu_read_lock();
+ c_ops = rcu_dereference(bp->cnic_ops);
+ if (c_ops) {
+ bnx2x_cnic_notify(bp, CNIC_CTL_STOP_ISCSI_CMD);
+ rcu_read_unlock();
+ return;
+ }
+ rcu_read_unlock();
+ }
+
+ /* fall through if no CNIC initialized */
+ case BNX2X_DCBX_STATE_ISCSI_STOPPED:
+#endif
+
{
DP(NETIF_MSG_LINK, "BNX2X_DCBX_STATE_NEG_RECEIVED\n");
#ifdef BCM_DCBNL
@@ -1057,12 +1079,6 @@ static void bnx2x_dcbx_get_num_pg_traf_type(struct bnx2x *bp,
}
}
-
-/*******************************************************************************
- * Description: single priority group
- *
- * Return:
- ******************************************************************************/
static void bnx2x_dcbx_ets_disabled_entry_data(struct bnx2x *bp,
struct cos_help_data *cos_data,
u32 pri_join_mask)
@@ -1075,11 +1091,6 @@ static void bnx2x_dcbx_ets_disabled_entry_data(struct bnx2x *bp,
cos_data->num_of_cos = 1;
}
-/*******************************************************************************
- * Description: updating the cos bw
- *
- * Return:
- ******************************************************************************/
static inline void bnx2x_dcbx_add_to_cos_bw(struct bnx2x *bp,
struct cos_entry_help_data *data,
u8 pg_bw)
@@ -1090,11 +1101,6 @@ static inline void bnx2x_dcbx_add_to_cos_bw(struct bnx2x *bp,
data->cos_bw += pg_bw;
}
-/*******************************************************************************
- * Description: single priority group
- *
- * Return:
- ******************************************************************************/
static void bnx2x_dcbx_separate_pauseable_from_non(struct bnx2x *bp,
struct cos_help_data *cos_data,
u32 *pg_pri_orginal_spread,
@@ -1347,11 +1353,6 @@ static void bnx2x_dcbx_two_pg_to_cos_params(
}
}
-/*******************************************************************************
- * Description: Still
- *
- * Return:
- ******************************************************************************/
static void bnx2x_dcbx_three_pg_to_cos_params(
struct bnx2x *bp,
struct pg_help_data *pg_help_data,
@@ -1539,11 +1540,6 @@ static void bnx2x_dcbx_get_ets_pri_pg_tbl(struct bnx2x *bp,
}
}
-/*******************************************************************************
- * Description: Fill pfc_config struct that will be sent in DCBX start ramrod
- *
- * Return:
- ******************************************************************************/
static void bnx2x_pfc_fw_struct_e2(struct bnx2x *bp)
{
struct flow_control_configuration *pfc_fw_cfg = NULL;
diff --git a/drivers/net/bnx2x/bnx2x_dcb.h b/drivers/net/bnx2x/bnx2x_dcb.h
index 71b8eda43bd0..bed369d67e02 100644
--- a/drivers/net/bnx2x/bnx2x_dcb.h
+++ b/drivers/net/bnx2x/bnx2x_dcb.h
@@ -1,6 +1,6 @@
/* bnx2x_dcb.h: Broadcom Everest network driver.
*
- * Copyright 2009-2010 Broadcom Corporation
+ * Copyright 2009-2011 Broadcom Corporation
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
@@ -61,9 +61,6 @@ struct bnx2x_dcbx_port_params {
#define BNX2X_DCBX_OVERWRITE_SETTINGS_ENABLE 1
#define BNX2X_DCBX_OVERWRITE_SETTINGS_INVALID (BNX2X_DCBX_CONFIG_INV_VALUE)
-/*******************************************************************************
- * LLDP protocol configuration parameters.
- ******************************************************************************/
struct bnx2x_config_lldp_params {
u32 overwrite_settings;
u32 msg_tx_hold;
@@ -83,9 +80,6 @@ struct bnx2x_admin_priority_app_table {
u32 app_id;
};
-/*******************************************************************************
- * DCBX protocol configuration parameters.
- ******************************************************************************/
struct bnx2x_config_dcbx_params {
u32 overwrite_settings;
u32 admin_dcbx_version;
@@ -183,9 +177,13 @@ void bnx2x_dcbx_set_state(struct bnx2x *bp, bool dcb_on, u32 dcbx_enabled);
enum {
BNX2X_DCBX_STATE_NEG_RECEIVED = 0x1,
- BNX2X_DCBX_STATE_TX_PAUSED = 0x2,
- BNX2X_DCBX_STATE_TX_RELEASED = 0x4
+#ifdef BCM_CNIC
+ BNX2X_DCBX_STATE_ISCSI_STOPPED,
+#endif
+ BNX2X_DCBX_STATE_TX_PAUSED,
+ BNX2X_DCBX_STATE_TX_RELEASED
};
+
void bnx2x_dcbx_set_params(struct bnx2x *bp, u32 state);
/* DCB netlink */
diff --git a/drivers/net/bnx2x/bnx2x_ethtool.c b/drivers/net/bnx2x/bnx2x_ethtool.c
index f5050155c6b5..727fe89ff37f 100644
--- a/drivers/net/bnx2x/bnx2x_ethtool.c
+++ b/drivers/net/bnx2x/bnx2x_ethtool.c
@@ -1,6 +1,6 @@
/* bnx2x_ethtool.c: Broadcom Everest network driver.
*
- * Copyright (c) 2007-2010 Broadcom Corporation
+ * Copyright (c) 2007-2011 Broadcom Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -167,6 +167,7 @@ static int bnx2x_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
struct bnx2x *bp = netdev_priv(dev);
int cfg_idx = bnx2x_get_link_cfg_idx(bp);
+
/* Dual Media boards present all available port types */
cmd->supported = bp->port.supported[cfg_idx] |
(bp->port.supported[cfg_idx ^ 1] &
@@ -176,16 +177,16 @@ static int bnx2x_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
if ((bp->state == BNX2X_STATE_OPEN) &&
!(bp->flags & MF_FUNC_DIS) &&
(bp->link_vars.link_up)) {
- cmd->speed = bp->link_vars.line_speed;
+ ethtool_cmd_speed_set(cmd, bp->link_vars.line_speed);
cmd->duplex = bp->link_vars.duplex;
} else {
-
- cmd->speed = bp->link_params.req_line_speed[cfg_idx];
+ ethtool_cmd_speed_set(
+ cmd, bp->link_params.req_line_speed[cfg_idx]);
cmd->duplex = bp->link_params.req_duplex[cfg_idx];
}
if (IS_MF(bp))
- cmd->speed = bnx2x_get_mf_speed(bp);
+ ethtool_cmd_speed_set(cmd, bnx2x_get_mf_speed(bp));
if (bp->port.supported[cfg_idx] & SUPPORTED_TP)
cmd->port = PORT_TP;
@@ -206,10 +207,11 @@ static int bnx2x_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
cmd->maxrxpkt = 0;
DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
- DP_LEVEL " supported 0x%x advertising 0x%x speed %d\n"
+ DP_LEVEL " supported 0x%x advertising 0x%x speed %u\n"
DP_LEVEL " duplex %d port %d phy_address %d transceiver %d\n"
DP_LEVEL " autoneg %d maxtxpkt %d maxrxpkt %d\n",
- cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
+ cmd->cmd, cmd->supported, cmd->advertising,
+ ethtool_cmd_speed(cmd),
cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);
@@ -226,16 +228,15 @@ static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
return 0;
DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
- " supported 0x%x advertising 0x%x speed %d speed_hi %d\n"
+ " supported 0x%x advertising 0x%x speed %u\n"
" duplex %d port %d phy_address %d transceiver %d\n"
" autoneg %d maxtxpkt %d maxrxpkt %d\n",
- cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
- cmd->speed_hi,
+ cmd->cmd, cmd->supported, cmd->advertising,
+ ethtool_cmd_speed(cmd),
cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);
- speed = cmd->speed;
- speed |= (cmd->speed_hi << 16);
+ speed = ethtool_cmd_speed(cmd);
if (IS_MF_SI(bp)) {
u32 part;
@@ -439,7 +440,7 @@ static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
break;
default:
- DP(NETIF_MSG_LINK, "Unsupported speed %d\n", speed);
+ DP(NETIF_MSG_LINK, "Unsupported speed %u\n", speed);
return -EINVAL;
}
@@ -1219,7 +1220,8 @@ static int bnx2x_set_ringparam(struct net_device *dev,
}
if ((ering->rx_pending > MAX_RX_AVAIL) ||
- (ering->rx_pending < MIN_RX_AVAIL) ||
+ (ering->rx_pending < (bp->disable_tpa ? MIN_RX_SIZE_NONTPA :
+ MIN_RX_SIZE_TPA)) ||
(ering->tx_pending > MAX_TX_AVAIL) ||
(ering->tx_pending <= MAX_SKB_FRAGS + 4))
return -EINVAL;
@@ -1299,91 +1301,6 @@ static int bnx2x_set_pauseparam(struct net_device *dev,
return 0;
}
-static int bnx2x_set_flags(struct net_device *dev, u32 data)
-{
- struct bnx2x *bp = netdev_priv(dev);
- int changed = 0;
- int rc = 0;
-
- if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
- printk(KERN_ERR "Handling parity error recovery. Try again later\n");
- return -EAGAIN;
- }
-
- if (!(data & ETH_FLAG_RXVLAN))
- return -EINVAL;
-
- if ((data & ETH_FLAG_LRO) && bp->rx_csum && bp->disable_tpa)
- return -EINVAL;
-
- rc = ethtool_op_set_flags(dev, data, ETH_FLAG_LRO | ETH_FLAG_RXVLAN |
- ETH_FLAG_TXVLAN | ETH_FLAG_RXHASH);
- if (rc)
- return rc;
-
- /* TPA requires Rx CSUM offloading */
- if ((data & ETH_FLAG_LRO) && bp->rx_csum) {
- if (!(bp->flags & TPA_ENABLE_FLAG)) {
- bp->flags |= TPA_ENABLE_FLAG;
- changed = 1;
- }
- } else if (bp->flags & TPA_ENABLE_FLAG) {
- dev->features &= ~NETIF_F_LRO;
- bp->flags &= ~TPA_ENABLE_FLAG;
- changed = 1;
- }
-
- if (changed && netif_running(dev)) {
- bnx2x_nic_unload(bp, UNLOAD_NORMAL);
- rc = bnx2x_nic_load(bp, LOAD_NORMAL);
- }
-
- return rc;
-}
-
-static u32 bnx2x_get_rx_csum(struct net_device *dev)
-{
- struct bnx2x *bp = netdev_priv(dev);
-
- return bp->rx_csum;
-}
-
-static int bnx2x_set_rx_csum(struct net_device *dev, u32 data)
-{
- struct bnx2x *bp = netdev_priv(dev);
- int rc = 0;
-
- if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
- printk(KERN_ERR "Handling parity error recovery. Try again later\n");
- return -EAGAIN;
- }
-
- bp->rx_csum = data;
-
- /* Disable TPA, when Rx CSUM is disabled. Otherwise all
- TPA'ed packets will be discarded due to wrong TCP CSUM */
- if (!data) {
- u32 flags = ethtool_op_get_flags(dev);
-
- rc = bnx2x_set_flags(dev, (flags & ~ETH_FLAG_LRO));
- }
-
- return rc;
-}
-
-static int bnx2x_set_tso(struct net_device *dev, u32 data)
-{
- if (data) {
- dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
- dev->features |= NETIF_F_TSO6;
- } else {
- dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO_ECN);
- dev->features &= ~NETIF_F_TSO6;
- }
-
- return 0;
-}
-
static const struct {
char string[ETH_GSTRING_LEN];
} bnx2x_tests_str_arr[BNX2X_NUM_TESTS] = {
@@ -2097,36 +2014,37 @@ static void bnx2x_get_ethtool_stats(struct net_device *dev,
}
}
-static int bnx2x_phys_id(struct net_device *dev, u32 data)
+static int bnx2x_set_phys_id(struct net_device *dev,
+ enum ethtool_phys_id_state state)
{
struct bnx2x *bp = netdev_priv(dev);
- int i;
if (!netif_running(dev))
- return 0;
+ return -EAGAIN;
if (!bp->port.pmf)
- return 0;
+ return -EOPNOTSUPP;
- if (data == 0)
- data = 2;
+ switch (state) {
+ case ETHTOOL_ID_ACTIVE:
+ return 1; /* cycle on/off once per second */
- for (i = 0; i < (data * 2); i++) {
- if ((i % 2) == 0)
- bnx2x_set_led(&bp->link_params, &bp->link_vars,
- LED_MODE_OPER, SPEED_1000);
- else
- bnx2x_set_led(&bp->link_params, &bp->link_vars,
- LED_MODE_OFF, 0);
+ case ETHTOOL_ID_ON:
+ bnx2x_set_led(&bp->link_params, &bp->link_vars,
+ LED_MODE_ON, SPEED_1000);
+ break;
- msleep_interruptible(500);
- if (signal_pending(current))
- break;
- }
+ case ETHTOOL_ID_OFF:
+ bnx2x_set_led(&bp->link_params, &bp->link_vars,
+ LED_MODE_FRONT_PANEL_OFF, 0);
+
+ break;
- if (bp->link_vars.link_up)
- bnx2x_set_led(&bp->link_params, &bp->link_vars, LED_MODE_OPER,
+ case ETHTOOL_ID_INACTIVE:
+ bnx2x_set_led(&bp->link_params, &bp->link_vars,
+ LED_MODE_OPER,
bp->link_vars.line_speed);
+ }
return 0;
}
@@ -2205,20 +2123,10 @@ static const struct ethtool_ops bnx2x_ethtool_ops = {
.set_ringparam = bnx2x_set_ringparam,
.get_pauseparam = bnx2x_get_pauseparam,
.set_pauseparam = bnx2x_set_pauseparam,
- .get_rx_csum = bnx2x_get_rx_csum,
- .set_rx_csum = bnx2x_set_rx_csum,
- .get_tx_csum = ethtool_op_get_tx_csum,
- .set_tx_csum = ethtool_op_set_tx_hw_csum,
- .set_flags = bnx2x_set_flags,
- .get_flags = ethtool_op_get_flags,
- .get_sg = ethtool_op_get_sg,
- .set_sg = ethtool_op_set_sg,
- .get_tso = ethtool_op_get_tso,
- .set_tso = bnx2x_set_tso,
.self_test = bnx2x_self_test,
.get_sset_count = bnx2x_get_sset_count,
.get_strings = bnx2x_get_strings,
- .phys_id = bnx2x_phys_id,
+ .set_phys_id = bnx2x_set_phys_id,
.get_ethtool_stats = bnx2x_get_ethtool_stats,
.get_rxnfc = bnx2x_get_rxnfc,
.get_rxfh_indir = bnx2x_get_rxfh_indir,
diff --git a/drivers/net/bnx2x/bnx2x_fw_defs.h b/drivers/net/bnx2x/bnx2x_fw_defs.h
index f4e5b1ce8149..9fe367836a57 100644
--- a/drivers/net/bnx2x/bnx2x_fw_defs.h
+++ b/drivers/net/bnx2x/bnx2x_fw_defs.h
@@ -1,6 +1,6 @@
/* bnx2x_fw_defs.h: Broadcom Everest network driver.
*
- * Copyright (c) 2007-2010 Broadcom Corporation
+ * Copyright (c) 2007-2011 Broadcom Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
diff --git a/drivers/net/bnx2x/bnx2x_fw_file_hdr.h b/drivers/net/bnx2x/bnx2x_fw_file_hdr.h
index f807262911e5..f4a07fbaed05 100644
--- a/drivers/net/bnx2x/bnx2x_fw_file_hdr.h
+++ b/drivers/net/bnx2x/bnx2x_fw_file_hdr.h
@@ -1,6 +1,6 @@
/* bnx2x_fw_file_hdr.h: FW binary file header structure.
*
- * Copyright (c) 2007-2009 Broadcom Corporation
+ * Copyright (c) 2007-2011 Broadcom Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
diff --git a/drivers/net/bnx2x/bnx2x_hsi.h b/drivers/net/bnx2x/bnx2x_hsi.h
index be503cc0a50b..cdf19fe7c7f6 100644
--- a/drivers/net/bnx2x/bnx2x_hsi.h
+++ b/drivers/net/bnx2x/bnx2x_hsi.h
@@ -1,6 +1,6 @@
/* bnx2x_hsi.h: Broadcom Everest network driver.
*
- * Copyright (c) 2007-2010 Broadcom Corporation
+ * Copyright (c) 2007-2011 Broadcom Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -1929,7 +1929,7 @@ struct host_func_stats {
#define BCM_5710_FW_MAJOR_VERSION 6
#define BCM_5710_FW_MINOR_VERSION 2
-#define BCM_5710_FW_REVISION_VERSION 5
+#define BCM_5710_FW_REVISION_VERSION 9
#define BCM_5710_FW_ENGINEERING_VERSION 0
#define BCM_5710_FW_COMPILE_FLAGS 1
@@ -3019,7 +3019,7 @@ struct tstorm_eth_mac_filter_config {
/*
- * common flag to indicate existance of TPA.
+ * common flag to indicate existence of TPA.
*/
struct tstorm_eth_tpa_exist {
#if defined(__BIG_ENDIAN)
diff --git a/drivers/net/bnx2x/bnx2x_init.h b/drivers/net/bnx2x/bnx2x_init.h
index fa6dbe3f2058..d5399206f66e 100644
--- a/drivers/net/bnx2x/bnx2x_init.h
+++ b/drivers/net/bnx2x/bnx2x_init.h
@@ -1,7 +1,7 @@
/* bnx2x_init.h: Broadcom Everest network driver.
* Structures and macroes needed during the initialization.
*
- * Copyright (c) 2007-2009 Broadcom Corporation
+ * Copyright (c) 2007-2011 Broadcom Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
diff --git a/drivers/net/bnx2x/bnx2x_init_ops.h b/drivers/net/bnx2x/bnx2x_init_ops.h
index 66df29fcf751..aafd0232393f 100644
--- a/drivers/net/bnx2x/bnx2x_init_ops.h
+++ b/drivers/net/bnx2x/bnx2x_init_ops.h
@@ -2,7 +2,7 @@
* Static functions needed during the initialization.
* This file is "included" in bnx2x_main.c.
*
- * Copyright (c) 2007-2010 Broadcom Corporation
+ * Copyright (c) 2007-2011 Broadcom Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
diff --git a/drivers/net/bnx2x/bnx2x_link.c b/drivers/net/bnx2x/bnx2x_link.c
index f2f367d4e74d..076e11f5769f 100644
--- a/drivers/net/bnx2x/bnx2x_link.c
+++ b/drivers/net/bnx2x/bnx2x_link.c
@@ -385,7 +385,7 @@ u8 bnx2x_ets_strict(const struct link_params *params, const u8 strict_cos)
return 0;
}
/******************************************************************/
-/* ETS section */
+/* PFC section */
/******************************************************************/
static void bnx2x_bmac2_get_pfc_stat(struct link_params *params,
@@ -1301,14 +1301,12 @@ static u8 bnx2x_pbf_update(struct link_params *params, u32 flow_ctrl,
return 0;
}
-/*
- * get_emac_base
- *
- * @param cb
- * @param mdc_mdio_access
- * @param port
+/**
+ * bnx2x_get_emac_base - retrieve emac base address
*
- * @return u32
+ * @bp: driver handle
+ * @mdc_mdio_access: access type
+ * @port: port id
*
* This function selects the MDC/MDIO access (through emac0 or
* emac1) depend on the mdc_mdio_access, port, port swapped. Each
@@ -2823,7 +2821,7 @@ static u16 bnx2x_wait_reset_complete(struct bnx2x *bp,
struct link_params *params)
{
u16 cnt, ctrl;
- /* Wait for soft reset to get cleared upto 1 sec */
+ /* Wait for soft reset to get cleared up to 1 sec */
for (cnt = 0; cnt < 1000; cnt++) {
bnx2x_cl45_read(bp, phy,
MDIO_PMA_DEVAD, MDIO_PMA_REG_CTRL, &ctrl);
@@ -4141,7 +4139,7 @@ static u8 bnx2x_8073_config_init(struct bnx2x_phy *phy,
val = (1<<5);
/*
* Note that 2.5G works only when used with 1G
- * advertisment
+ * advertisement
*/
} else
val = (1<<5);
@@ -4151,7 +4149,7 @@ static u8 bnx2x_8073_config_init(struct bnx2x_phy *phy,
PORT_HW_CFG_SPEED_CAPABILITY_D0_10G)
val |= (1<<7);
- /* Note that 2.5G works only when used with 1G advertisment */
+ /* Note that 2.5G works only when used with 1G advertisement */
if (phy->speed_cap_mask &
(PORT_HW_CFG_SPEED_CAPABILITY_D0_1G |
PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G))
@@ -5232,14 +5230,14 @@ static u8 bnx2x_8706_config_init(struct bnx2x_phy *phy,
bnx2x_cl45_write(bp, phy,
MDIO_PMA_DEVAD, MDIO_PMA_REG_LASI_CTRL, 1);
} else {
- /* Force 1Gbps using autoneg with 1G advertisment */
+ /* Force 1Gbps using autoneg with 1G advertisement */
/* Allow CL37 through CL73 */
DP(NETIF_MSG_LINK, "XGXS 8706 AutoNeg\n");
bnx2x_cl45_write(bp, phy,
MDIO_AN_DEVAD, MDIO_AN_REG_CL37_CL73, 0x040c);
- /* Enable Full-Duplex advertisment on CL37 */
+ /* Enable Full-Duplex advertisement on CL37 */
bnx2x_cl45_write(bp, phy,
MDIO_AN_DEVAD, MDIO_AN_REG_CL37_FC_LP, 0x0020);
/* Enable CL37 AN */
@@ -6269,7 +6267,7 @@ static u8 bnx2x_848x3_config_init(struct bnx2x_phy *phy,
switch (actual_phy_selection) {
case PORT_HW_CFG_PHY_SELECTION_HARDWARE_DEFAULT:
- /* Do nothing. Essentialy this is like the priority copper */
+ /* Do nothing. Essentially this is like the priority copper */
break;
case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY_PRIORITY:
val |= MDIO_CTL_REG_84823_MEDIA_PRIORITY_COPPER;
@@ -7765,7 +7763,7 @@ u8 bnx2x_link_reset(struct link_params *params, struct link_vars *vars,
REG_WR(bp, NIG_REG_NIG_EMAC0_EN + port*4, 0);
msleep(10);
- /* The PHY reset is controled by GPIO 1
+ /* The PHY reset is controlled by GPIO 1
* Hold it as vars low
*/
/* clear link led */
diff --git a/drivers/net/bnx2x/bnx2x_main.c b/drivers/net/bnx2x/bnx2x_main.c
index 32e64cc85d2c..2762edf956e9 100644
--- a/drivers/net/bnx2x/bnx2x_main.c
+++ b/drivers/net/bnx2x/bnx2x_main.c
@@ -1,6 +1,6 @@
/* bnx2x_main.c: Broadcom Everest network driver.
*
- * Copyright (c) 2007-2010 Broadcom Corporation
+ * Copyright (c) 2007-2011 Broadcom Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -2036,7 +2036,7 @@ static int bnx2x_get_cmng_fns_mode(struct bnx2x *bp)
return CMNG_FNS_NONE;
}
-static void bnx2x_read_mf_cfg(struct bnx2x *bp)
+void bnx2x_read_mf_cfg(struct bnx2x *bp)
{
int vn, n = (CHIP_MODE_IS_4_PORT(bp) ? 2 : 1);
@@ -2123,7 +2123,6 @@ static inline void bnx2x_link_sync_notify(struct bnx2x *bp)
/* This function is called upon link interrupt */
static void bnx2x_link_attn(struct bnx2x *bp)
{
- u32 prev_link_status = bp->link_vars.link_status;
/* Make sure that we are synced with the current statistics */
bnx2x_stats_handle(bp, STATS_EVENT_STOP);
@@ -2168,17 +2167,15 @@ static void bnx2x_link_attn(struct bnx2x *bp)
"single function mode without fairness\n");
}
+ __bnx2x_link_report(bp);
+
if (IS_MF(bp))
bnx2x_link_sync_notify(bp);
-
- /* indicate link status only if link status actually changed */
- if (prev_link_status != bp->link_vars.link_status)
- bnx2x_link_report(bp);
}
void bnx2x__link_status_update(struct bnx2x *bp)
{
- if ((bp->state != BNX2X_STATE_OPEN) || (bp->flags & MF_FUNC_DIS))
+ if (bp->state != BNX2X_STATE_OPEN)
return;
bnx2x_link_status_update(&bp->link_params, &bp->link_vars);
@@ -2188,10 +2185,6 @@ void bnx2x__link_status_update(struct bnx2x *bp)
else
bnx2x_stats_handle(bp, STATS_EVENT_STOP);
- /* the link status update could be the result of a DCC event
- hence re-read the shmem mf configuration */
- bnx2x_read_mf_cfg(bp);
-
/* indicate link status */
bnx2x_link_report(bp);
}
@@ -3120,10 +3113,14 @@ static inline void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
if (val & DRV_STATUS_SET_MF_BW)
bnx2x_set_mf_bw(bp);
- bnx2x__link_status_update(bp);
if ((bp->port.pmf == 0) && (val & DRV_STATUS_PMF))
bnx2x_pmf_update(bp);
+ /* Always call it here: bnx2x_link_report() will
+ * prevent the link indication duplication.
+ */
+ bnx2x__link_status_update(bp);
+
if (bp->port.pmf &&
(val & DRV_STATUS_DCBX_NEGOTIATION_RESULTS) &&
bp->dcbx_enabled > 0)
@@ -3702,7 +3699,7 @@ static void bnx2x_eq_int(struct bnx2x *bp)
if ((hw_cons & EQ_DESC_MAX_PAGE) == EQ_DESC_MAX_PAGE)
hw_cons++;
- /* This function may never run in parralel with itself for a
+ /* This function may never run in parallel with itself for a
* specific bp, thus there is no need in "paired" read memory
* barrier here.
*/
@@ -3904,10 +3901,9 @@ static void bnx2x_timer(unsigned long data)
if (poll) {
struct bnx2x_fastpath *fp = &bp->fp[0];
- int rc;
bnx2x_tx_int(fp);
- rc = bnx2x_rx_int(fp, 1000);
+ bnx2x_rx_int(fp, 1000);
}
if (!BP_NOMCP(bp)) {
@@ -4062,7 +4058,6 @@ static void bnx2x_init_sb(struct bnx2x *bp, dma_addr_t mapping, int vfid,
struct hc_status_block_data_e2 sb_data_e2;
struct hc_status_block_data_e1x sb_data_e1x;
struct hc_status_block_sm *hc_sm_p;
- struct hc_index_data *hc_index_p;
int data_size;
u32 *sb_data_p;
@@ -4083,7 +4078,6 @@ static void bnx2x_init_sb(struct bnx2x *bp, dma_addr_t mapping, int vfid,
sb_data_e2.common.host_sb_addr.hi = U64_HI(mapping);
sb_data_e2.common.host_sb_addr.lo = U64_LO(mapping);
hc_sm_p = sb_data_e2.common.state_machine;
- hc_index_p = sb_data_e2.index_data;
sb_data_p = (u32 *)&sb_data_e2;
data_size = sizeof(struct hc_status_block_data_e2)/sizeof(u32);
} else {
@@ -4097,7 +4091,6 @@ static void bnx2x_init_sb(struct bnx2x *bp, dma_addr_t mapping, int vfid,
sb_data_e1x.common.host_sb_addr.hi = U64_HI(mapping);
sb_data_e1x.common.host_sb_addr.lo = U64_LO(mapping);
hc_sm_p = sb_data_e1x.common.state_machine;
- hc_index_p = sb_data_e1x.index_data;
sb_data_p = (u32 *)&sb_data_e1x;
data_size = sizeof(struct hc_status_block_data_e1x)/sizeof(u32);
}
@@ -4454,7 +4447,7 @@ static void bnx2x_init_fp_sb(struct bnx2x *bp, int fp_idx)
fp->state = BNX2X_FP_STATE_CLOSED;
- fp->index = fp->cid = fp_idx;
+ fp->cid = fp_idx;
fp->cl_id = BP_L_ID(bp) + fp_idx;
fp->fw_sb_id = bp->base_fw_ndsb + fp->cl_id + CNIC_CONTEXT_USE;
fp->igu_sb_id = bp->igu_base_sb + fp_idx + CNIC_CONTEXT_USE;
@@ -4566,9 +4559,11 @@ gunzip_nomem1:
static void bnx2x_gunzip_end(struct bnx2x *bp)
{
- kfree(bp->strm->workspace);
- kfree(bp->strm);
- bp->strm = NULL;
+ if (bp->strm) {
+ kfree(bp->strm->workspace);
+ kfree(bp->strm);
+ bp->strm = NULL;
+ }
if (bp->gunzip_buf) {
dma_free_coherent(&bp->pdev->dev, FW_BUF_SIZE, bp->gunzip_buf,
@@ -5089,7 +5084,7 @@ static int bnx2x_init_hw_common(struct bnx2x *bp, u32 load_code)
/* Step 1: set zeroes to all ilt page entries with valid bit on
* Step 2: set the timers first/last ilt entry to point
* to the entire range to prevent ILT range error for 3rd/4th
- * vnic (this code assumes existance of the vnic)
+ * vnic (this code assumes existence of the vnic)
*
* both steps performed by call to bnx2x_ilt_client_init_op()
* with dummy TM client
@@ -5876,9 +5871,6 @@ int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
bp->dmae_ready = 0;
spin_lock_init(&bp->dmae_lock);
- rc = bnx2x_gunzip_init(bp);
- if (rc)
- return rc;
switch (load_code) {
case FW_MSG_CODE_DRV_LOAD_COMMON:
@@ -5922,80 +5914,10 @@ init_hw_err:
void bnx2x_free_mem(struct bnx2x *bp)
{
-
-#define BNX2X_PCI_FREE(x, y, size) \
- do { \
- if (x) { \
- dma_free_coherent(&bp->pdev->dev, size, (void *)x, y); \
- x = NULL; \
- y = 0; \
- } \
- } while (0)
-
-#define BNX2X_FREE(x) \
- do { \
- if (x) { \
- kfree((void *)x); \
- x = NULL; \
- } \
- } while (0)
-
- int i;
+ bnx2x_gunzip_end(bp);
/* fastpath */
- /* Common */
- for_each_queue(bp, i) {
-#ifdef BCM_CNIC
- /* FCoE client uses default status block */
- if (IS_FCOE_IDX(i)) {
- union host_hc_status_block *sb =
- &bnx2x_fp(bp, i, status_blk);
- memset(sb, 0, sizeof(union host_hc_status_block));
- bnx2x_fp(bp, i, status_blk_mapping) = 0;
- } else {
-#endif
- /* status blocks */
- if (CHIP_IS_E2(bp))
- BNX2X_PCI_FREE(bnx2x_fp(bp, i, status_blk.e2_sb),
- bnx2x_fp(bp, i, status_blk_mapping),
- sizeof(struct host_hc_status_block_e2));
- else
- BNX2X_PCI_FREE(bnx2x_fp(bp, i, status_blk.e1x_sb),
- bnx2x_fp(bp, i, status_blk_mapping),
- sizeof(struct host_hc_status_block_e1x));
-#ifdef BCM_CNIC
- }
-#endif
- }
- /* Rx */
- for_each_rx_queue(bp, i) {
-
- /* fastpath rx rings: rx_buf rx_desc rx_comp */
- BNX2X_FREE(bnx2x_fp(bp, i, rx_buf_ring));
- BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_desc_ring),
- bnx2x_fp(bp, i, rx_desc_mapping),
- sizeof(struct eth_rx_bd) * NUM_RX_BD);
-
- BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_comp_ring),
- bnx2x_fp(bp, i, rx_comp_mapping),
- sizeof(struct eth_fast_path_rx_cqe) *
- NUM_RCQ_BD);
-
- /* SGE ring */
- BNX2X_FREE(bnx2x_fp(bp, i, rx_page_ring));
- BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_sge_ring),
- bnx2x_fp(bp, i, rx_sge_mapping),
- BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
- }
- /* Tx */
- for_each_tx_queue(bp, i) {
-
- /* fastpath tx rings: tx_buf tx_desc */
- BNX2X_FREE(bnx2x_fp(bp, i, tx_buf_ring));
- BNX2X_PCI_FREE(bnx2x_fp(bp, i, tx_desc_ring),
- bnx2x_fp(bp, i, tx_desc_mapping),
- sizeof(union eth_tx_bd_types) * NUM_TX_BD);
- }
+ bnx2x_free_fp_mem(bp);
/* end of fastpath */
BNX2X_PCI_FREE(bp->def_status_blk, bp->def_status_blk_mapping,
@@ -6028,101 +5950,13 @@ void bnx2x_free_mem(struct bnx2x *bp)
BCM_PAGE_SIZE * NUM_EQ_PAGES);
BNX2X_FREE(bp->rx_indir_table);
-
-#undef BNX2X_PCI_FREE
-#undef BNX2X_KFREE
}
-static inline void set_sb_shortcuts(struct bnx2x *bp, int index)
-{
- union host_hc_status_block status_blk = bnx2x_fp(bp, index, status_blk);
- if (CHIP_IS_E2(bp)) {
- bnx2x_fp(bp, index, sb_index_values) =
- (__le16 *)status_blk.e2_sb->sb.index_values;
- bnx2x_fp(bp, index, sb_running_index) =
- (__le16 *)status_blk.e2_sb->sb.running_index;
- } else {
- bnx2x_fp(bp, index, sb_index_values) =
- (__le16 *)status_blk.e1x_sb->sb.index_values;
- bnx2x_fp(bp, index, sb_running_index) =
- (__le16 *)status_blk.e1x_sb->sb.running_index;
- }
-}
int bnx2x_alloc_mem(struct bnx2x *bp)
{
-#define BNX2X_PCI_ALLOC(x, y, size) \
- do { \
- x = dma_alloc_coherent(&bp->pdev->dev, size, y, GFP_KERNEL); \
- if (x == NULL) \
- goto alloc_mem_err; \
- memset(x, 0, size); \
- } while (0)
-
-#define BNX2X_ALLOC(x, size) \
- do { \
- x = kzalloc(size, GFP_KERNEL); \
- if (x == NULL) \
- goto alloc_mem_err; \
- } while (0)
-
- int i;
-
- /* fastpath */
- /* Common */
- for_each_queue(bp, i) {
- union host_hc_status_block *sb = &bnx2x_fp(bp, i, status_blk);
- bnx2x_fp(bp, i, bp) = bp;
- /* status blocks */
-#ifdef BCM_CNIC
- if (!IS_FCOE_IDX(i)) {
-#endif
- if (CHIP_IS_E2(bp))
- BNX2X_PCI_ALLOC(sb->e2_sb,
- &bnx2x_fp(bp, i, status_blk_mapping),
- sizeof(struct host_hc_status_block_e2));
- else
- BNX2X_PCI_ALLOC(sb->e1x_sb,
- &bnx2x_fp(bp, i, status_blk_mapping),
- sizeof(struct host_hc_status_block_e1x));
-#ifdef BCM_CNIC
- }
-#endif
- set_sb_shortcuts(bp, i);
- }
- /* Rx */
- for_each_queue(bp, i) {
-
- /* fastpath rx rings: rx_buf rx_desc rx_comp */
- BNX2X_ALLOC(bnx2x_fp(bp, i, rx_buf_ring),
- sizeof(struct sw_rx_bd) * NUM_RX_BD);
- BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_desc_ring),
- &bnx2x_fp(bp, i, rx_desc_mapping),
- sizeof(struct eth_rx_bd) * NUM_RX_BD);
-
- BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_comp_ring),
- &bnx2x_fp(bp, i, rx_comp_mapping),
- sizeof(struct eth_fast_path_rx_cqe) *
- NUM_RCQ_BD);
-
- /* SGE ring */
- BNX2X_ALLOC(bnx2x_fp(bp, i, rx_page_ring),
- sizeof(struct sw_rx_page) * NUM_RX_SGE);
- BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_sge_ring),
- &bnx2x_fp(bp, i, rx_sge_mapping),
- BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
- }
- /* Tx */
- for_each_queue(bp, i) {
-
- /* fastpath tx rings: tx_buf tx_desc */
- BNX2X_ALLOC(bnx2x_fp(bp, i, tx_buf_ring),
- sizeof(struct sw_tx_bd) * NUM_TX_BD);
- BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, tx_desc_ring),
- &bnx2x_fp(bp, i, tx_desc_mapping),
- sizeof(union eth_tx_bd_types) * NUM_TX_BD);
- }
- /* end of fastpath */
+ if (bnx2x_gunzip_init(bp))
+ return -ENOMEM;
#ifdef BCM_CNIC
if (CHIP_IS_E2(bp))
@@ -6162,14 +5996,18 @@ int bnx2x_alloc_mem(struct bnx2x *bp)
BNX2X_ALLOC(bp->rx_indir_table, sizeof(bp->rx_indir_table[0]) *
TSTORM_INDIRECTION_TABLE_SIZE);
+
+ /* fastpath */
+	/* needs to be done at the end, since it's self-adjusting to the amount
+	 * of memory available for RSS queues
+ */
+ if (bnx2x_alloc_fp_mem(bp))
+ goto alloc_mem_err;
return 0;
alloc_mem_err:
bnx2x_free_mem(bp);
return -ENOMEM;
-
-#undef BNX2X_PCI_ALLOC
-#undef BNX2X_ALLOC
}
/*
@@ -6197,14 +6035,14 @@ static int bnx2x_func_stop(struct bnx2x *bp)
}
/**
- * Sets a MAC in a CAM for a few L2 Clients for E1x chips
+ * bnx2x_set_mac_addr_gen - set a MAC in a CAM for a few L2 Clients for E1x chips
*
- * @param bp driver descriptor
- * @param set set or clear an entry (1 or 0)
- * @param mac pointer to a buffer containing a MAC
- * @param cl_bit_vec bit vector of clients to register a MAC for
- * @param cam_offset offset in a CAM to use
- * @param is_bcast is the set MAC a broadcast address (for E1 only)
+ * @bp: driver handle
+ * @set: set or clear an entry (1 or 0)
+ * @mac: pointer to a buffer containing a MAC
+ * @cl_bit_vec: bit vector of clients to register a MAC for
+ * @cam_offset: offset in a CAM to use
+ * @is_bcast: is the set MAC a broadcast address (for E1 only)
*/
static void bnx2x_set_mac_addr_gen(struct bnx2x *bp, int set, const u8 *mac,
u32 cl_bit_vec, u8 cam_offset,
@@ -6564,14 +6402,13 @@ void bnx2x_invalidate_e1h_mc_list(struct bnx2x *bp)
#ifdef BCM_CNIC
/**
- * Set iSCSI MAC(s) at the next enties in the CAM after the ETH
- * MAC(s). This function will wait until the ramdord completion
- * returns.
+ * bnx2x_set_iscsi_eth_mac_addr - set iSCSI MAC(s).
*
- * @param bp driver handle
- * @param set set or clear the CAM entry
+ * @bp: driver handle
+ * @set: set or clear the CAM entry
*
- * @return 0 if cussess, -ENODEV if ramrod doesn't return.
+ * This function will wait until the ramrod completion returns.
+ * Returns 0 on success, -ENODEV if ramrod doesn't return.
*/
static int bnx2x_set_iscsi_eth_mac_addr(struct bnx2x *bp, int set)
{
@@ -6592,14 +6429,13 @@ static int bnx2x_set_iscsi_eth_mac_addr(struct bnx2x *bp, int set)
}
/**
- * Set FCoE L2 MAC(s) at the next enties in the CAM after the
- * ETH MAC(s). This function will wait until the ramdord
- * completion returns.
+ * bnx2x_set_fip_eth_mac_addr - set FCoE L2 MAC(s)
*
- * @param bp driver handle
- * @param set set or clear the CAM entry
+ * @bp: driver handle
+ * @set: set or clear the CAM entry
*
- * @return 0 if cussess, -ENODEV if ramrod doesn't return.
+ * This function will wait until the ramrod completion returns.
+ * Returns 0 on success, -ENODEV if ramrod doesn't return.
*/
int bnx2x_set_fip_eth_mac_addr(struct bnx2x *bp, int set)
{
@@ -6803,12 +6639,11 @@ static int bnx2x_setup_fw_client(struct bnx2x *bp,
}
/**
- * Configure interrupt mode according to current configuration.
- * In case of MSI-X it will also try to enable MSI-X.
+ * bnx2x_set_int_mode - configure interrupt mode
*
- * @param bp
+ * @bp: driver handle
*
- * @return int
+ * In case of MSI-X it will also try to enable MSI-X.
*/
static int __devinit bnx2x_set_int_mode(struct bnx2x *bp)
{
@@ -7392,10 +7227,11 @@ static void bnx2x_clp_reset_prep(struct bnx2x *bp, u32 *magic_val)
MF_CFG_WR(bp, shared_mf_config.clp_mb, val | SHARED_MF_CLP_MAGIC);
}
-/* Restore the value of the `magic' bit.
+/**
+ * bnx2x_clp_reset_done - restore the value of the `magic' bit.
*
- * @param pdev Device handle.
- * @param magic_val Old value of the `magic' bit.
+ * @bp: driver handle
+ * @magic_val: old value of the `magic' bit.
*/
static void bnx2x_clp_reset_done(struct bnx2x *bp, u32 magic_val)
{
@@ -7406,10 +7242,12 @@ static void bnx2x_clp_reset_done(struct bnx2x *bp, u32 magic_val)
}
/**
- * Prepares for MCP reset: takes care of CLP configurations.
+ * bnx2x_reset_mcp_prep - prepare for MCP reset.
+ *
+ * @bp: driver handle
+ * @magic_val: old value of 'magic' bit.
*
- * @param bp
- * @param magic_val Old value of 'magic' bit.
+ * Takes care of CLP configurations.
*/
static void bnx2x_reset_mcp_prep(struct bnx2x *bp, u32 *magic_val)
{
@@ -7434,10 +7272,10 @@ static void bnx2x_reset_mcp_prep(struct bnx2x *bp, u32 *magic_val)
#define MCP_TIMEOUT 5000 /* 5 seconds (in ms) */
#define MCP_ONE_TIMEOUT 100 /* 100 ms */
-/* Waits for MCP_ONE_TIMEOUT or MCP_ONE_TIMEOUT*10,
- * depending on the HW type.
+/**
+ * bnx2x_mcp_wait_one - wait for MCP_ONE_TIMEOUT
*
- * @param bp
+ * @bp: driver handle
*/
static inline void bnx2x_mcp_wait_one(struct bnx2x *bp)
{
@@ -8059,13 +7897,9 @@ static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
(val >= REQ_BC_VER_4_VRFY_SPECIFIC_PHY_OPT_MDL) ?
FEATURE_CONFIG_BC_SUPPORTS_DUAL_PHY_OPT_MDL_VRFY : 0;
- if (BP_E1HVN(bp) == 0) {
- pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_PMC, &pmc);
- bp->flags |= (pmc & PCI_PM_CAP_PME_D3cold) ? 0 : NO_WOL_FLAG;
- } else {
- /* no WOL capability for E1HVN != 0 */
- bp->flags |= NO_WOL_FLAG;
- }
+ pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_PMC, &pmc);
+ bp->flags |= (pmc & PCI_PM_CAP_PME_D3cold) ? 0 : NO_WOL_FLAG;
+
BNX2X_DEV_INFO("%sWoL capable\n",
(bp->flags & NO_WOL_FLAG) ? "not " : "");
@@ -8571,15 +8405,6 @@ static void __devinit bnx2x_get_mac_hwinfo(struct bnx2x *bp)
BNX2X_DEV_INFO("Read iSCSI MAC: "
"0x%x:0x%04x\n", val2, val);
bnx2x_set_mac_buf(iscsi_mac, val, val2);
-
- /* Disable iSCSI OOO if MAC configuration is
- * invalid.
- */
- if (!is_valid_ether_addr(iscsi_mac)) {
- bp->flags |= NO_ISCSI_OOO_FLAG |
- NO_ISCSI_FLAG;
- memset(iscsi_mac, 0, ETH_ALEN);
- }
} else
bp->flags |= NO_ISCSI_OOO_FLAG | NO_ISCSI_FLAG;
@@ -8592,13 +8417,6 @@ static void __devinit bnx2x_get_mac_hwinfo(struct bnx2x *bp)
"0x%x:0x%04x\n", val2, val);
bnx2x_set_mac_buf(fip_mac, val, val2);
- /* Disable FCoE if MAC configuration is
- * invalid.
- */
- if (!is_valid_ether_addr(fip_mac)) {
- bp->flags |= NO_FCOE_FLAG;
- memset(bp->fip_mac, 0, ETH_ALEN);
- }
} else
bp->flags |= NO_FCOE_FLAG;
}
@@ -8629,13 +8447,29 @@ static void __devinit bnx2x_get_mac_hwinfo(struct bnx2x *bp)
else if (!IS_MF(bp))
memcpy(fip_mac, iscsi_mac, ETH_ALEN);
}
+
+ /* Disable iSCSI if MAC configuration is
+ * invalid.
+ */
+ if (!is_valid_ether_addr(iscsi_mac)) {
+ bp->flags |= NO_ISCSI_FLAG;
+ memset(iscsi_mac, 0, ETH_ALEN);
+ }
+
+ /* Disable FCoE if MAC configuration is
+ * invalid.
+ */
+ if (!is_valid_ether_addr(fip_mac)) {
+ bp->flags |= NO_FCOE_FLAG;
+ memset(bp->fip_mac, 0, ETH_ALEN);
+ }
#endif
}
static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
{
int /*abs*/func = BP_ABS_FUNC(bp);
- int vn, port;
+ int vn;
u32 val = 0;
int rc = 0;
@@ -8670,7 +8504,6 @@ static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
bp->mf_ov = 0;
bp->mf_mode = 0;
vn = BP_E1HVN(bp);
- port = BP_PORT(bp);
if (!CHIP_IS_E1(bp) && !BP_NOMCP(bp)) {
DP(NETIF_MSG_PROBE,
@@ -8685,7 +8518,7 @@ static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
E1H_FUNC_MAX * sizeof(struct drv_func_mb);
/*
* get mf configuration:
- * 1. existance of MF configuration
+ * 1. existence of MF configuration
* 2. MAC address must be legal (check only upper bytes)
* for Switch-Independent mode;
* OVLAN must be legal for Switch-Dependent mode
@@ -8727,7 +8560,7 @@ static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
default:
/* Unknown configuration: reset mf_config */
bp->mf_config[vn] = 0;
- DP(NETIF_MSG_PROBE, "Unkown MF mode 0x%x\n",
+ DP(NETIF_MSG_PROBE, "Unknown MF mode 0x%x\n",
val);
}
}
@@ -8904,8 +8737,6 @@ static int __devinit bnx2x_init_bp(struct bnx2x *bp)
bp->multi_mode = multi_mode;
bp->int_mode = int_mode;
- bp->dev->features |= NETIF_F_GRO;
-
/* Set TPA flags */
if (disable_tpa) {
bp->flags &= ~TPA_ENABLE_FLAG;
@@ -8925,8 +8756,6 @@ static int __devinit bnx2x_init_bp(struct bnx2x *bp)
bp->tx_ring_size = MAX_TX_AVAIL;
- bp->rx_csum = 1;
-
/* make sure that the numbers are in the right granularity */
bp->tx_ticks = (50 / BNX2X_BTR) * BNX2X_BTR;
bp->rx_ticks = (25 / BNX2X_BTR) * BNX2X_BTR;
@@ -9304,6 +9133,8 @@ static const struct net_device_ops bnx2x_netdev_ops = {
.ndo_validate_addr = eth_validate_addr,
.ndo_do_ioctl = bnx2x_ioctl,
.ndo_change_mtu = bnx2x_change_mtu,
+ .ndo_fix_features = bnx2x_fix_features,
+ .ndo_set_features = bnx2x_set_features,
.ndo_tx_timeout = bnx2x_tx_timeout,
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = poll_bnx2x,
@@ -9430,20 +9261,17 @@ static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
dev->netdev_ops = &bnx2x_netdev_ops;
bnx2x_set_ethtool_ops(dev);
- dev->features |= NETIF_F_SG;
- dev->features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
- if (bp->flags & USING_DAC_FLAG)
- dev->features |= NETIF_F_HIGHDMA;
- dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
- dev->features |= NETIF_F_TSO6;
- dev->features |= (NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX);
- dev->vlan_features |= NETIF_F_SG;
- dev->vlan_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
+ dev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
+ NETIF_F_TSO | NETIF_F_TSO_ECN | NETIF_F_TSO6 |
+ NETIF_F_RXCSUM | NETIF_F_LRO | NETIF_F_HW_VLAN_TX;
+
+ dev->vlan_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
+ NETIF_F_TSO | NETIF_F_TSO_ECN | NETIF_F_TSO6 | NETIF_F_HIGHDMA;
+
+ dev->features |= dev->hw_features | NETIF_F_HW_VLAN_RX;
if (bp->flags & USING_DAC_FLAG)
- dev->vlan_features |= NETIF_F_HIGHDMA;
- dev->vlan_features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
- dev->vlan_features |= NETIF_F_TSO6;
+ dev->features |= NETIF_F_HIGHDMA;
#ifdef BCM_DCBNL
dev->dcbnl_ops = &bnx2x_dcbnl_ops;
@@ -9777,7 +9605,7 @@ static int __devinit bnx2x_init_one(struct pci_dev *pdev,
#endif
- /* Configure interupt mode: try to enable MSI-X/MSI if
+ /* Configure interrupt mode: try to enable MSI-X/MSI if
* needed, set bp->num_queues appropriately.
*/
bnx2x_set_int_mode(bp);
@@ -10342,6 +10170,11 @@ static int bnx2x_drv_ctl(struct net_device *dev, struct drv_ctl_info *ctl)
break;
}
+ case DRV_CTL_ISCSI_STOPPED_CMD: {
+ bnx2x_dcbx_set_params(bp, BNX2X_DCBX_STATE_ISCSI_STOPPED);
+ break;
+ }
+
default:
BNX2X_ERR("unknown command %x\n", ctl->cmd);
rc = -EINVAL;
diff --git a/drivers/net/bnx2x/bnx2x_reg.h b/drivers/net/bnx2x/bnx2x_reg.h
index 1c89f19a4425..86bba25d2d3f 100644
--- a/drivers/net/bnx2x/bnx2x_reg.h
+++ b/drivers/net/bnx2x/bnx2x_reg.h
@@ -1,6 +1,6 @@
/* bnx2x_reg.h: Broadcom Everest network driver.
*
- * Copyright (c) 2007-2010 Broadcom Corporation
+ * Copyright (c) 2007-2011 Broadcom Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -175,9 +175,9 @@
the initial credit value; read returns the current value of the credit
counter. Must be initialized to 1 at start-up. */
#define CCM_REG_CFC_INIT_CRD 0xd0204
-/* [RW 2] Auxillary counter flag Q number 1. */
+/* [RW 2] Auxiliary counter flag Q number 1. */
#define CCM_REG_CNT_AUX1_Q 0xd00c8
-/* [RW 2] Auxillary counter flag Q number 2. */
+/* [RW 2] Auxiliary counter flag Q number 2. */
#define CCM_REG_CNT_AUX2_Q 0xd00cc
/* [RW 28] The CM header value for QM request (primary). */
#define CCM_REG_CQM_CCM_HDR_P 0xd008c
@@ -457,13 +457,13 @@
#define CSDM_REG_AGG_INT_MODE_9 0xc21dc
/* [RW 13] The start address in the internal RAM for the cfc_rsp lcid */
#define CSDM_REG_CFC_RSP_START_ADDR 0xc2008
-/* [RW 16] The maximum value of the competion counter #0 */
+/* [RW 16] The maximum value of the completion counter #0 */
#define CSDM_REG_CMP_COUNTER_MAX0 0xc201c
-/* [RW 16] The maximum value of the competion counter #1 */
+/* [RW 16] The maximum value of the completion counter #1 */
#define CSDM_REG_CMP_COUNTER_MAX1 0xc2020
-/* [RW 16] The maximum value of the competion counter #2 */
+/* [RW 16] The maximum value of the completion counter #2 */
#define CSDM_REG_CMP_COUNTER_MAX2 0xc2024
-/* [RW 16] The maximum value of the competion counter #3 */
+/* [RW 16] The maximum value of the completion counter #3 */
#define CSDM_REG_CMP_COUNTER_MAX3 0xc2028
/* [RW 13] The start address in the internal RAM for the completion
counters. */
@@ -851,7 +851,7 @@
#define IGU_REG_ATTN_MSG_ADDR_L 0x130120
/* [R 4] Debug: [3] - attention write done message is pending (0-no pending;
* 1-pending). [2:0] = PFID. Pending means attention message was sent; but
- * write done didnt receive. */
+ * write done didn't receive. */
#define IGU_REG_ATTN_WRITE_DONE_PENDING 0x130030
#define IGU_REG_BLOCK_CONFIGURATION 0x130000
#define IGU_REG_COMMAND_REG_32LSB_DATA 0x130124
@@ -862,7 +862,7 @@
#define IGU_REG_CSTORM_TYPE_0_SB_CLEANUP 0x130200
/* [R 5] Debug: ctrl_fsm */
#define IGU_REG_CTRL_FSM 0x130064
-/* [R 1] data availble for error memory. If this bit is clear do not red
+/* [R 1] data available for error memory. If this bit is clear do not read
* from error_handling_memory. */
#define IGU_REG_ERROR_HANDLING_DATA_VALID 0x130130
/* [RW 11] Parity mask register #0 read/write */
@@ -3015,7 +3015,7 @@
block. Should be used for close the gates. */
#define PXP_REG_HST_DISCARD_DOORBELLS 0x1030a4
/* [R 1] debug only: '1' means this PSWHST is discarding doorbells. This bit
- should update accoring to 'hst_discard_doorbells' register when the state
+ should update according to 'hst_discard_doorbells' register when the state
machine is idle */
#define PXP_REG_HST_DISCARD_DOORBELLS_STATUS 0x1030a0
/* [RW 1] When 1; new internal writes arriving to the block are discarded.
@@ -3023,7 +3023,7 @@
#define PXP_REG_HST_DISCARD_INTERNAL_WRITES 0x1030a8
/* [R 6] debug only: A bit mask for all PSWHST internal write clients. '1'
means this PSWHST is discarding inputs from this client. Each bit should
- update accoring to 'hst_discard_internal_writes' register when the state
+ update according to 'hst_discard_internal_writes' register when the state
machine is idle. */
#define PXP_REG_HST_DISCARD_INTERNAL_WRITES_STATUS 0x10309c
/* [WB 160] Used for initialization of the inbound interrupts memory */
@@ -3822,13 +3822,13 @@
#define TSDM_REG_AGG_INT_T_1 0x420bc
/* [RW 13] The start address in the internal RAM for the cfc_rsp lcid */
#define TSDM_REG_CFC_RSP_START_ADDR 0x42008
-/* [RW 16] The maximum value of the competion counter #0 */
+/* [RW 16] The maximum value of the completion counter #0 */
#define TSDM_REG_CMP_COUNTER_MAX0 0x4201c
-/* [RW 16] The maximum value of the competion counter #1 */
+/* [RW 16] The maximum value of the completion counter #1 */
#define TSDM_REG_CMP_COUNTER_MAX1 0x42020
-/* [RW 16] The maximum value of the competion counter #2 */
+/* [RW 16] The maximum value of the completion counter #2 */
#define TSDM_REG_CMP_COUNTER_MAX2 0x42024
-/* [RW 16] The maximum value of the competion counter #3 */
+/* [RW 16] The maximum value of the completion counter #3 */
#define TSDM_REG_CMP_COUNTER_MAX3 0x42028
/* [RW 13] The start address in the internal RAM for the completion
counters. */
@@ -4284,13 +4284,13 @@
#define USDM_REG_AGG_INT_T_6 0xc40d0
/* [RW 13] The start address in the internal RAM for the cfc_rsp lcid */
#define USDM_REG_CFC_RSP_START_ADDR 0xc4008
-/* [RW 16] The maximum value of the competion counter #0 */
+/* [RW 16] The maximum value of the completion counter #0 */
#define USDM_REG_CMP_COUNTER_MAX0 0xc401c
-/* [RW 16] The maximum value of the competion counter #1 */
+/* [RW 16] The maximum value of the completion counter #1 */
#define USDM_REG_CMP_COUNTER_MAX1 0xc4020
-/* [RW 16] The maximum value of the competion counter #2 */
+/* [RW 16] The maximum value of the completion counter #2 */
#define USDM_REG_CMP_COUNTER_MAX2 0xc4024
-/* [RW 16] The maximum value of the competion counter #3 */
+/* [RW 16] The maximum value of the completion counter #3 */
#define USDM_REG_CMP_COUNTER_MAX3 0xc4028
/* [RW 13] The start address in the internal RAM for the completion
counters. */
@@ -4798,13 +4798,13 @@
#define XSDM_REG_AGG_INT_MODE_1 0x1661bc
/* [RW 13] The start address in the internal RAM for the cfc_rsp lcid */
#define XSDM_REG_CFC_RSP_START_ADDR 0x166008
-/* [RW 16] The maximum value of the competion counter #0 */
+/* [RW 16] The maximum value of the completion counter #0 */
#define XSDM_REG_CMP_COUNTER_MAX0 0x16601c
-/* [RW 16] The maximum value of the competion counter #1 */
+/* [RW 16] The maximum value of the completion counter #1 */
#define XSDM_REG_CMP_COUNTER_MAX1 0x166020
-/* [RW 16] The maximum value of the competion counter #2 */
+/* [RW 16] The maximum value of the completion counter #2 */
#define XSDM_REG_CMP_COUNTER_MAX2 0x166024
-/* [RW 16] The maximum value of the competion counter #3 */
+/* [RW 16] The maximum value of the completion counter #3 */
#define XSDM_REG_CMP_COUNTER_MAX3 0x166028
/* [RW 13] The start address in the internal RAM for the completion
counters. */
diff --git a/drivers/net/bnx2x/bnx2x_stats.c b/drivers/net/bnx2x/bnx2x_stats.c
index 3445ded6674f..e535bfa08945 100644
--- a/drivers/net/bnx2x/bnx2x_stats.c
+++ b/drivers/net/bnx2x/bnx2x_stats.c
@@ -1,6 +1,6 @@
/* bnx2x_stats.c: Broadcom Everest network driver.
*
- * Copyright (c) 2007-2010 Broadcom Corporation
+ * Copyright (c) 2007-2011 Broadcom Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
diff --git a/drivers/net/bnx2x/bnx2x_stats.h b/drivers/net/bnx2x/bnx2x_stats.h
index 596798c47452..45d14d8bc1aa 100644
--- a/drivers/net/bnx2x/bnx2x_stats.h
+++ b/drivers/net/bnx2x/bnx2x_stats.h
@@ -1,6 +1,6 @@
/* bnx2x_stats.h: Broadcom Everest network driver.
*
- * Copyright (c) 2007-2010 Broadcom Corporation
+ * Copyright (c) 2007-2011 Broadcom Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
diff --git a/drivers/net/bonding/Makefile b/drivers/net/bonding/Makefile
index 3c5c014e82b2..4c21bf6b8b2f 100644
--- a/drivers/net/bonding/Makefile
+++ b/drivers/net/bonding/Makefile
@@ -9,6 +9,3 @@ bonding-objs := bond_main.o bond_3ad.o bond_alb.o bond_sysfs.o bond_debugfs.o
proc-$(CONFIG_PROC_FS) += bond_procfs.o
bonding-objs += $(proc-y)
-ipv6-$(subst m,y,$(CONFIG_IPV6)) += bond_ipv6.o
-bonding-objs += $(ipv6-y)
-
diff --git a/drivers/net/bonding/bond_3ad.c b/drivers/net/bonding/bond_3ad.c
index 494bf960442d..c7537abca4f2 100644
--- a/drivers/net/bonding/bond_3ad.c
+++ b/drivers/net/bonding/bond_3ad.c
@@ -716,11 +716,9 @@ static void __set_agg_ports_ready(struct aggregator *aggregator, int val)
static u32 __get_agg_bandwidth(struct aggregator *aggregator)
{
u32 bandwidth = 0;
- u32 basic_speed;
if (aggregator->num_of_ports) {
- basic_speed = __get_link_speed(aggregator->lag_ports);
- switch (basic_speed) {
+ switch (__get_link_speed(aggregator->lag_ports)) {
case AD_LINK_SPEED_BITMASK_1MBPS:
bandwidth = aggregator->num_of_ports;
break;
@@ -1482,8 +1480,11 @@ static struct aggregator *ad_agg_selection_test(struct aggregator *best,
static int agg_device_up(const struct aggregator *agg)
{
- return (netif_running(agg->slave->dev) &&
- netif_carrier_ok(agg->slave->dev));
+ struct port *port = agg->lag_ports;
+ if (!port)
+ return 0;
+ return (netif_running(port->slave->dev) &&
+ netif_carrier_ok(port->slave->dev));
}
/**
@@ -2402,14 +2403,6 @@ int bond_3ad_xmit_xor(struct sk_buff *skb, struct net_device *dev)
struct ad_info ad_info;
int res = 1;
- /* make sure that the slaves list will
- * not change during tx
- */
- read_lock(&bond->lock);
-
- if (!BOND_IS_OK(bond))
- goto out;
-
if (bond_3ad_get_active_agg_info(bond, &ad_info)) {
pr_debug("%s: Error: bond_3ad_get_active_agg_info failed\n",
dev->name);
@@ -2463,39 +2456,20 @@ out:
/* no suitable interface, frame not sent */
dev_kfree_skb(skb);
}
- read_unlock(&bond->lock);
+
return NETDEV_TX_OK;
}
-int bond_3ad_lacpdu_recv(struct sk_buff *skb, struct net_device *dev, struct packet_type* ptype, struct net_device *orig_dev)
+void bond_3ad_lacpdu_recv(struct sk_buff *skb, struct bonding *bond,
+ struct slave *slave)
{
- struct bonding *bond = netdev_priv(dev);
- struct slave *slave = NULL;
- int ret = NET_RX_DROP;
-
- if (!(dev->flags & IFF_MASTER))
- goto out;
-
- skb = skb_share_check(skb, GFP_ATOMIC);
- if (!skb)
- goto out;
+ if (skb->protocol != PKT_TYPE_LACPDU)
+ return;
if (!pskb_may_pull(skb, sizeof(struct lacpdu)))
- goto out;
+ return;
read_lock(&bond->lock);
- slave = bond_get_slave_by_dev(netdev_priv(dev), orig_dev);
- if (!slave)
- goto out_unlock;
-
bond_3ad_rx_indication((struct lacpdu *) skb->data, slave, skb->len);
-
- ret = NET_RX_SUCCESS;
-
-out_unlock:
read_unlock(&bond->lock);
-out:
- dev_kfree_skb(skb);
-
- return ret;
}
diff --git a/drivers/net/bonding/bond_3ad.h b/drivers/net/bonding/bond_3ad.h
index b28baff70864..291dbd4d1f3d 100644
--- a/drivers/net/bonding/bond_3ad.h
+++ b/drivers/net/bonding/bond_3ad.h
@@ -258,7 +258,6 @@ struct ad_bond_info {
* requested
*/
struct timer_list ad_timer;
- struct packet_type ad_pkt_type;
};
struct ad_slave_info {
@@ -280,7 +279,8 @@ void bond_3ad_adapter_duplex_changed(struct slave *slave);
void bond_3ad_handle_link_change(struct slave *slave, char link);
int bond_3ad_get_active_agg_info(struct bonding *bond, struct ad_info *ad_info);
int bond_3ad_xmit_xor(struct sk_buff *skb, struct net_device *dev);
-int bond_3ad_lacpdu_recv(struct sk_buff *skb, struct net_device *dev, struct packet_type* ptype, struct net_device *orig_dev);
+void bond_3ad_lacpdu_recv(struct sk_buff *skb, struct bonding *bond,
+ struct slave *slave);
int bond_3ad_set_carrier(struct bonding *bond);
#endif //__BOND_3AD_H__
diff --git a/drivers/net/bonding/bond_alb.c b/drivers/net/bonding/bond_alb.c
index 9bc5de3e04a8..8f2d2e7c70e5 100644
--- a/drivers/net/bonding/bond_alb.c
+++ b/drivers/net/bonding/bond_alb.c
@@ -176,7 +176,7 @@ static int tlb_initialize(struct bonding *bond)
bond_info->tx_hashtbl = new_hashtbl;
for (i = 0; i < TLB_HASH_TABLE_SIZE; i++) {
- tlb_init_table_entry(&bond_info->tx_hashtbl[i], 1);
+ tlb_init_table_entry(&bond_info->tx_hashtbl[i], 0);
}
_unlock_tx_hashtbl(bond);
@@ -308,49 +308,33 @@ static void rlb_update_entry_from_arp(struct bonding *bond, struct arp_pkt *arp)
_unlock_rx_hashtbl(bond);
}
-static int rlb_arp_recv(struct sk_buff *skb, struct net_device *bond_dev, struct packet_type *ptype, struct net_device *orig_dev)
+static void rlb_arp_recv(struct sk_buff *skb, struct bonding *bond,
+ struct slave *slave)
{
- struct bonding *bond;
- struct arp_pkt *arp = (struct arp_pkt *)skb->data;
- int res = NET_RX_DROP;
+ struct arp_pkt *arp;
- while (bond_dev->priv_flags & IFF_802_1Q_VLAN)
- bond_dev = vlan_dev_real_dev(bond_dev);
-
- if (!(bond_dev->priv_flags & IFF_BONDING) ||
- !(bond_dev->flags & IFF_MASTER))
- goto out;
+ if (skb->protocol != cpu_to_be16(ETH_P_ARP))
+ return;
+ arp = (struct arp_pkt *) skb->data;
if (!arp) {
pr_debug("Packet has no ARP data\n");
- goto out;
+ return;
}
- skb = skb_share_check(skb, GFP_ATOMIC);
- if (!skb)
- goto out;
-
- if (!pskb_may_pull(skb, arp_hdr_len(bond_dev)))
- goto out;
+ if (!pskb_may_pull(skb, arp_hdr_len(bond->dev)))
+ return;
if (skb->len < sizeof(struct arp_pkt)) {
pr_debug("Packet is too small to be an ARP\n");
- goto out;
+ return;
}
if (arp->op_code == htons(ARPOP_REPLY)) {
/* update rx hash table for this ARP */
- bond = netdev_priv(bond_dev);
rlb_update_entry_from_arp(bond, arp);
pr_debug("Server received an ARP Reply from client\n");
}
-
- res = NET_RX_SUCCESS;
-
-out:
- dev_kfree_skb(skb);
-
- return res;
}
/* Caller must hold bond lock for read */
@@ -701,7 +685,7 @@ static struct slave *rlb_arp_xmit(struct sk_buff *skb, struct bonding *bond)
*/
rlb_choose_channel(skb, bond);
- /* The ARP relpy packets must be delayed so that
+ /* The ARP reply packets must be delayed so that
* they can cancel out the influence of the ARP request.
*/
bond->alb_info.rlb_update_delay_counter = RLB_UPDATE_DELAY;
@@ -759,7 +743,6 @@ static void rlb_init_table_entry(struct rlb_client_info *entry)
static int rlb_initialize(struct bonding *bond)
{
struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond));
- struct packet_type *pk_type = &(BOND_ALB_INFO(bond).rlb_pkt_type);
struct rlb_client_info *new_hashtbl;
int size = RLB_HASH_TABLE_SIZE * sizeof(struct rlb_client_info);
int i;
@@ -784,13 +767,8 @@ static int rlb_initialize(struct bonding *bond)
_unlock_rx_hashtbl(bond);
- /*initialize packet type*/
- pk_type->type = cpu_to_be16(ETH_P_ARP);
- pk_type->dev = bond->dev;
- pk_type->func = rlb_arp_recv;
-
/* register to receive ARPs */
- dev_add_pack(pk_type);
+ bond->recv_probe = rlb_arp_recv;
return 0;
}
@@ -799,8 +777,6 @@ static void rlb_deinitialize(struct bonding *bond)
{
struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond));
- dev_remove_pack(&(bond_info->rlb_pkt_type));
-
_lock_rx_hashtbl(bond);
kfree(bond_info->rx_hashtbl);
@@ -1042,7 +1018,7 @@ static void alb_change_hw_addr_on_detach(struct bonding *bond, struct slave *sla
*
* If the permanent hw address of @slave is @bond's hw address, we need to
* find a different hw address to give @slave, that isn't in use by any other
- * slave in the bond. This address must be, of course, one of the premanent
+ * slave in the bond. This address must be, of course, one of the permanent
* addresses of the other slaves.
*
* We go over the slave list, and for each slave there we compare its
@@ -1249,16 +1225,10 @@ int bond_alb_xmit(struct sk_buff *skb, struct net_device *bond_dev)
skb_reset_mac_header(skb);
eth_data = eth_hdr(skb);
- /* make sure that the curr_active_slave and the slaves list do
- * not change during tx
+	/* make sure that the curr_active_slave does not change during tx
*/
- read_lock(&bond->lock);
read_lock(&bond->curr_slave_lock);
- if (!BOND_IS_OK(bond)) {
- goto out;
- }
-
switch (ntohs(skb->protocol)) {
case ETH_P_IP: {
const struct iphdr *iph = ip_hdr(skb);
@@ -1358,13 +1328,12 @@ int bond_alb_xmit(struct sk_buff *skb, struct net_device *bond_dev)
}
}
-out:
if (res) {
/* no suitable interface, frame not sent */
dev_kfree_skb(skb);
}
read_unlock(&bond->curr_slave_lock);
- read_unlock(&bond->lock);
+
return NETDEV_TX_OK;
}
diff --git a/drivers/net/bonding/bond_alb.h b/drivers/net/bonding/bond_alb.h
index 4b3e35878406..90f140a2d197 100644
--- a/drivers/net/bonding/bond_alb.h
+++ b/drivers/net/bonding/bond_alb.h
@@ -75,8 +75,8 @@ struct tlb_client_info {
* gave this entry index.
*/
u32 tx_bytes; /* Each Client accumulates the BytesTx that
- * were tranmitted to it, and after each
- * CallBack the LoadHistory is devided
+ * were transmitted to it, and after each
+ * CallBack the LoadHistory is divided
* by the balance interval
*/
u32 load_history; /* This field contains the amount of Bytes
@@ -122,7 +122,6 @@ struct tlb_slave_info {
};
struct alb_bond_info {
- struct timer_list alb_timer;
struct tlb_client_info *tx_hashtbl; /* Dynamically allocated */
spinlock_t tx_hashtbl_lock;
u32 unbalanced_load;
@@ -130,7 +129,6 @@ struct alb_bond_info {
int lp_counter;
/* -------- rlb parameters -------- */
int rlb_enabled;
- struct packet_type rlb_pkt_type;
struct rlb_client_info *rx_hashtbl; /* Receive hash table */
spinlock_t rx_hashtbl_lock;
u32 rx_hashtbl_head;
@@ -140,7 +138,6 @@ struct alb_bond_info {
struct slave *next_rx_slave;/* next slave to be assigned
* to a new rx client for
*/
- u32 rlb_interval_counter;
u8 primary_is_promisc; /* boolean */
u32 rlb_promisc_timeout_counter;/* counts primary
* promiscuity time
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 16d6fe954695..088fd845ffdf 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -89,8 +89,7 @@
static int max_bonds = BOND_DEFAULT_MAX_BONDS;
static int tx_queues = BOND_DEFAULT_TX_QUEUES;
-static int num_grat_arp = 1;
-static int num_unsol_na = 1;
+static int num_peer_notif = 1;
static int miimon = BOND_LINK_MON_INTERV;
static int updelay;
static int downdelay;
@@ -113,10 +112,10 @@ module_param(max_bonds, int, 0);
MODULE_PARM_DESC(max_bonds, "Max number of bonded devices");
module_param(tx_queues, int, 0);
MODULE_PARM_DESC(tx_queues, "Max number of transmit queues (default = 16)");
-module_param(num_grat_arp, int, 0644);
-MODULE_PARM_DESC(num_grat_arp, "Number of gratuitous ARP packets to send on failover event");
-module_param(num_unsol_na, int, 0644);
-MODULE_PARM_DESC(num_unsol_na, "Number of unsolicited IPv6 Neighbor Advertisements packets to send on failover event");
+module_param_named(num_grat_arp, num_peer_notif, int, 0644);
+MODULE_PARM_DESC(num_grat_arp, "Number of peer notifications to send on failover event (alias of num_unsol_na)");
+module_param_named(num_unsol_na, num_peer_notif, int, 0644);
+MODULE_PARM_DESC(num_unsol_na, "Number of peer notifications to send on failover event (alias of num_grat_arp)");
module_param(miimon, int, 0);
MODULE_PARM_DESC(miimon, "Link check interval in milliseconds");
module_param(updelay, int, 0);
@@ -234,7 +233,6 @@ struct bond_parm_tbl ad_select_tbl[] = {
/*-------------------------- Forward declarations ---------------------------*/
-static void bond_send_gratuitous_arp(struct bonding *bond);
static int bond_init(struct net_device *bond_dev);
static void bond_uninit(struct net_device *bond_dev);
@@ -346,32 +344,6 @@ out:
}
/**
- * bond_has_challenged_slaves
- * @bond: the bond we're working on
- *
- * Searches the slave list. Returns 1 if a vlan challenged slave
- * was found, 0 otherwise.
- *
- * Assumes bond->lock is held.
- */
-static int bond_has_challenged_slaves(struct bonding *bond)
-{
- struct slave *slave;
- int i;
-
- bond_for_each_slave(bond, slave, i) {
- if (slave->dev->features & NETIF_F_VLAN_CHALLENGED) {
- pr_debug("found VLAN challenged slave - %s\n",
- slave->dev->name);
- return 1;
- }
- }
-
- pr_debug("no VLAN challenged slaves found\n");
- return 0;
-}
-
-/**
* bond_next_vlan - safely skip to the next item in the vlans list.
* @bond: the bond we're working on
* @curr: item we're advancing from
@@ -631,7 +603,8 @@ down:
static int bond_update_speed_duplex(struct slave *slave)
{
struct net_device *slave_dev = slave->dev;
- struct ethtool_cmd etool;
+ struct ethtool_cmd etool = { .cmd = ETHTOOL_GSET };
+ u32 slave_speed;
int res;
/* Fake speed and duplex */
@@ -645,7 +618,8 @@ static int bond_update_speed_duplex(struct slave *slave)
if (res < 0)
return -1;
- switch (etool.speed) {
+ slave_speed = ethtool_cmd_speed(&etool);
+ switch (slave_speed) {
case SPEED_10:
case SPEED_100:
case SPEED_1000:
@@ -663,7 +637,7 @@ static int bond_update_speed_duplex(struct slave *slave)
return -1;
}
- slave->speed = etool.speed;
+ slave->speed = slave_speed;
slave->duplex = etool.duplex;
return 0;
@@ -1087,6 +1061,21 @@ static struct slave *bond_find_best_slave(struct bonding *bond)
return bestslave;
}
+static bool bond_should_notify_peers(struct bonding *bond)
+{
+ struct slave *slave = bond->curr_active_slave;
+
+ pr_debug("bond_should_notify_peers: bond %s slave %s\n",
+ bond->dev->name, slave ? slave->dev->name : "NULL");
+
+ if (!slave || !bond->send_peer_notif ||
+ test_bit(__LINK_STATE_LINKWATCH_PENDING, &slave->dev->state))
+ return false;
+
+ bond->send_peer_notif--;
+ return true;
+}
+
/**
* change_active_interface - change the active slave into the specified one
* @bond: our bonding struct
@@ -1154,6 +1143,8 @@ void bond_change_active_slave(struct bonding *bond, struct slave *new_active)
bond_set_slave_inactive_flags(old_active);
if (new_active) {
+ bool should_notify_peers = false;
+
bond_set_slave_active_flags(new_active);
if (bond->params.fail_over_mac)
@@ -1161,17 +1152,19 @@ void bond_change_active_slave(struct bonding *bond, struct slave *new_active)
old_active);
if (netif_running(bond->dev)) {
- bond->send_grat_arp = bond->params.num_grat_arp;
- bond_send_gratuitous_arp(bond);
-
- bond->send_unsol_na = bond->params.num_unsol_na;
- bond_send_unsolicited_na(bond);
+ bond->send_peer_notif =
+ bond->params.num_peer_notif;
+ should_notify_peers =
+ bond_should_notify_peers(bond);
}
write_unlock_bh(&bond->curr_slave_lock);
read_unlock(&bond->lock);
netdev_bonding_change(bond->dev, NETDEV_BONDING_FAILOVER);
+ if (should_notify_peers)
+ netdev_bonding_change(bond->dev,
+ NETDEV_NOTIFY_PEERS);
read_lock(&bond->lock);
write_lock_bh(&bond->curr_slave_lock);
@@ -1387,52 +1380,68 @@ static int bond_sethwaddr(struct net_device *bond_dev,
return 0;
}
-#define BOND_VLAN_FEATURES \
- (NETIF_F_VLAN_CHALLENGED | NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_TX | \
- NETIF_F_HW_VLAN_FILTER)
-
-/*
- * Compute the common dev->feature set available to all slaves. Some
- * feature bits are managed elsewhere, so preserve those feature bits
- * on the master device.
- */
-static int bond_compute_features(struct bonding *bond)
+static u32 bond_fix_features(struct net_device *dev, u32 features)
{
struct slave *slave;
- struct net_device *bond_dev = bond->dev;
- u32 features = bond_dev->features;
- u32 vlan_features = 0;
- unsigned short max_hard_header_len = max((u16)ETH_HLEN,
- bond_dev->hard_header_len);
+ struct bonding *bond = netdev_priv(dev);
+ u32 mask;
int i;
- features &= ~(NETIF_F_ALL_CSUM | BOND_VLAN_FEATURES);
- features |= NETIF_F_GSO_MASK | NETIF_F_NO_CSUM;
+ read_lock(&bond->lock);
- if (!bond->first_slave)
- goto done;
+ if (!bond->first_slave) {
+ /* Disable adding VLANs to empty bond. But why? --mq */
+ features |= NETIF_F_VLAN_CHALLENGED;
+ goto out;
+ }
+ mask = features;
features &= ~NETIF_F_ONE_FOR_ALL;
+ features |= NETIF_F_ALL_FOR_ALL;
- vlan_features = bond->first_slave->dev->vlan_features;
bond_for_each_slave(bond, slave, i) {
features = netdev_increment_features(features,
slave->dev->features,
- NETIF_F_ONE_FOR_ALL);
+ mask);
+ }
+
+out:
+ read_unlock(&bond->lock);
+ return features;
+}
+
+#define BOND_VLAN_FEATURES (NETIF_F_ALL_TX_OFFLOADS | \
+ NETIF_F_SOFT_FEATURES | \
+ NETIF_F_LRO)
+
+static void bond_compute_features(struct bonding *bond)
+{
+ struct slave *slave;
+ struct net_device *bond_dev = bond->dev;
+ u32 vlan_features = BOND_VLAN_FEATURES;
+ unsigned short max_hard_header_len = ETH_HLEN;
+ int i;
+
+ read_lock(&bond->lock);
+
+ if (!bond->first_slave)
+ goto done;
+
+ bond_for_each_slave(bond, slave, i) {
vlan_features = netdev_increment_features(vlan_features,
- slave->dev->vlan_features,
- NETIF_F_ONE_FOR_ALL);
+ slave->dev->vlan_features, BOND_VLAN_FEATURES);
+
if (slave->dev->hard_header_len > max_hard_header_len)
max_hard_header_len = slave->dev->hard_header_len;
}
done:
- features |= (bond_dev->features & BOND_VLAN_FEATURES);
- bond_dev->features = netdev_fix_features(bond_dev, features);
- bond_dev->vlan_features = netdev_fix_features(bond_dev, vlan_features);
+ bond_dev->vlan_features = vlan_features;
bond_dev->hard_header_len = max_hard_header_len;
- return 0;
+ read_unlock(&bond->lock);
+
+ netdev_change_features(bond_dev);
}
static void bond_setup_by_slave(struct net_device *bond_dev,
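
Feature handling is now split in two: bond_fix_features(), wired up as ndo_fix_features further down, only masks what the stack may enable, while bond_compute_features() recomputes vlan_features and hard_header_len across the slaves and then asks the core to re-run negotiation via netdev_change_features(). The per-slave fold relies on netdev_increment_features(); a rough standalone approximation of that kind of fold (an illustration, not the kernel helper), assuming "one-for-all" bits must be offered by every slave while "all-for-all" bits survive if any slave offers them:

    /* Rough approximation of folding slave feature masks: bits in
     * 'one_for_all' are ANDed across slaves, bits in 'all_for_all' are ORed.
     * This mirrors the intent of netdev_increment_features() but is only an
     * illustration, not the kernel implementation.
     */
    static unsigned int fold_features(unsigned int acc, unsigned int slave,
                                      unsigned int one_for_all,
                                      unsigned int all_for_all)
    {
            acc &= ~one_for_all | (slave & one_for_all);
            acc |= slave & all_for_all;
            return acc;
    }
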
@@ -1452,27 +1461,17 @@ static void bond_setup_by_slave(struct net_device *bond_dev,
}
/* On bonding slaves other than the currently active slave, suppress
- * duplicates except for 802.3ad ETH_P_SLOW, alb non-mcast/bcast, and
- * ARP on active-backup slaves with arp_validate enabled.
+ * duplicates except for alb non-mcast/bcast.
*/
static bool bond_should_deliver_exact_match(struct sk_buff *skb,
struct slave *slave,
struct bonding *bond)
{
if (bond_is_slave_inactive(slave)) {
- if (slave_do_arp_validate(bond, slave) &&
- skb->protocol == __cpu_to_be16(ETH_P_ARP))
- return false;
-
if (bond->params.mode == BOND_MODE_ALB &&
skb->pkt_type != PACKET_BROADCAST &&
skb->pkt_type != PACKET_MULTICAST)
- return false;
-
- if (bond->params.mode == BOND_MODE_8023AD &&
- skb->protocol == __cpu_to_be16(ETH_P_SLOW))
return false;
-
return true;
}
return false;
@@ -1496,6 +1495,15 @@ static rx_handler_result_t bond_handle_frame(struct sk_buff **pskb)
if (bond->params.arp_interval)
slave->dev->last_rx = jiffies;
+ if (bond->recv_probe) {
+ struct sk_buff *nskb = skb_clone(skb, GFP_ATOMIC);
+
+ if (likely(nskb)) {
+ bond->recv_probe(nskb, bond, slave);
+ dev_kfree_skb(nskb);
+ }
+ }
+
if (bond_should_deliver_exact_match(skb, slave, bond)) {
return RX_HANDLER_EXACT;
}
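
The recv_probe hook added here replaces the dev_add_pack()-based ARP and LACPDU receivers removed later in this patch: bond_open() points it at bond_arp_rcv or bond_3ad_lacpdu_recv depending on the mode, bond_close() clears it, and the rx handler hands the consumer a clone so the probe can parse the frame while the original skb continues up the stack unchanged. A small self-contained sketch of that clone-and-consume pattern, with a stand-in buffer type instead of sk_buff:

    /* Clone-and-consume sketch: the probe callback works on its own copy,
     * so the original buffer stays untouched on the normal receive path.
     * 'struct buf' stands in for sk_buff, 'probe_fn' for bond->recv_probe.
     */
    struct buf { const unsigned char *data; unsigned int len; };

    typedef void (*probe_fn)(const struct buf *copy);

    static void run_probe(const struct buf *orig, probe_fn probe)
    {
            struct buf copy = *orig;        /* skb_clone() analogue */

            if (probe)
                    probe(&copy);           /* consumer parses the copy */
            /* copy is discarded here, mirroring dev_kfree_skb(nskb) */
    }
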
@@ -1526,7 +1534,6 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
struct netdev_hw_addr *ha;
struct sockaddr addr;
int link_reporting;
- int old_features = bond_dev->features;
int res = 0;
if (!bond->params.use_carrier && slave_dev->ethtool_ops == NULL &&
@@ -1559,16 +1566,9 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
pr_warning("%s: Warning: enslaved VLAN challenged slave %s. Adding VLANs will be blocked as long as %s is part of bond %s\n",
bond_dev->name, slave_dev->name,
slave_dev->name, bond_dev->name);
- bond_dev->features |= NETIF_F_VLAN_CHALLENGED;
}
} else {
pr_debug("%s: ! NETIF_F_VLAN_CHALLENGED\n", slave_dev->name);
- if (bond->slave_cnt == 0) {
- /* First slave, and it is not VLAN challenged,
- * so remove the block of adding VLANs over the bond.
- */
- bond_dev->features &= ~NETIF_F_VLAN_CHALLENGED;
- }
}
/*
@@ -1757,10 +1757,10 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
new_slave->delay = 0;
new_slave->link_failure_count = 0;
- bond_compute_features(bond);
-
write_unlock_bh(&bond->lock);
+ bond_compute_features(bond);
+
read_lock(&bond->lock);
new_slave->last_arp_rx = jiffies;
@@ -1940,7 +1940,7 @@ err_free:
kfree(new_slave);
err_undo_flags:
- bond_dev->features = old_features;
+ bond_compute_features(bond);
return res;
}
@@ -1961,6 +1961,7 @@ int bond_release(struct net_device *bond_dev, struct net_device *slave_dev)
struct bonding *bond = netdev_priv(bond_dev);
struct slave *slave, *oldcurrent;
struct sockaddr addr;
+ u32 old_features = bond_dev->features;
/* slave is not a slave or master is not master of this slave */
if (!(slave_dev->flags & IFF_SLAVE) ||
@@ -2021,8 +2022,6 @@ int bond_release(struct net_device *bond_dev, struct net_device *slave_dev)
/* release the slave from its bond */
bond_detach_slave(bond, slave);
- bond_compute_features(bond);
-
if (bond->primary_slave == slave)
bond->primary_slave = NULL;
@@ -2066,24 +2065,23 @@ int bond_release(struct net_device *bond_dev, struct net_device *slave_dev)
*/
memset(bond_dev->dev_addr, 0, bond_dev->addr_len);
- if (!bond->vlgrp) {
- bond_dev->features |= NETIF_F_VLAN_CHALLENGED;
- } else {
+ if (bond->vlgrp) {
pr_warning("%s: Warning: clearing HW address of %s while it still has VLANs.\n",
bond_dev->name, bond_dev->name);
pr_warning("%s: When re-adding slaves, make sure the bond's HW address matches its VLANs'.\n",
bond_dev->name);
}
- } else if ((bond_dev->features & NETIF_F_VLAN_CHALLENGED) &&
- !bond_has_challenged_slaves(bond)) {
- pr_info("%s: last VLAN challenged slave %s left bond %s. VLAN blocking is removed\n",
- bond_dev->name, slave_dev->name, bond_dev->name);
- bond_dev->features &= ~NETIF_F_VLAN_CHALLENGED;
}
write_unlock_bh(&bond->lock);
unblock_netpoll_tx();
+ bond_compute_features(bond);
+ if (!(bond_dev->features & NETIF_F_VLAN_CHALLENGED) &&
+ (old_features & NETIF_F_VLAN_CHALLENGED))
+ pr_info("%s: last VLAN challenged slave %s left bond %s. VLAN blocking is removed\n",
+ bond_dev->name, slave_dev->name, bond_dev->name);
+
/* must do this from outside any spinlocks */
bond_destroy_slave_symlinks(bond_dev, slave_dev);
@@ -2201,8 +2199,6 @@ static int bond_release_all(struct net_device *bond_dev)
bond_alb_deinit_slave(bond, slave);
}
- bond_compute_features(bond);
-
bond_destroy_slave_symlinks(bond_dev, slave_dev);
bond_del_vlans_from_slave(bond, slave_dev);
@@ -2251,9 +2247,7 @@ static int bond_release_all(struct net_device *bond_dev)
*/
memset(bond_dev->dev_addr, 0, bond_dev->addr_len);
- if (!bond->vlgrp) {
- bond_dev->features |= NETIF_F_VLAN_CHALLENGED;
- } else {
+ if (bond->vlgrp) {
pr_warning("%s: Warning: clearing HW address of %s while it still has VLANs.\n",
bond_dev->name, bond_dev->name);
pr_warning("%s: When re-adding slaves, make sure the bond's HW address matches its VLANs'.\n",
@@ -2264,6 +2258,9 @@ static int bond_release_all(struct net_device *bond_dev)
out:
write_unlock_bh(&bond->lock);
+
+ bond_compute_features(bond);
+
return 0;
}
@@ -2493,7 +2490,7 @@ static void bond_miimon_commit(struct bonding *bond)
bond_update_speed_duplex(slave);
- pr_info("%s: link status definitely up for interface %s, %d Mbps %s duplex.\n",
+ pr_info("%s: link status definitely up for interface %s, %u Mbps %s duplex.\n",
bond->dev->name, slave->dev->name,
slave->speed, slave->duplex ? "full" : "half");
@@ -2570,6 +2567,7 @@ void bond_mii_monitor(struct work_struct *work)
{
struct bonding *bond = container_of(work, struct bonding,
mii_work.work);
+ bool should_notify_peers = false;
read_lock(&bond->lock);
if (bond->kill_timers)
@@ -2578,17 +2576,7 @@ void bond_mii_monitor(struct work_struct *work)
if (bond->slave_cnt == 0)
goto re_arm;
- if (bond->send_grat_arp) {
- read_lock(&bond->curr_slave_lock);
- bond_send_gratuitous_arp(bond);
- read_unlock(&bond->curr_slave_lock);
- }
-
- if (bond->send_unsol_na) {
- read_lock(&bond->curr_slave_lock);
- bond_send_unsolicited_na(bond);
- read_unlock(&bond->curr_slave_lock);
- }
+ should_notify_peers = bond_should_notify_peers(bond);
if (bond_miimon_inspect(bond)) {
read_unlock(&bond->lock);
@@ -2608,6 +2596,12 @@ re_arm:
msecs_to_jiffies(bond->params.miimon));
out:
read_unlock(&bond->lock);
+
+ if (should_notify_peers) {
+ rtnl_lock();
+ netdev_bonding_change(bond->dev, NETDEV_NOTIFY_PEERS);
+ rtnl_unlock();
+ }
}
static __be32 bond_glean_dev_ip(struct net_device *dev)
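
In bond_mii_monitor() above, the NETDEV_NOTIFY_PEERS call is deliberately deferred until bond->lock has been dropped and is wrapped in rtnl_lock(): notifier callbacks may sleep and generally expect RTNL, which the old in-line gratuitous-ARP/unsolicited-NA sends under the read lock could not provide. Consumers see the event through the ordinary netdevice notifier chain; a hedged sketch of such a listener (the notifier API is the standard one, the body is illustrative):

    /* Illustrative listener for NETDEV_NOTIFY_PEERS on the netdevice
     * notifier chain; a real consumer would re-announce addresses
     * (gratuitous ARP / unsolicited NA) for the affected device.
     */
    static int peer_notif_event(struct notifier_block *nb,
                                unsigned long event, void *ptr)
    {
            if (event != NETDEV_NOTIFY_PEERS)
                    return NOTIFY_DONE;
            /* refresh neighbour state for the device carried in ptr */
            return NOTIFY_OK;
    }

    static struct notifier_block peer_notif_nb = {
            .notifier_call = peer_notif_event,
    };
    /* registered with register_netdevice_notifier(&peer_notif_nb) */
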
@@ -2751,44 +2745,6 @@ static void bond_arp_send_all(struct bonding *bond, struct slave *slave)
}
}
-/*
- * Kick out a gratuitous ARP for an IP on the bonding master plus one
- * for each VLAN above us.
- *
- * Caller must hold curr_slave_lock for read or better
- */
-static void bond_send_gratuitous_arp(struct bonding *bond)
-{
- struct slave *slave = bond->curr_active_slave;
- struct vlan_entry *vlan;
- struct net_device *vlan_dev;
-
- pr_debug("bond_send_grat_arp: bond %s slave %s\n",
- bond->dev->name, slave ? slave->dev->name : "NULL");
-
- if (!slave || !bond->send_grat_arp ||
- test_bit(__LINK_STATE_LINKWATCH_PENDING, &slave->dev->state))
- return;
-
- bond->send_grat_arp--;
-
- if (bond->master_ip) {
- bond_arp_send(slave->dev, ARPOP_REPLY, bond->master_ip,
- bond->master_ip, 0);
- }
-
- if (!bond->vlgrp)
- return;
-
- list_for_each_entry(vlan, &bond->vlan_list, vlan_list) {
- vlan_dev = vlan_group_get_device(bond->vlgrp, vlan->vlan_id);
- if (vlan->vlan_ip) {
- bond_arp_send(slave->dev, ARPOP_REPLY, vlan->vlan_ip,
- vlan->vlan_ip, vlan->vlan_id);
- }
- }
-}
-
static void bond_validate_arp(struct bonding *bond, struct slave *slave, __be32 sip, __be32 tip)
{
int i;
@@ -2806,48 +2762,26 @@ static void bond_validate_arp(struct bonding *bond, struct slave *slave, __be32
}
}
-static int bond_arp_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt, struct net_device *orig_dev)
+static void bond_arp_rcv(struct sk_buff *skb, struct bonding *bond,
+ struct slave *slave)
{
struct arphdr *arp;
- struct slave *slave;
- struct bonding *bond;
unsigned char *arp_ptr;
__be32 sip, tip;
- if (dev->priv_flags & IFF_802_1Q_VLAN) {
- /*
- * When using VLANS and bonding, dev and oriv_dev may be
- * incorrect if the physical interface supports VLAN
- * acceleration. With this change ARP validation now
- * works for hosts only reachable on the VLAN interface.
- */
- dev = vlan_dev_real_dev(dev);
- orig_dev = dev_get_by_index_rcu(dev_net(skb->dev),skb->skb_iif);
- }
-
- if (!(dev->priv_flags & IFF_BONDING) || !(dev->flags & IFF_MASTER))
- goto out;
+ if (skb->protocol != __cpu_to_be16(ETH_P_ARP))
+ return;
- bond = netdev_priv(dev);
read_lock(&bond->lock);
- pr_debug("bond_arp_rcv: bond %s skb->dev %s orig_dev %s\n",
- bond->dev->name, skb->dev ? skb->dev->name : "NULL",
- orig_dev ? orig_dev->name : "NULL");
-
- slave = bond_get_slave_by_dev(bond, orig_dev);
- if (!slave || !slave_do_arp_validate(bond, slave))
- goto out_unlock;
-
- skb = skb_share_check(skb, GFP_ATOMIC);
- if (!skb)
- goto out_unlock;
+ pr_debug("bond_arp_rcv: bond %s skb->dev %s\n",
+ bond->dev->name, skb->dev->name);
- if (!pskb_may_pull(skb, arp_hdr_len(dev)))
+ if (!pskb_may_pull(skb, arp_hdr_len(bond->dev)))
goto out_unlock;
arp = arp_hdr(skb);
- if (arp->ar_hln != dev->addr_len ||
+ if (arp->ar_hln != bond->dev->addr_len ||
skb->pkt_type == PACKET_OTHERHOST ||
skb->pkt_type == PACKET_LOOPBACK ||
arp->ar_hrd != htons(ARPHRD_ETHER) ||
@@ -2856,9 +2790,9 @@ static int bond_arp_rcv(struct sk_buff *skb, struct net_device *dev, struct pack
goto out_unlock;
arp_ptr = (unsigned char *)(arp + 1);
- arp_ptr += dev->addr_len;
+ arp_ptr += bond->dev->addr_len;
memcpy(&sip, arp_ptr, 4);
- arp_ptr += 4 + dev->addr_len;
+ arp_ptr += 4 + bond->dev->addr_len;
memcpy(&tip, arp_ptr, 4);
pr_debug("bond_arp_rcv: %s %s/%d av %d sv %d sip %pI4 tip %pI4\n",
@@ -2881,9 +2815,6 @@ static int bond_arp_rcv(struct sk_buff *skb, struct net_device *dev, struct pack
out_unlock:
read_unlock(&bond->lock);
-out:
- dev_kfree_skb(skb);
- return NET_RX_SUCCESS;
}
/*
@@ -3243,6 +3174,7 @@ void bond_activebackup_arp_mon(struct work_struct *work)
{
struct bonding *bond = container_of(work, struct bonding,
arp_work.work);
+ bool should_notify_peers = false;
int delta_in_ticks;
read_lock(&bond->lock);
@@ -3255,17 +3187,7 @@ void bond_activebackup_arp_mon(struct work_struct *work)
if (bond->slave_cnt == 0)
goto re_arm;
- if (bond->send_grat_arp) {
- read_lock(&bond->curr_slave_lock);
- bond_send_gratuitous_arp(bond);
- read_unlock(&bond->curr_slave_lock);
- }
-
- if (bond->send_unsol_na) {
- read_lock(&bond->curr_slave_lock);
- bond_send_unsolicited_na(bond);
- read_unlock(&bond->curr_slave_lock);
- }
+ should_notify_peers = bond_should_notify_peers(bond);
if (bond_ab_arp_inspect(bond, delta_in_ticks)) {
read_unlock(&bond->lock);
@@ -3286,6 +3208,12 @@ re_arm:
queue_delayed_work(bond->wq, &bond->arp_work, delta_in_ticks);
out:
read_unlock(&bond->lock);
+
+ if (should_notify_peers) {
+ rtnl_lock();
+ netdev_bonding_change(bond->dev, NETDEV_NOTIFY_PEERS);
+ rtnl_unlock();
+ }
}
/*-------------------------- netdev event handling --------------------------*/
@@ -3339,8 +3267,8 @@ static int bond_slave_netdev_event(unsigned long event,
slave = bond_get_slave_by_dev(bond, slave_dev);
if (slave) {
- u16 old_speed = slave->speed;
- u16 old_duplex = slave->duplex;
+ u32 old_speed = slave->speed;
+ u8 old_duplex = slave->duplex;
bond_update_speed_duplex(slave);
@@ -3482,48 +3410,6 @@ static struct notifier_block bond_inetaddr_notifier = {
.notifier_call = bond_inetaddr_event,
};
-/*-------------------------- Packet type handling ---------------------------*/
-
-/* register to receive lacpdus on a bond */
-static void bond_register_lacpdu(struct bonding *bond)
-{
- struct packet_type *pk_type = &(BOND_AD_INFO(bond).ad_pkt_type);
-
- /* initialize packet type */
- pk_type->type = PKT_TYPE_LACPDU;
- pk_type->dev = bond->dev;
- pk_type->func = bond_3ad_lacpdu_recv;
-
- dev_add_pack(pk_type);
-}
-
-/* unregister to receive lacpdus on a bond */
-static void bond_unregister_lacpdu(struct bonding *bond)
-{
- dev_remove_pack(&(BOND_AD_INFO(bond).ad_pkt_type));
-}
-
-void bond_register_arp(struct bonding *bond)
-{
- struct packet_type *pt = &bond->arp_mon_pt;
-
- if (pt->type)
- return;
-
- pt->type = htons(ETH_P_ARP);
- pt->dev = bond->dev;
- pt->func = bond_arp_rcv;
- dev_add_pack(pt);
-}
-
-void bond_unregister_arp(struct bonding *bond)
-{
- struct packet_type *pt = &bond->arp_mon_pt;
-
- dev_remove_pack(pt);
- pt->type = 0;
-}
-
/*---------------------------- Hashing Policies -----------------------------*/
/*
@@ -3617,14 +3503,14 @@ static int bond_open(struct net_device *bond_dev)
queue_delayed_work(bond->wq, &bond->arp_work, 0);
if (bond->params.arp_validate)
- bond_register_arp(bond);
+ bond->recv_probe = bond_arp_rcv;
}
if (bond->params.mode == BOND_MODE_8023AD) {
INIT_DELAYED_WORK(&bond->ad_work, bond_3ad_state_machine_handler);
queue_delayed_work(bond->wq, &bond->ad_work, 0);
/* register to receive LACPDUs */
- bond_register_lacpdu(bond);
+ bond->recv_probe = bond_3ad_lacpdu_recv;
bond_3ad_initiate_agg_selection(bond, 1);
}
@@ -3635,18 +3521,9 @@ static int bond_close(struct net_device *bond_dev)
{
struct bonding *bond = netdev_priv(bond_dev);
- if (bond->params.mode == BOND_MODE_8023AD) {
- /* Unregister the receive of LACPDUs */
- bond_unregister_lacpdu(bond);
- }
-
- if (bond->params.arp_validate)
- bond_unregister_arp(bond);
-
write_lock_bh(&bond->lock);
- bond->send_grat_arp = 0;
- bond->send_unsol_na = 0;
+ bond->send_peer_notif = 0;
/* signal timers not to re-arm */
bond->kill_timers = 1;
@@ -3682,6 +3559,7 @@ static int bond_close(struct net_device *bond_dev)
*/
bond_alb_deinitialize(bond);
}
+ bond->recv_probe = NULL;
return 0;
}
@@ -4105,10 +3983,6 @@ static int bond_xmit_roundrobin(struct sk_buff *skb, struct net_device *bond_dev
int i, slave_no, res = 1;
struct iphdr *iph = ip_hdr(skb);
- read_lock(&bond->lock);
-
- if (!BOND_IS_OK(bond))
- goto out;
/*
* Start with the curr_active_slave that joined the bond as the
* default for sending IGMP traffic. For failover purposes one
@@ -4155,7 +4029,7 @@ out:
/* no suitable interface, frame not sent */
dev_kfree_skb(skb);
}
- read_unlock(&bond->lock);
+
return NETDEV_TX_OK;
}
@@ -4169,24 +4043,18 @@ static int bond_xmit_activebackup(struct sk_buff *skb, struct net_device *bond_d
struct bonding *bond = netdev_priv(bond_dev);
int res = 1;
- read_lock(&bond->lock);
read_lock(&bond->curr_slave_lock);
- if (!BOND_IS_OK(bond))
- goto out;
-
- if (!bond->curr_active_slave)
- goto out;
+ if (bond->curr_active_slave)
+ res = bond_dev_queue_xmit(bond, skb,
+ bond->curr_active_slave->dev);
- res = bond_dev_queue_xmit(bond, skb, bond->curr_active_slave->dev);
-
-out:
if (res)
/* no suitable interface, frame not sent */
dev_kfree_skb(skb);
read_unlock(&bond->curr_slave_lock);
- read_unlock(&bond->lock);
+
return NETDEV_TX_OK;
}
@@ -4203,11 +4071,6 @@ static int bond_xmit_xor(struct sk_buff *skb, struct net_device *bond_dev)
int i;
int res = 1;
- read_lock(&bond->lock);
-
- if (!BOND_IS_OK(bond))
- goto out;
-
slave_no = bond->xmit_hash_policy(skb, bond->slave_cnt);
bond_for_each_slave(bond, slave, i) {
@@ -4227,12 +4090,11 @@ static int bond_xmit_xor(struct sk_buff *skb, struct net_device *bond_dev)
}
}
-out:
if (res) {
/* no suitable interface, frame not sent */
dev_kfree_skb(skb);
}
- read_unlock(&bond->lock);
+
return NETDEV_TX_OK;
}
@@ -4247,11 +4109,6 @@ static int bond_xmit_broadcast(struct sk_buff *skb, struct net_device *bond_dev)
int i;
int res = 1;
- read_lock(&bond->lock);
-
- if (!BOND_IS_OK(bond))
- goto out;
-
read_lock(&bond->curr_slave_lock);
start_at = bond->curr_active_slave;
read_unlock(&bond->curr_slave_lock);
@@ -4290,7 +4147,6 @@ out:
dev_kfree_skb(skb);
/* frame sent to all suitable interfaces */
- read_unlock(&bond->lock);
return NETDEV_TX_OK;
}
@@ -4322,10 +4178,8 @@ static inline int bond_slave_override(struct bonding *bond,
struct slave *slave = NULL;
struct slave *check_slave;
- read_lock(&bond->lock);
-
- if (!BOND_IS_OK(bond) || !skb->queue_mapping)
- goto out;
+ if (!skb->queue_mapping)
+ return 1;
/* Find out if any slaves have the same mapping as this skb. */
bond_for_each_slave(bond, check_slave, i) {
@@ -4341,8 +4195,6 @@ static inline int bond_slave_override(struct bonding *bond,
res = bond_dev_queue_xmit(bond, skb, slave->dev);
}
-out:
- read_unlock(&bond->lock);
return res;
}
@@ -4357,24 +4209,17 @@ static u16 bond_select_queue(struct net_device *dev, struct sk_buff *skb)
u16 txq = skb_rx_queue_recorded(skb) ? skb_get_rx_queue(skb) : 0;
if (unlikely(txq >= dev->real_num_tx_queues)) {
- do
+ do {
txq -= dev->real_num_tx_queues;
- while (txq >= dev->real_num_tx_queues);
+ } while (txq >= dev->real_num_tx_queues);
}
return txq;
}
-static netdev_tx_t bond_start_xmit(struct sk_buff *skb, struct net_device *dev)
+static netdev_tx_t __bond_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct bonding *bond = netdev_priv(dev);
- /*
- * If we risk deadlock from transmitting this in the
- * netpoll path, tell netpoll to queue the frame for later tx
- */
- if (is_netpoll_tx_blocked(dev))
- return NETDEV_TX_BUSY;
-
if (TX_QUEUE_OVERRIDE(bond->params.mode)) {
if (!bond_slave_override(bond, skb))
return NETDEV_TX_OK;
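
Bracing the do/while in bond_select_queue() above is a style fix; behaviour is unchanged: a recorded rx queue that exceeds real_num_tx_queues is reduced by repeated subtraction until it fits. A worked, self-contained example of that reduction:

    /* Repeated-subtraction reduction, as in bond_select_queue():
     * txq = 7 with 3 real tx queues -> 7 - 3 = 4, 4 - 3 = 1, result 1
     * (equivalent to txq % nqueues whenever txq starts out of range).
     */
    static unsigned short reduce_txq(unsigned short txq, unsigned short nqueues)
    {
            if (txq >= nqueues) {
                    do {
                            txq -= nqueues;
                    } while (txq >= nqueues);
            }
            return txq;
    }
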
@@ -4404,6 +4249,29 @@ static netdev_tx_t bond_start_xmit(struct sk_buff *skb, struct net_device *dev)
}
}
+static netdev_tx_t bond_start_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ struct bonding *bond = netdev_priv(dev);
+ netdev_tx_t ret = NETDEV_TX_OK;
+
+ /*
+ * If we risk deadlock from transmitting this in the
+ * netpoll path, tell netpoll to queue the frame for later tx
+ */
+ if (is_netpoll_tx_blocked(dev))
+ return NETDEV_TX_BUSY;
+
+ read_lock(&bond->lock);
+
+ if (bond->slave_cnt)
+ ret = __bond_start_xmit(skb, dev);
+ else
+ dev_kfree_skb(skb);
+
+ read_unlock(&bond->lock);
+
+ return ret;
+}
/*
* set bond mode specific net device operations
@@ -4448,11 +4316,6 @@ static void bond_ethtool_get_drvinfo(struct net_device *bond_dev,
static const struct ethtool_ops bond_ethtool_ops = {
.get_drvinfo = bond_ethtool_get_drvinfo,
.get_link = ethtool_op_get_link,
- .get_tx_csum = ethtool_op_get_tx_csum,
- .get_sg = ethtool_op_get_sg,
- .get_tso = ethtool_op_get_tso,
- .get_ufo = ethtool_op_get_ufo,
- .get_flags = ethtool_op_get_flags,
};
static const struct net_device_ops bond_netdev_ops = {
@@ -4478,6 +4341,7 @@ static const struct net_device_ops bond_netdev_ops = {
#endif
.ndo_add_slave = bond_enslave,
.ndo_del_slave = bond_release,
+ .ndo_fix_features = bond_fix_features,
};
static void bond_destructor(struct net_device *bond_dev)
@@ -4533,14 +4397,14 @@ static void bond_setup(struct net_device *bond_dev)
* when there are slaves that are not hw accel
* capable
*/
- bond_dev->features |= (NETIF_F_HW_VLAN_TX |
- NETIF_F_HW_VLAN_RX |
- NETIF_F_HW_VLAN_FILTER);
- /* By default, we enable GRO on bonding devices.
- * Actual support requires lowlevel drivers are GRO ready.
- */
- bond_dev->features |= NETIF_F_GRO;
+ bond_dev->hw_features = BOND_VLAN_FEATURES |
+ NETIF_F_HW_VLAN_TX |
+ NETIF_F_HW_VLAN_RX |
+ NETIF_F_HW_VLAN_FILTER;
+
+ bond_dev->hw_features &= ~(NETIF_F_ALL_CSUM & ~NETIF_F_NO_CSUM);
+ bond_dev->features |= bond_dev->hw_features;
}
static void bond_work_cancel_all(struct bonding *bond)
@@ -4724,16 +4588,10 @@ static int bond_check_params(struct bond_params *params)
use_carrier = 1;
}
- if (num_grat_arp < 0 || num_grat_arp > 255) {
- pr_warning("Warning: num_grat_arp (%d) not in range 0-255 so it was reset to 1\n",
- num_grat_arp);
- num_grat_arp = 1;
- }
-
- if (num_unsol_na < 0 || num_unsol_na > 255) {
- pr_warning("Warning: num_unsol_na (%d) not in range 0-255 so it was reset to 1\n",
- num_unsol_na);
- num_unsol_na = 1;
+ if (num_peer_notif < 0 || num_peer_notif > 255) {
+ pr_warning("Warning: num_grat_arp/num_unsol_na (%d) not in range 0-255 so it was reset to 1\n",
+ num_peer_notif);
+ num_peer_notif = 1;
}
/* reset values for 802.3ad */
@@ -4925,8 +4783,7 @@ static int bond_check_params(struct bond_params *params)
params->mode = bond_mode;
params->xmit_policy = xmit_hashtype;
params->miimon = miimon;
- params->num_grat_arp = num_grat_arp;
- params->num_unsol_na = num_unsol_na;
+ params->num_peer_notif = num_peer_notif;
params->arp_interval = arp_interval;
params->arp_validate = arp_validate_value;
params->updelay = updelay;
@@ -5025,8 +4882,9 @@ int bond_create(struct net *net, const char *name)
rtnl_lock();
- bond_dev = alloc_netdev_mq(sizeof(struct bonding), name ? name : "",
- bond_setup, tx_queues);
+ bond_dev = alloc_netdev_mq(sizeof(struct bonding),
+ name ? name : "bond%d",
+ bond_setup, tx_queues);
if (!bond_dev) {
pr_err("%s: eek! can't alloc netdev!\n", name);
rtnl_unlock();
@@ -5036,26 +4894,10 @@ int bond_create(struct net *net, const char *name)
dev_net_set(bond_dev, net);
bond_dev->rtnl_link_ops = &bond_link_ops;
- if (!name) {
- res = dev_alloc_name(bond_dev, "bond%d");
- if (res < 0)
- goto out;
- } else {
- /*
- * If we're given a name to register
- * we need to ensure that its not already
- * registered
- */
- res = -EEXIST;
- if (__dev_get_by_name(net, name) != NULL)
- goto out;
- }
-
res = register_netdevice(bond_dev);
netif_carrier_off(bond_dev);
-out:
rtnl_unlock();
if (res < 0)
bond_destructor(bond_dev);
@@ -5121,7 +4963,6 @@ static int __init bonding_init(void)
register_netdevice_notifier(&bond_netdev_notifier);
register_inetaddr_notifier(&bond_inetaddr_notifier);
- bond_register_ipv6_notifier();
out:
return res;
err:
@@ -5136,7 +4977,6 @@ static void __exit bonding_exit(void)
{
unregister_netdevice_notifier(&bond_netdev_notifier);
unregister_inetaddr_notifier(&bond_inetaddr_notifier);
- bond_unregister_ipv6_notifier();
bond_destroy_sysfs();
bond_destroy_debugfs();
diff --git a/drivers/net/bonding/bond_procfs.c b/drivers/net/bonding/bond_procfs.c
index c32ff55a34c1..c97307ddd1c9 100644
--- a/drivers/net/bonding/bond_procfs.c
+++ b/drivers/net/bonding/bond_procfs.c
@@ -4,8 +4,6 @@
#include "bonding.h"
-extern const char *bond_mode_name(int mode);
-
static void *bond_info_seq_start(struct seq_file *seq, loff_t *pos)
__acquires(RCU)
__acquires(&bond->lock)
diff --git a/drivers/net/bonding/bond_sysfs.c b/drivers/net/bonding/bond_sysfs.c
index de87aea6d01a..4059bfc73dbf 100644
--- a/drivers/net/bonding/bond_sysfs.c
+++ b/drivers/net/bonding/bond_sysfs.c
@@ -422,11 +422,6 @@ static ssize_t bonding_store_arp_validate(struct device *d,
bond->dev->name, arp_validate_tbl[new_value].modename,
new_value);
- if (!bond->params.arp_validate && new_value)
- bond_register_arp(bond);
- else if (bond->params.arp_validate && !new_value)
- bond_unregister_arp(bond);
-
bond->params.arp_validate = new_value;
return count;
@@ -874,82 +869,28 @@ static DEVICE_ATTR(ad_select, S_IRUGO | S_IWUSR,
bonding_show_ad_select, bonding_store_ad_select);
/*
- * Show and set the number of grat ARP to send after a failover event.
+ * Show and set the number of peer notifications to send after a failover event.
*/
-static ssize_t bonding_show_n_grat_arp(struct device *d,
- struct device_attribute *attr,
- char *buf)
+static ssize_t bonding_show_num_peer_notif(struct device *d,
+ struct device_attribute *attr,
+ char *buf)
{
struct bonding *bond = to_bond(d);
-
- return sprintf(buf, "%d\n", bond->params.num_grat_arp);
+ return sprintf(buf, "%d\n", bond->params.num_peer_notif);
}
-static ssize_t bonding_store_n_grat_arp(struct device *d,
- struct device_attribute *attr,
- const char *buf, size_t count)
+static ssize_t bonding_store_num_peer_notif(struct device *d,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
{
- int new_value, ret = count;
struct bonding *bond = to_bond(d);
-
- if (sscanf(buf, "%d", &new_value) != 1) {
- pr_err("%s: no num_grat_arp value specified.\n",
- bond->dev->name);
- ret = -EINVAL;
- goto out;
- }
- if (new_value < 0 || new_value > 255) {
- pr_err("%s: Invalid num_grat_arp value %d not in range 0-255; rejected.\n",
- bond->dev->name, new_value);
- ret = -EINVAL;
- goto out;
- } else {
- bond->params.num_grat_arp = new_value;
- }
-out:
- return ret;
+ int err = kstrtou8(buf, 10, &bond->params.num_peer_notif);
+ return err ? err : count;
}
static DEVICE_ATTR(num_grat_arp, S_IRUGO | S_IWUSR,
- bonding_show_n_grat_arp, bonding_store_n_grat_arp);
-
-/*
- * Show and set the number of unsolicited NA's to send after a failover event.
- */
-static ssize_t bonding_show_n_unsol_na(struct device *d,
- struct device_attribute *attr,
- char *buf)
-{
- struct bonding *bond = to_bond(d);
-
- return sprintf(buf, "%d\n", bond->params.num_unsol_na);
-}
-
-static ssize_t bonding_store_n_unsol_na(struct device *d,
- struct device_attribute *attr,
- const char *buf, size_t count)
-{
- int new_value, ret = count;
- struct bonding *bond = to_bond(d);
-
- if (sscanf(buf, "%d", &new_value) != 1) {
- pr_err("%s: no num_unsol_na value specified.\n",
- bond->dev->name);
- ret = -EINVAL;
- goto out;
- }
-
- if (new_value < 0 || new_value > 255) {
- pr_err("%s: Invalid num_unsol_na value %d not in range 0-255; rejected.\n",
- bond->dev->name, new_value);
- ret = -EINVAL;
- goto out;
- } else
- bond->params.num_unsol_na = new_value;
-out:
- return ret;
-}
+ bonding_show_num_peer_notif, bonding_store_num_peer_notif);
static DEVICE_ATTR(num_unsol_na, S_IRUGO | S_IWUSR,
- bonding_show_n_unsol_na, bonding_store_n_unsol_na);
+ bonding_show_num_peer_notif, bonding_store_num_peer_notif);
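
The kstrtou8()-based store above drops the hand-rolled sscanf and range check: num_peer_notif is a u8 (see the bonding.h hunk below), so the 0-255 bound is enforced by the parser itself, which returns a negative errno that the handler propagates, or 0 on success so the full count is returned to sysfs. Assuming the usual kstrto* conventions, the observable behaviour is roughly:

    /* Expected sysfs behaviour of the kstrtou8()-based store (assumption:
     * standard kstrto* convention of 0 on success, negative errno otherwise):
     *
     *   echo 3   > num_grat_arp    parsed, num_peer_notif = 3, write succeeds
     *   echo 300 > num_grat_arp    does not fit in a u8, write fails
     *   echo abc > num_grat_arp    not a base-10 number, write fails
     */
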
/*
* Show and set the MII monitor interval. There are two tricky bits
@@ -1001,7 +942,6 @@ static ssize_t bonding_store_miimon(struct device *d,
bond->dev->name);
bond->params.arp_interval = 0;
if (bond->params.arp_validate) {
- bond_unregister_arp(bond);
bond->params.arp_validate =
BOND_ARP_VALIDATE_NONE;
}
diff --git a/drivers/net/bonding/bonding.h b/drivers/net/bonding/bonding.h
index 90736cb4d975..ea1d005be92d 100644
--- a/drivers/net/bonding/bonding.h
+++ b/drivers/net/bonding/bonding.h
@@ -24,8 +24,8 @@
#include "bond_3ad.h"
#include "bond_alb.h"
-#define DRV_VERSION "3.7.0"
-#define DRV_RELDATE "June 2, 2010"
+#define DRV_VERSION "3.7.1"
+#define DRV_RELDATE "April 27, 2011"
#define DRV_NAME "bonding"
#define DRV_DESCRIPTION "Ethernet Channel Bonding Driver"
@@ -39,16 +39,6 @@
netif_carrier_ok(dev))
/*
- * Checks whether bond is ready for transmit.
- *
- * Caller must hold bond->lock
- */
-#define BOND_IS_OK(bond) \
- (((bond)->dev->flags & IFF_UP) && \
- netif_running((bond)->dev) && \
- ((bond)->slave_cnt > 0))
-
-/*
* Checks whether slave is ready for transmit.
*/
#define SLAVE_IS_OK(slave) \
@@ -149,8 +139,7 @@ struct bond_params {
int mode;
int xmit_policy;
int miimon;
- int num_grat_arp;
- int num_unsol_na;
+ u8 num_peer_notif;
int arp_interval;
int arp_validate;
int use_carrier;
@@ -178,9 +167,6 @@ struct vlan_entry {
struct list_head vlan_list;
__be32 vlan_ip;
unsigned short vlan_id;
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
- struct in6_addr vlan_ipv6;
-#endif
};
struct slave {
@@ -196,12 +182,12 @@ struct slave {
u8 backup:1, /* indicates backup slave. Value corresponds with
BOND_STATE_ACTIVE and BOND_STATE_BACKUP */
inactive:1; /* indicates inactive slave */
+ u8 duplex;
u32 original_mtu;
u32 link_failure_count;
- u8 perm_hwaddr[ETH_ALEN];
- u16 speed;
- u8 duplex;
+ u32 speed;
u16 queue_id;
+ u8 perm_hwaddr[ETH_ALEN];
struct ad_slave_info ad_info; /* HUGE - better to dynamically alloc */
struct tlb_slave_info tlb_info;
#ifdef CONFIG_NET_POLL_CONTROLLER
@@ -231,11 +217,12 @@ struct bonding {
struct slave *primary_slave;
bool force_primary;
s32 slave_cnt; /* never change this value outside the attach/detach wrappers */
+ void (*recv_probe)(struct sk_buff *, struct bonding *,
+ struct slave *);
rwlock_t lock;
rwlock_t curr_slave_lock;
s8 kill_timers;
- s8 send_grat_arp;
- s8 send_unsol_na;
+ u8 send_peer_notif;
s8 setup_by_slave;
s8 igmp_retrans;
#ifdef CONFIG_PROC_FS
@@ -260,9 +247,6 @@ struct bonding {
struct delayed_work alb_work;
struct delayed_work ad_work;
struct delayed_work mcast_work;
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
- struct in6_addr master_ipv6;
-#endif
#ifdef CONFIG_DEBUG_FS
/* debugging support via debugfs */
struct dentry *debug_dir;
@@ -409,13 +393,12 @@ void bond_set_mode_ops(struct bonding *bond, int mode);
int bond_parse_parm(const char *mode_arg, const struct bond_parm_tbl *tbl);
void bond_select_active_slave(struct bonding *bond);
void bond_change_active_slave(struct bonding *bond, struct slave *new_active);
-void bond_register_arp(struct bonding *);
-void bond_unregister_arp(struct bonding *);
void bond_create_debugfs(void);
void bond_destroy_debugfs(void);
void bond_debug_register(struct bonding *bond);
void bond_debug_unregister(struct bonding *bond);
void bond_debug_reregister(struct bonding *bond);
+const char *bond_mode_name(int mode);
struct bond_net {
struct net * net; /* Associated network namespace */
@@ -459,23 +442,4 @@ extern const struct bond_parm_tbl fail_over_mac_tbl[];
extern const struct bond_parm_tbl pri_reselect_tbl[];
extern struct bond_parm_tbl ad_select_tbl[];
-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
-void bond_send_unsolicited_na(struct bonding *bond);
-void bond_register_ipv6_notifier(void);
-void bond_unregister_ipv6_notifier(void);
-#else
-static inline void bond_send_unsolicited_na(struct bonding *bond)
-{
- return;
-}
-static inline void bond_register_ipv6_notifier(void)
-{
- return;
-}
-static inline void bond_unregister_ipv6_notifier(void)
-{
- return;
-}
-#endif
-
#endif /* _LINUX_BONDING_H */
diff --git a/drivers/net/caif/caif_shmcore.c b/drivers/net/caif/caif_shmcore.c
index 80511167f35b..731aa1193770 100644
--- a/drivers/net/caif/caif_shmcore.c
+++ b/drivers/net/caif/caif_shmcore.c
@@ -591,7 +591,7 @@ int caif_shmcore_probe(struct shmdev_layer *pshm_dev)
(NR_TX_BUF * TX_BUF_SZ + NR_RX_BUF * RX_BUF_SZ)) {
pr_warn("ERROR, Amount of available"
- " Phys. SHM cannot accomodate current SHM "
+ " Phys. SHM cannot accommodate current SHM "
"driver configuration, Bailing out ...\n");
free_netdev(pshm_dev->pshm_netdev);
return -ENOMEM;
diff --git a/drivers/net/caif/caif_spi.c b/drivers/net/caif/caif_spi.c
index 20da1996d354..57e639373815 100644
--- a/drivers/net/caif/caif_spi.c
+++ b/drivers/net/caif/caif_spi.c
@@ -397,7 +397,7 @@ int cfspi_xmitlen(struct cfspi *cfspi)
int pkts = 0;
/*
- * Decommit previously commited frames.
+ * Decommit previously committed frames.
* skb_queue_splice_tail(&cfspi->chead,&cfspi->qhead)
*/
while (skb_peek(&cfspi->chead)) {
diff --git a/drivers/net/caif/caif_spi_slave.c b/drivers/net/caif/caif_spi_slave.c
index 1b9943a4edab..b009e03cda9e 100644
--- a/drivers/net/caif/caif_spi_slave.c
+++ b/drivers/net/caif/caif_spi_slave.c
@@ -98,7 +98,7 @@ void cfspi_xfer(struct work_struct *work)
cfspi_dbg_state(cfspi, CFSPI_STATE_FETCH_PKT);
- /* Copy commited SPI frames after the SPI indication. */
+ /* Copy committed SPI frames after the SPI indication. */
ptr = (u8 *) cfspi->xfer.va_tx;
ptr += SPI_IND_SZ;
len = cfspi_xmitfrm(cfspi, ptr, cfspi->tx_cpck_len);
@@ -158,7 +158,7 @@ void cfspi_xfer(struct work_struct *work)
cfspi_dbg_state(cfspi, CFSPI_STATE_SIG_ACTIVE);
- /* Signal that we are ready to recieve data. */
+ /* Signal that we are ready to receive data. */
cfspi->dev->sig_xfer(true, cfspi->dev);
cfspi_dbg_state(cfspi, CFSPI_STATE_WAIT_XFER_DONE);
diff --git a/drivers/net/can/at91_can.c b/drivers/net/can/at91_can.c
index 57d2ffbbb433..74efb5a2ad41 100644
--- a/drivers/net/can/at91_can.c
+++ b/drivers/net/can/at91_can.c
@@ -416,7 +416,7 @@ static netdev_tx_t at91_start_xmit(struct sk_buff *skb, struct net_device *dev)
stats->tx_bytes += cf->can_dlc;
- /* _NOTE_: substract AT91_MB_TX_FIRST offset from mb! */
+ /* _NOTE_: subtract AT91_MB_TX_FIRST offset from mb! */
can_put_echo_skb(skb, dev, mb - AT91_MB_TX_FIRST);
/*
@@ -782,7 +782,7 @@ static void at91_irq_tx(struct net_device *dev, u32 reg_sr)
reg_msr = at91_read(priv, AT91_MSR(mb));
if (likely(reg_msr & AT91_MSR_MRDY &&
~reg_msr & AT91_MSR_MABT)) {
- /* _NOTE_: substract AT91_MB_TX_FIRST offset from mb! */
+ /* _NOTE_: subtract AT91_MB_TX_FIRST offset from mb! */
can_get_echo_skb(dev, mb - AT91_MB_TX_FIRST);
dev->stats.tx_packets++;
}
diff --git a/drivers/net/can/c_can/c_can.c b/drivers/net/can/c_can/c_can.c
index 31552959aed7..7e5cc0bd913d 100644
--- a/drivers/net/can/c_can/c_can.c
+++ b/drivers/net/can/c_can/c_can.c
@@ -813,7 +813,7 @@ static int c_can_handle_state_change(struct net_device *dev,
struct sk_buff *skb;
struct can_berr_counter bec;
- /* propogate the error condition to the CAN stack */
+ /* propagate the error condition to the CAN stack */
skb = alloc_can_err_skb(dev, &cf);
if (unlikely(!skb))
return 0;
@@ -887,7 +887,7 @@ static int c_can_handle_bus_err(struct net_device *dev,
if (lec_type == LEC_UNUSED || lec_type == LEC_NO_ERROR)
return 0;
- /* propogate the error condition to the CAN stack */
+ /* propagate the error condition to the CAN stack */
skb = alloc_can_err_skb(dev, &cf);
if (unlikely(!skb))
return 0;
diff --git a/drivers/net/can/janz-ican3.c b/drivers/net/can/janz-ican3.c
index 102b16c6cc97..587fba48cdd9 100644
--- a/drivers/net/can/janz-ican3.c
+++ b/drivers/net/can/janz-ican3.c
@@ -274,7 +274,7 @@ static inline void ican3_set_page(struct ican3_dev *mod, unsigned int page)
*/
/*
- * Recieve a message from the ICAN3 "old-style" firmware interface
+ * Receive a message from the ICAN3 "old-style" firmware interface
*
* LOCKING: must hold mod->lock
*
@@ -1050,7 +1050,7 @@ static void ican3_handle_inquiry(struct ican3_dev *mod, struct ican3_msg *msg)
complete(&mod->termination_comp);
break;
default:
- dev_err(mod->dev, "recieved an unknown inquiry response\n");
+ dev_err(mod->dev, "received an unknown inquiry response\n");
break;
}
}
@@ -1058,7 +1058,7 @@ static void ican3_handle_inquiry(struct ican3_dev *mod, struct ican3_msg *msg)
static void ican3_handle_unknown_message(struct ican3_dev *mod,
struct ican3_msg *msg)
{
- dev_warn(mod->dev, "recieved unknown message: spec 0x%.2x length %d\n",
+ dev_warn(mod->dev, "received unknown message: spec 0x%.2x length %d\n",
msg->spec, le16_to_cpu(msg->len));
}
@@ -1113,7 +1113,7 @@ static bool ican3_txok(struct ican3_dev *mod)
}
/*
- * Recieve one CAN frame from the hardware
+ * Receive one CAN frame from the hardware
*
* CONTEXT: must be called from user context
*/
diff --git a/drivers/net/can/mcp251x.c b/drivers/net/can/mcp251x.c
index 7513c4523ac4..330140ee266d 100644
--- a/drivers/net/can/mcp251x.c
+++ b/drivers/net/can/mcp251x.c
@@ -931,7 +931,8 @@ static int mcp251x_open(struct net_device *net)
priv->tx_len = 0;
ret = request_threaded_irq(spi->irq, NULL, mcp251x_can_ist,
- IRQF_TRIGGER_FALLING, DEVICE_NAME, priv);
+ pdata->irq_flags ? pdata->irq_flags : IRQF_TRIGGER_FALLING,
+ DEVICE_NAME, priv);
if (ret) {
dev_err(&spi->dev, "failed to acquire irq %d\n", spi->irq);
if (pdata->transceiver_enable)
diff --git a/drivers/net/can/mscan/mpc5xxx_can.c b/drivers/net/can/mscan/mpc5xxx_can.c
index c0a1bc5b1435..bd1d811c204f 100644
--- a/drivers/net/can/mscan/mpc5xxx_can.c
+++ b/drivers/net/can/mscan/mpc5xxx_can.c
@@ -260,7 +260,7 @@ static int __devinit mpc5xxx_can_probe(struct platform_device *ofdev)
if (!ofdev->dev.of_match)
return -EINVAL;
- data = (struct mpc5xxx_can_data *)of_dev->dev.of_match->data;
+ data = (struct mpc5xxx_can_data *)ofdev->dev.of_match->data;
base = of_iomap(np, 0);
if (!base) {
diff --git a/drivers/net/can/mscan/mscan.c b/drivers/net/can/mscan/mscan.c
index 74cd880c7e06..92feac68b66e 100644
--- a/drivers/net/can/mscan/mscan.c
+++ b/drivers/net/can/mscan/mscan.c
@@ -246,7 +246,7 @@ static netdev_tx_t mscan_start_xmit(struct sk_buff *skb, struct net_device *dev)
out_be16(&regs->tx.idr3_2, can_id);
can_id >>= 16;
- /* EFF_FLAGS are inbetween the IDs :( */
+ /* EFF_FLAGS are between the IDs :( */
can_id = (can_id & 0x7) | ((can_id << 2) & 0xffe0)
| MSCAN_EFF_FLAGS;
} else {
diff --git a/drivers/net/can/sja1000/sja1000.c b/drivers/net/can/sja1000/sja1000.c
index 0a8de01d52f7..f501bba1fc6f 100644
--- a/drivers/net/can/sja1000/sja1000.c
+++ b/drivers/net/can/sja1000/sja1000.c
@@ -346,10 +346,10 @@ static void sja1000_rx(struct net_device *dev)
| (priv->read_reg(priv, REG_ID2) >> 5);
}
+ cf->can_dlc = get_can_dlc(fi & 0x0F);
if (fi & FI_RTR) {
id |= CAN_RTR_FLAG;
} else {
- cf->can_dlc = get_can_dlc(fi & 0x0F);
for (i = 0; i < cf->can_dlc; i++)
cf->data[i] = priv->read_reg(priv, dreg++);
}
@@ -425,7 +425,7 @@ static int sja1000_err(struct net_device *dev, uint8_t isrc, uint8_t status)
cf->data[3] = ecc & ECC_SEG;
break;
}
- /* Error occured during transmission? */
+ /* Error occurred during transmission? */
if ((ecc & ECC_DIR) == 0)
cf->data[2] |= CAN_ERR_PROT_TX;
}
diff --git a/drivers/net/can/slcan.c b/drivers/net/can/slcan.c
index b423965a78d1..1b49df6b2470 100644
--- a/drivers/net/can/slcan.c
+++ b/drivers/net/can/slcan.c
@@ -583,7 +583,9 @@ static int slcan_open(struct tty_struct *tty)
/* Done. We have linked the TTY line to a channel. */
rtnl_unlock();
tty->receive_room = 65536; /* We don't flow control */
- return sl->dev->base_addr;
+
+ /* TTY layer expects 0 on success */
+ return 0;
err_free_chan:
sl->tty = NULL;
diff --git a/drivers/net/can/softing/softing.h b/drivers/net/can/softing/softing.h
index 7ec9f4db3d52..afd7d85b6915 100644
--- a/drivers/net/can/softing/softing.h
+++ b/drivers/net/can/softing/softing.h
@@ -22,7 +22,7 @@ struct softing_priv {
struct softing *card;
struct {
int pending;
- /* variables wich hold the circular buffer */
+ /* variables which hold the circular buffer */
int echo_put;
int echo_get;
} tx;
diff --git a/drivers/net/can/softing/softing_main.c b/drivers/net/can/softing/softing_main.c
index aeea9f9ff6e8..7a70709d5608 100644
--- a/drivers/net/can/softing/softing_main.c
+++ b/drivers/net/can/softing/softing_main.c
@@ -218,7 +218,7 @@ static int softing_handle_1(struct softing *card)
ptr = buf;
cmd = *ptr++;
if (cmd == 0xff)
- /* not quite usefull, probably the card has got out */
+ /* not quite useful, probably the card has got out */
return 0;
netdev = card->net[0];
if (cmd & CMD_BUS2)
diff --git a/drivers/net/can/ti_hecc.c b/drivers/net/can/ti_hecc.c
index 4d07f1ee7168..f7bbde9eb2cb 100644
--- a/drivers/net/can/ti_hecc.c
+++ b/drivers/net/can/ti_hecc.c
@@ -663,7 +663,7 @@ static int ti_hecc_error(struct net_device *ndev, int int_status,
struct can_frame *cf;
struct sk_buff *skb;
- /* propogate the error condition to the can stack */
+ /* propagate the error condition to the can stack */
skb = alloc_can_err_skb(ndev, &cf);
if (!skb) {
if (printk_ratelimit())
diff --git a/drivers/net/can/usb/ems_usb.c b/drivers/net/can/usb/ems_usb.c
index e75f1a876972..a72c7bfb4090 100644
--- a/drivers/net/can/usb/ems_usb.c
+++ b/drivers/net/can/usb/ems_usb.c
@@ -386,7 +386,7 @@ static void ems_usb_rx_err(struct ems_usb *dev, struct ems_cpc_msg *msg)
break;
}
- /* Error occured during transmission? */
+ /* Error occurred during transmission? */
if ((ecc & SJA1000_ECC_DIR) == 0)
cf->data[2] |= CAN_ERR_PROT_TX;
diff --git a/drivers/net/can/usb/esd_usb2.c b/drivers/net/can/usb/esd_usb2.c
index dc53c831ea95..eb8b0e600282 100644
--- a/drivers/net/can/usb/esd_usb2.c
+++ b/drivers/net/can/usb/esd_usb2.c
@@ -284,7 +284,7 @@ static void esd_usb2_rx_event(struct esd_usb2_net_priv *priv,
break;
}
- /* Error occured during transmission? */
+ /* Error occurred during transmission? */
if (!(ecc & SJA1000_ECC_DIR))
cf->data[2] |= CAN_ERR_PROT_TX;
diff --git a/drivers/net/cassini.c b/drivers/net/cassini.c
index 3437613f0454..22ce03e55b83 100644
--- a/drivers/net/cassini.c
+++ b/drivers/net/cassini.c
@@ -51,7 +51,7 @@
* TX has 4 queues. currently these queues are used in a round-robin
* fashion for load balancing. They can also be used for QoS. for that
* to work, however, QoS information needs to be exposed down to the driver
- * level so that subqueues get targetted to particular transmit rings.
+ * level so that subqueues get targeted to particular transmit rings.
* alternatively, the queues can be configured via use of the all-purpose
* ioctl.
*
@@ -709,10 +709,11 @@ static void cas_begin_auto_negotiation(struct cas *cp, struct ethtool_cmd *ep)
if (ep->autoneg == AUTONEG_ENABLE)
cp->link_cntl = BMCR_ANENABLE;
else {
+ u32 speed = ethtool_cmd_speed(ep);
cp->link_cntl = 0;
- if (ep->speed == SPEED_100)
+ if (speed == SPEED_100)
cp->link_cntl |= BMCR_SPEED100;
- else if (ep->speed == SPEED_1000)
+ else if (speed == SPEED_1000)
cp->link_cntl |= CAS_BMCR_SPEED1000;
if (ep->duplex == DUPLEX_FULL)
cp->link_cntl |= BMCR_FULLDPLX;
@@ -4605,18 +4606,17 @@ static int cas_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
if (bmcr & BMCR_ANENABLE) {
cmd->advertising |= ADVERTISED_Autoneg;
cmd->autoneg = AUTONEG_ENABLE;
- cmd->speed = ((speed == 10) ?
- SPEED_10 :
- ((speed == 1000) ?
- SPEED_1000 : SPEED_100));
+ ethtool_cmd_speed_set(cmd, ((speed == 10) ?
+ SPEED_10 :
+ ((speed == 1000) ?
+ SPEED_1000 : SPEED_100)));
cmd->duplex = full_duplex ? DUPLEX_FULL : DUPLEX_HALF;
} else {
cmd->autoneg = AUTONEG_DISABLE;
- cmd->speed =
- (bmcr & CAS_BMCR_SPEED1000) ?
- SPEED_1000 :
- ((bmcr & BMCR_SPEED100) ? SPEED_100:
- SPEED_10);
+ ethtool_cmd_speed_set(cmd, ((bmcr & CAS_BMCR_SPEED1000) ?
+ SPEED_1000 :
+ ((bmcr & BMCR_SPEED100) ?
+ SPEED_100 : SPEED_10)));
cmd->duplex =
(bmcr & BMCR_FULLDPLX) ?
DUPLEX_FULL : DUPLEX_HALF;
@@ -4633,14 +4633,14 @@ static int cas_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
* settings that we configured.
*/
if (cp->link_cntl & BMCR_ANENABLE) {
- cmd->speed = 0;
+ ethtool_cmd_speed_set(cmd, 0);
cmd->duplex = 0xff;
} else {
- cmd->speed = SPEED_10;
+ ethtool_cmd_speed_set(cmd, SPEED_10);
if (cp->link_cntl & BMCR_SPEED100) {
- cmd->speed = SPEED_100;
+ ethtool_cmd_speed_set(cmd, SPEED_100);
} else if (cp->link_cntl & CAS_BMCR_SPEED1000) {
- cmd->speed = SPEED_1000;
+ ethtool_cmd_speed_set(cmd, SPEED_1000);
}
cmd->duplex = (cp->link_cntl & BMCR_FULLDPLX)?
DUPLEX_FULL : DUPLEX_HALF;
@@ -4653,6 +4653,7 @@ static int cas_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
struct cas *cp = netdev_priv(dev);
unsigned long flags;
+ u32 speed = ethtool_cmd_speed(cmd);
/* Verify the settings we care about. */
if (cmd->autoneg != AUTONEG_ENABLE &&
@@ -4660,9 +4661,9 @@ static int cas_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
return -EINVAL;
if (cmd->autoneg == AUTONEG_DISABLE &&
- ((cmd->speed != SPEED_1000 &&
- cmd->speed != SPEED_100 &&
- cmd->speed != SPEED_10) ||
+ ((speed != SPEED_1000 &&
+ speed != SPEED_100 &&
+ speed != SPEED_10) ||
(cmd->duplex != DUPLEX_HALF &&
cmd->duplex != DUPLEX_FULL)))
return -EINVAL;
@@ -5165,7 +5166,7 @@ err_out_free_res:
pci_release_regions(pdev);
err_write_cacheline:
- /* Try to restore it in case the error occured after we
+ /* Try to restore it in case the error occurred after we
* set it.
*/
pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE, orig_cacheline_size);
diff --git a/drivers/net/cassini.h b/drivers/net/cassini.h
index faf4746a0f3e..b361424d5f57 100644
--- a/drivers/net/cassini.h
+++ b/drivers/net/cassini.h
@@ -772,7 +772,7 @@
#define RX_DEBUG_INTR_WRITE_PTR_MASK 0xC0000000 /* interrupt write pointer
of the interrupt queue */
-/* flow control frames are emmitted using two PAUSE thresholds:
+/* flow control frames are emitted using two PAUSE thresholds:
* XOFF PAUSE uses pause time value pre-programmed in the Send PAUSE MAC reg
* XON PAUSE uses a pause time of 0. granularity of threshold is 64bytes.
* PAUSE thresholds defined in terms of FIFO occupancy and may be translated
diff --git a/drivers/net/chelsio/common.h b/drivers/net/chelsio/common.h
index 092f31a126e6..c26d863e1697 100644
--- a/drivers/net/chelsio/common.h
+++ b/drivers/net/chelsio/common.h
@@ -264,11 +264,6 @@ struct adapter {
enum { /* adapter flags */
FULL_INIT_DONE = 1 << 0,
- TSO_CAPABLE = 1 << 2,
- TCP_CSUM_CAPABLE = 1 << 3,
- UDP_CSUM_CAPABLE = 1 << 4,
- VLAN_ACCEL_CAPABLE = 1 << 5,
- RX_CSUM_ENABLED = 1 << 6,
};
struct mdio_ops;
diff --git a/drivers/net/chelsio/cxgb2.c b/drivers/net/chelsio/cxgb2.c
index 0f71304e0542..b422d83f5343 100644
--- a/drivers/net/chelsio/cxgb2.c
+++ b/drivers/net/chelsio/cxgb2.c
@@ -192,10 +192,8 @@ static void link_start(struct port_info *p)
static void enable_hw_csum(struct adapter *adapter)
{
- if (adapter->flags & TSO_CAPABLE)
+ if (adapter->port[0].dev->hw_features & NETIF_F_TSO)
t1_tp_set_ip_checksum_offload(adapter->tp, 1); /* for TSO only */
- if (adapter->flags & UDP_CSUM_CAPABLE)
- t1_tp_set_udp_checksum_offload(adapter->tp, 1);
t1_tp_set_tcp_checksum_offload(adapter->tp, 1);
}
@@ -579,10 +577,10 @@ static int get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
cmd->advertising = p->link_config.advertising;
if (netif_carrier_ok(dev)) {
- cmd->speed = p->link_config.speed;
+ ethtool_cmd_speed_set(cmd, p->link_config.speed);
cmd->duplex = p->link_config.duplex;
} else {
- cmd->speed = -1;
+ ethtool_cmd_speed_set(cmd, -1);
cmd->duplex = -1;
}
@@ -640,11 +638,12 @@ static int set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
return -EOPNOTSUPP; /* can't change speed/duplex */
if (cmd->autoneg == AUTONEG_DISABLE) {
- int cap = speed_duplex_to_caps(cmd->speed, cmd->duplex);
+ u32 speed = ethtool_cmd_speed(cmd);
+ int cap = speed_duplex_to_caps(speed, cmd->duplex);
- if (!(lc->supported & cap) || cmd->speed == SPEED_1000)
+ if (!(lc->supported & cap) || (speed == SPEED_1000))
return -EINVAL;
- lc->requested_speed = cmd->speed;
+ lc->requested_speed = speed;
lc->requested_duplex = cmd->duplex;
lc->advertising = 0;
} else {
@@ -705,33 +704,6 @@ static int set_pauseparam(struct net_device *dev,
return 0;
}
-static u32 get_rx_csum(struct net_device *dev)
-{
- struct adapter *adapter = dev->ml_priv;
-
- return (adapter->flags & RX_CSUM_ENABLED) != 0;
-}
-
-static int set_rx_csum(struct net_device *dev, u32 data)
-{
- struct adapter *adapter = dev->ml_priv;
-
- if (data)
- adapter->flags |= RX_CSUM_ENABLED;
- else
- adapter->flags &= ~RX_CSUM_ENABLED;
- return 0;
-}
-
-static int set_tso(struct net_device *dev, u32 value)
-{
- struct adapter *adapter = dev->ml_priv;
-
- if (!(adapter->flags & TSO_CAPABLE))
- return value ? -EOPNOTSUPP : 0;
- return ethtool_op_set_tso(dev, value);
-}
-
static void get_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
{
struct adapter *adapter = dev->ml_priv;
@@ -831,17 +803,12 @@ static const struct ethtool_ops t1_ethtool_ops = {
.get_eeprom = get_eeprom,
.get_pauseparam = get_pauseparam,
.set_pauseparam = set_pauseparam,
- .get_rx_csum = get_rx_csum,
- .set_rx_csum = set_rx_csum,
- .set_tx_csum = ethtool_op_set_tx_csum,
- .set_sg = ethtool_op_set_sg,
.get_link = ethtool_op_get_link,
.get_strings = get_strings,
.get_sset_count = get_sset_count,
.get_ethtool_stats = get_stats,
.get_regs_len = get_regs_len,
.get_regs = get_regs,
- .set_tso = set_tso,
};
static int t1_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
@@ -1105,28 +1072,28 @@ static int __devinit init_one(struct pci_dev *pdev,
netdev->mem_start = mmio_start;
netdev->mem_end = mmio_start + mmio_len - 1;
netdev->ml_priv = adapter;
- netdev->features |= NETIF_F_SG | NETIF_F_IP_CSUM;
- netdev->features |= NETIF_F_LLTX;
+ netdev->hw_features |= NETIF_F_SG | NETIF_F_IP_CSUM |
+ NETIF_F_RXCSUM;
+ netdev->features |= NETIF_F_SG | NETIF_F_IP_CSUM |
+ NETIF_F_RXCSUM | NETIF_F_LLTX;
- adapter->flags |= RX_CSUM_ENABLED | TCP_CSUM_CAPABLE;
if (pci_using_dac)
netdev->features |= NETIF_F_HIGHDMA;
if (vlan_tso_capable(adapter)) {
#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
- adapter->flags |= VLAN_ACCEL_CAPABLE;
netdev->features |=
NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
#endif
/* T204: disable TSO */
if (!(is_T2(adapter)) || bi->port_number != 4) {
- adapter->flags |= TSO_CAPABLE;
+ netdev->hw_features |= NETIF_F_TSO;
netdev->features |= NETIF_F_TSO;
}
}
netdev->netdev_ops = &cxgb_netdev_ops;
- netdev->hard_header_len += (adapter->flags & TSO_CAPABLE) ?
+ netdev->hard_header_len += (netdev->hw_features & NETIF_F_TSO) ?
sizeof(struct cpl_tx_pkt_lso) : sizeof(struct cpl_tx_pkt);
netif_napi_add(netdev, &adapter->napi, t1_poll, 64);
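
The chelsio conversion above retires the driver-private capability flags (TSO_CAPABLE, UDP_CSUM_CAPABLE, RX_CSUM_ENABLED and friends) in favour of the generic offload model: netdev->hw_features advertises what ethtool may toggle, netdev->features holds what is currently enabled, and later checks (sge.c further down) read the netdev fields instead of adapter->flags. A standalone sketch of the hw_features/features relationship (illustrative only, not this driver's code):

    /* A feature-toggle request is acceptable when every requested bit is
     * either user-toggleable (hw_features) or an always-on bit the core
     * manages on its own.  Illustration only.
     */
    static int feature_request_ok(unsigned long long hw_features,
                                  unsigned long long requested,
                                  unsigned long long always_on)
    {
            return (requested & ~(hw_features | always_on)) == 0;
    }
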
diff --git a/drivers/net/chelsio/mv88e1xxx.c b/drivers/net/chelsio/mv88e1xxx.c
index 809047a99e96..71018a4fdf15 100644
--- a/drivers/net/chelsio/mv88e1xxx.c
+++ b/drivers/net/chelsio/mv88e1xxx.c
@@ -41,7 +41,7 @@ static void mdio_clear_bit(struct cphy *cphy, int reg, u32 bitval)
*
* PARAMS: cphy - Pointer to PHY instance data.
*
- * RETURN: 0 - Successfull reset.
+ * RETURN: 0 - Successful reset.
* -1 - Timeout.
*/
static int mv88e1xxx_reset(struct cphy *cphy, int wait)
diff --git a/drivers/net/chelsio/pm3393.c b/drivers/net/chelsio/pm3393.c
index 7dbb16d36fff..40c7b93ababc 100644
--- a/drivers/net/chelsio/pm3393.c
+++ b/drivers/net/chelsio/pm3393.c
@@ -293,7 +293,7 @@ static int pm3393_enable_port(struct cmac *cmac, int which)
pm3393_enable(cmac, which);
/*
- * XXX This should be done by the PHY and preferrably not at all.
+ * XXX This should be done by the PHY and preferably not at all.
* The PHY doesn't give us link status indication on its own so have
* the link management code query it instead.
*/
diff --git a/drivers/net/chelsio/sge.c b/drivers/net/chelsio/sge.c
index f778b15ad3fd..b948ea737550 100644
--- a/drivers/net/chelsio/sge.c
+++ b/drivers/net/chelsio/sge.c
@@ -929,7 +929,7 @@ void t1_sge_intr_enable(struct sge *sge)
u32 en = SGE_INT_ENABLE;
u32 val = readl(sge->adapter->regs + A_PL_ENABLE);
- if (sge->adapter->flags & TSO_CAPABLE)
+ if (sge->adapter->port[0].dev->hw_features & NETIF_F_TSO)
en &= ~F_PACKET_TOO_BIG;
writel(en, sge->adapter->regs + A_SG_INT_ENABLE);
writel(val | SGE_PL_INTR_MASK, sge->adapter->regs + A_PL_ENABLE);
@@ -952,7 +952,7 @@ int t1_sge_intr_error_handler(struct sge *sge)
struct adapter *adapter = sge->adapter;
u32 cause = readl(adapter->regs + A_SG_INT_CAUSE);
- if (adapter->flags & TSO_CAPABLE)
+ if (adapter->port[0].dev->hw_features & NETIF_F_TSO)
cause &= ~F_PACKET_TOO_BIG;
if (cause & F_RESPQ_EXHAUSTED)
sge->stats.respQ_empty++;
@@ -1369,6 +1369,7 @@ static void sge_rx(struct sge *sge, struct freelQ *fl, unsigned int len)
const struct cpl_rx_pkt *p;
struct adapter *adapter = sge->adapter;
struct sge_port_stats *st;
+ struct net_device *dev;
skb = get_packet(adapter->pdev, fl, len - sge->rx_pkt_pad);
if (unlikely(!skb)) {
@@ -1384,9 +1385,10 @@ static void sge_rx(struct sge *sge, struct freelQ *fl, unsigned int len)
__skb_pull(skb, sizeof(*p));
st = this_cpu_ptr(sge->port_stats[p->iff]);
+ dev = adapter->port[p->iff].dev;
- skb->protocol = eth_type_trans(skb, adapter->port[p->iff].dev);
- if ((adapter->flags & RX_CSUM_ENABLED) && p->csum == 0xffff &&
+ skb->protocol = eth_type_trans(skb, dev);
+ if ((dev->features & NETIF_F_RXCSUM) && p->csum == 0xffff &&
skb->protocol == htons(ETH_P_IP) &&
(skb->data[9] == IPPROTO_TCP || skb->data[9] == IPPROTO_UDP)) {
++st->rx_cso_good;
@@ -1662,7 +1664,7 @@ irqreturn_t t1_interrupt(int irq, void *data)
* The code figures out how many entries the sk_buff will require in the
* cmdQ and updates the cmdQ data structure with the state once the enqueue
* has completed. Then, it doesn't access the global structure anymore, but
- * uses the corresponding fields on the stack. In conjuction with a spinlock
+ * uses the corresponding fields on the stack. In conjunction with a spinlock
* around that code, we can make the function reentrant without holding the
* lock when we actually enqueue (which might be expensive, especially on
* architectures with IO MMUs).
@@ -1838,8 +1840,7 @@ netdev_tx_t t1_start_xmit(struct sk_buff *skb, struct net_device *dev)
return NETDEV_TX_OK;
}
- if (!(adapter->flags & UDP_CSUM_CAPABLE) &&
- skb->ip_summed == CHECKSUM_PARTIAL &&
+ if (skb->ip_summed == CHECKSUM_PARTIAL &&
ip_hdr(skb)->protocol == IPPROTO_UDP) {
if (unlikely(skb_checksum_help(skb))) {
pr_debug("%s: unable to do udp checksum\n", dev->name);
diff --git a/drivers/net/chelsio/tp.c b/drivers/net/chelsio/tp.c
index 6222d585e447..8bed4a59e65f 100644
--- a/drivers/net/chelsio/tp.c
+++ b/drivers/net/chelsio/tp.c
@@ -152,11 +152,6 @@ void t1_tp_set_ip_checksum_offload(struct petp *tp, int enable)
set_csum_offload(tp, F_IP_CSUM, enable);
}
-void t1_tp_set_udp_checksum_offload(struct petp *tp, int enable)
-{
- set_csum_offload(tp, F_UDP_CSUM, enable);
-}
-
void t1_tp_set_tcp_checksum_offload(struct petp *tp, int enable)
{
set_csum_offload(tp, F_TCP_CSUM, enable);
diff --git a/drivers/net/chelsio/tp.h b/drivers/net/chelsio/tp.h
index 32fc71e58913..dfd8ce25106a 100644
--- a/drivers/net/chelsio/tp.h
+++ b/drivers/net/chelsio/tp.h
@@ -65,7 +65,6 @@ void t1_tp_intr_clear(struct petp *tp);
int t1_tp_intr_handler(struct petp *tp);
void t1_tp_get_mib_statistics(adapter_t *adap, struct tp_mib_statistics *tps);
-void t1_tp_set_udp_checksum_offload(struct petp *tp, int enable);
void t1_tp_set_tcp_checksum_offload(struct petp *tp, int enable);
void t1_tp_set_ip_checksum_offload(struct petp *tp, int enable);
int t1_tp_set_coalescing_size(struct petp *tp, unsigned int size);
diff --git a/drivers/net/chelsio/vsc7326.c b/drivers/net/chelsio/vsc7326.c
index 106a590f0d9a..b0cb388f5e12 100644
--- a/drivers/net/chelsio/vsc7326.c
+++ b/drivers/net/chelsio/vsc7326.c
@@ -566,7 +566,7 @@ static int mac_disable(struct cmac *mac, int which)
for (i = 0; i <= 0x3a; ++i)
vsc_write(mac->adapter, CRA(4, port, i), 0);
- /* Clear sofware counters */
+ /* Clear software counters */
memset(&mac->stats, 0, sizeof(struct cmac_statistics));
return 0;
diff --git a/drivers/net/cnic.c b/drivers/net/cnic.c
index 8cca60e43444..cde59b4e5ef8 100644
--- a/drivers/net/cnic.c
+++ b/drivers/net/cnic.c
@@ -2966,31 +2966,36 @@ static int cnic_service_bnx2x(void *data, void *status_blk)
return 0;
}
-static void cnic_ulp_stop(struct cnic_dev *dev)
+static void cnic_ulp_stop_one(struct cnic_local *cp, int if_type)
{
- struct cnic_local *cp = dev->cnic_priv;
- int if_type;
-
- cnic_send_nlmsg(cp, ISCSI_KEVENT_IF_DOWN, NULL);
+ struct cnic_ulp_ops *ulp_ops;
- for (if_type = 0; if_type < MAX_CNIC_ULP_TYPE; if_type++) {
- struct cnic_ulp_ops *ulp_ops;
+ if (if_type == CNIC_ULP_ISCSI)
+ cnic_send_nlmsg(cp, ISCSI_KEVENT_IF_DOWN, NULL);
- mutex_lock(&cnic_lock);
- ulp_ops = rcu_dereference_protected(cp->ulp_ops[if_type],
- lockdep_is_held(&cnic_lock));
- if (!ulp_ops) {
- mutex_unlock(&cnic_lock);
- continue;
- }
- set_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[if_type]);
+ mutex_lock(&cnic_lock);
+ ulp_ops = rcu_dereference_protected(cp->ulp_ops[if_type],
+ lockdep_is_held(&cnic_lock));
+ if (!ulp_ops) {
mutex_unlock(&cnic_lock);
+ return;
+ }
+ set_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[if_type]);
+ mutex_unlock(&cnic_lock);
- if (test_and_clear_bit(ULP_F_START, &cp->ulp_flags[if_type]))
- ulp_ops->cnic_stop(cp->ulp_handle[if_type]);
+ if (test_and_clear_bit(ULP_F_START, &cp->ulp_flags[if_type]))
+ ulp_ops->cnic_stop(cp->ulp_handle[if_type]);
- clear_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[if_type]);
- }
+ clear_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[if_type]);
+}
+
+static void cnic_ulp_stop(struct cnic_dev *dev)
+{
+ struct cnic_local *cp = dev->cnic_priv;
+ int if_type;
+
+ for (if_type = 0; if_type < MAX_CNIC_ULP_TYPE; if_type++)
+ cnic_ulp_stop_one(cp, if_type);
}
static void cnic_ulp_start(struct cnic_dev *dev)
@@ -3039,6 +3044,12 @@ static int cnic_ctl(void *data, struct cnic_ctl_info *info)
cnic_put(dev);
break;
+ case CNIC_CTL_STOP_ISCSI_CMD: {
+ struct cnic_local *cp = dev->cnic_priv;
+ set_bit(CNIC_LCL_FL_STOP_ISCSI, &cp->cnic_local_flags);
+ queue_delayed_work(cnic_wq, &cp->delete_task, 0);
+ break;
+ }
case CNIC_CTL_COMPLETION_CMD: {
u32 cid = BNX2X_SW_CID(info->data.comp.cid);
u32 l5_cid;
@@ -3562,8 +3573,12 @@ static void cnic_init_csk_state(struct cnic_sock *csk)
static int cnic_cm_connect(struct cnic_sock *csk, struct cnic_sockaddr *saddr)
{
+ struct cnic_local *cp = csk->dev->cnic_priv;
int err = 0;
+ if (cp->ethdev->drv_state & CNIC_DRV_STATE_NO_ISCSI)
+ return -EOPNOTSUPP;
+
if (!cnic_in_use(csk))
return -EINVAL;
@@ -3965,6 +3980,15 @@ static void cnic_delete_task(struct work_struct *work)
cp = container_of(work, struct cnic_local, delete_task.work);
dev = cp->dev;
+ if (test_and_clear_bit(CNIC_LCL_FL_STOP_ISCSI, &cp->cnic_local_flags)) {
+ struct drv_ctl_info info;
+
+ cnic_ulp_stop_one(cp, CNIC_ULP_ISCSI);
+
+ info.cmd = DRV_CTL_ISCSI_STOPPED_CMD;
+ cp->ethdev->drv_ctl(dev->netdev, &info);
+ }
+
for (i = 0; i < cp->max_cid_space; i++) {
struct cnic_context *ctx = &cp->ctx_tbl[i];
diff --git a/drivers/net/cnic.h b/drivers/net/cnic.h
index 4456260c653c..3367a6d3a774 100644
--- a/drivers/net/cnic.h
+++ b/drivers/net/cnic.h
@@ -226,6 +226,7 @@ struct cnic_local {
#define CNIC_LCL_FL_KWQ_INIT 0x0
#define CNIC_LCL_FL_L2_WAIT 0x1
#define CNIC_LCL_FL_RINGS_INITED 0x2
+#define CNIC_LCL_FL_STOP_ISCSI 0x4
struct cnic_dev *dev;
diff --git a/drivers/net/cnic_if.h b/drivers/net/cnic_if.h
index e01b49ee3591..fdd8e46a9050 100644
--- a/drivers/net/cnic_if.h
+++ b/drivers/net/cnic_if.h
@@ -12,8 +12,8 @@
#ifndef CNIC_IF_H
#define CNIC_IF_H
-#define CNIC_MODULE_VERSION "2.2.13"
-#define CNIC_MODULE_RELDATE "Jan 31, 2011"
+#define CNIC_MODULE_VERSION "2.2.14"
+#define CNIC_MODULE_RELDATE "Mar 30, 2011"
#define CNIC_ULP_RDMA 0
#define CNIC_ULP_ISCSI 1
@@ -85,6 +85,7 @@ struct kcqe {
#define CNIC_CTL_STOP_CMD 1
#define CNIC_CTL_START_CMD 2
#define CNIC_CTL_COMPLETION_CMD 3
+#define CNIC_CTL_STOP_ISCSI_CMD 4
#define DRV_CTL_IO_WR_CMD 0x101
#define DRV_CTL_IO_RD_CMD 0x102
@@ -94,6 +95,7 @@ struct kcqe {
#define DRV_CTL_START_L2_CMD 0x106
#define DRV_CTL_STOP_L2_CMD 0x107
#define DRV_CTL_RET_L2_SPQ_CREDIT_CMD 0x10c
+#define DRV_CTL_ISCSI_STOPPED_CMD 0x10d
struct cnic_ctl_completion {
u32 cid;
diff --git a/drivers/net/cris/eth_v10.c b/drivers/net/cris/eth_v10.c
index 80c2feeefec5..e66aceb57cef 100644
--- a/drivers/net/cris/eth_v10.c
+++ b/drivers/net/cris/eth_v10.c
@@ -491,8 +491,8 @@ e100_open(struct net_device *dev)
/* allocate the irq corresponding to the receiving DMA */
- if (request_irq(NETWORK_DMA_RX_IRQ_NBR, e100rxtx_interrupt,
- IRQF_SAMPLE_RANDOM, cardname, (void *)dev)) {
+ if (request_irq(NETWORK_DMA_RX_IRQ_NBR, e100rxtx_interrupt, 0, cardname,
+ (void *)dev)) {
goto grace_exit0;
}
@@ -1383,7 +1383,7 @@ e100_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
spin_lock(&np->lock); /* Preempt protection */
switch (cmd) {
/* The ioctls below should be considered obsolete but are */
- /* still present for compatability with old scripts/apps */
+ /* still present for compatibility with old scripts/apps */
case SET_ETH_SPEED_10: /* 10 Mbps */
e100_set_speed(dev, 10);
break;
diff --git a/drivers/net/cxgb3/adapter.h b/drivers/net/cxgb3/adapter.h
index ef67be59680f..7300de5a1426 100644
--- a/drivers/net/cxgb3/adapter.h
+++ b/drivers/net/cxgb3/adapter.h
@@ -50,11 +50,6 @@ struct adapter;
struct sge_qset;
struct port_info;
-enum { /* rx_offload flags */
- T3_RX_CSUM = 1 << 0,
- T3_LRO = 1 << 1,
-};
-
enum mac_idx_types {
LAN_MAC_IDX = 0,
SAN_MAC_IDX,
@@ -74,7 +69,6 @@ struct port_info {
struct vlan_group *vlan_grp;
struct sge_qset *qs;
u8 port_id;
- u8 rx_offload;
u8 nqsets;
u8 first_qset;
struct cphy phy;
@@ -212,7 +206,6 @@ struct sge_qset { /* an SGE queue set */
struct sge_fl fl[SGE_RXQ_PER_SET];
struct sge_txq txq[SGE_TXQ_PER_SET];
int nomem;
- int lro_enabled;
void *lro_va;
struct net_device *netdev;
struct netdev_queue *tx_q; /* associated netdev TX queue */
diff --git a/drivers/net/cxgb3/common.h b/drivers/net/cxgb3/common.h
index 5ccb77d078aa..056ee8c831f1 100644
--- a/drivers/net/cxgb3/common.h
+++ b/drivers/net/cxgb3/common.h
@@ -317,7 +317,6 @@ struct tp_params {
struct qset_params { /* SGE queue set parameters */
unsigned int polling; /* polling/interrupt service for rspq */
- unsigned int lro; /* large receive offload */
unsigned int coalesce_usecs; /* irq coalescing timer */
unsigned int rspq_size; /* # of entries in response queue */
unsigned int fl_size; /* # of entries in regular free list */
diff --git a/drivers/net/cxgb3/cxgb3_main.c b/drivers/net/cxgb3/cxgb3_main.c
index 910893143295..9081ce037149 100644
--- a/drivers/net/cxgb3/cxgb3_main.c
+++ b/drivers/net/cxgb3/cxgb3_main.c
@@ -644,26 +644,6 @@ static void enable_all_napi(struct adapter *adap)
}
/**
- * set_qset_lro - Turn a queue set's LRO capability on and off
- * @dev: the device the qset is attached to
- * @qset_idx: the queue set index
- * @val: the LRO switch
- *
- * Sets LRO on or off for a particular queue set.
- * the device's features flag is updated to reflect the LRO
- * capability when all queues belonging to the device are
- * in the same state.
- */
-static void set_qset_lro(struct net_device *dev, int qset_idx, int val)
-{
- struct port_info *pi = netdev_priv(dev);
- struct adapter *adapter = pi->adapter;
-
- adapter->params.sge.qset[qset_idx].lro = !!val;
- adapter->sge.qs[qset_idx].lro_enabled = !!val;
-}
-
-/**
* setup_sge_qsets - configure SGE Tx/Rx/response queues
* @adap: the adapter
*
@@ -685,7 +665,6 @@ static int setup_sge_qsets(struct adapter *adap)
pi->qs = &adap->sge.qs[pi->first_qset];
for (j = 0; j < pi->nqsets; ++j, ++qset_idx) {
- set_qset_lro(dev, qset_idx, pi->rx_offload & T3_LRO);
err = t3_sge_alloc_qset(adap, qset_idx, 1,
(adap->flags & USING_MSIX) ? qset_idx + 1 :
irq_idx,
@@ -1749,23 +1728,26 @@ static int restart_autoneg(struct net_device *dev)
return 0;
}
-static int cxgb3_phys_id(struct net_device *dev, u32 data)
+static int set_phys_id(struct net_device *dev,
+ enum ethtool_phys_id_state state)
{
struct port_info *pi = netdev_priv(dev);
struct adapter *adapter = pi->adapter;
- int i;
- if (data == 0)
- data = 2;
+ switch (state) {
+ case ETHTOOL_ID_ACTIVE:
+ return 1; /* cycle on/off once per second */
- for (i = 0; i < data * 2; i++) {
+ case ETHTOOL_ID_OFF:
+ t3_set_reg_field(adapter, A_T3DBG_GPIO_EN, F_GPIO0_OUT_VAL, 0);
+ break;
+
+ case ETHTOOL_ID_ON:
+ case ETHTOOL_ID_INACTIVE:
t3_set_reg_field(adapter, A_T3DBG_GPIO_EN, F_GPIO0_OUT_VAL,
- (i & 1) ? F_GPIO0_OUT_VAL : 0);
- if (msleep_interruptible(500))
- break;
- }
- t3_set_reg_field(adapter, A_T3DBG_GPIO_EN, F_GPIO0_OUT_VAL,
F_GPIO0_OUT_VAL);
+ }
+
return 0;
}
@@ -1777,10 +1759,10 @@ static int get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
cmd->advertising = p->link_config.advertising;
if (netif_carrier_ok(dev)) {
- cmd->speed = p->link_config.speed;
+ ethtool_cmd_speed_set(cmd, p->link_config.speed);
cmd->duplex = p->link_config.duplex;
} else {
- cmd->speed = -1;
+ ethtool_cmd_speed_set(cmd, -1);
cmd->duplex = -1;
}
@@ -1839,7 +1821,8 @@ static int set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
* being requested.
*/
if (cmd->autoneg == AUTONEG_DISABLE) {
- int cap = speed_duplex_to_caps(cmd->speed, cmd->duplex);
+ u32 speed = ethtool_cmd_speed(cmd);
+ int cap = speed_duplex_to_caps(speed, cmd->duplex);
if (lc->supported & cap)
return 0;
}
@@ -1847,11 +1830,12 @@ static int set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
}
if (cmd->autoneg == AUTONEG_DISABLE) {
- int cap = speed_duplex_to_caps(cmd->speed, cmd->duplex);
+ u32 speed = ethtool_cmd_speed(cmd);
+ int cap = speed_duplex_to_caps(speed, cmd->duplex);
- if (!(lc->supported & cap) || cmd->speed == SPEED_1000)
+ if (!(lc->supported & cap) || (speed == SPEED_1000))
return -EINVAL;
- lc->requested_speed = cmd->speed;
+ lc->requested_speed = speed;
lc->requested_duplex = cmd->duplex;
lc->advertising = 0;
} else {
@@ -1907,29 +1891,6 @@ static int set_pauseparam(struct net_device *dev,
return 0;
}
-static u32 get_rx_csum(struct net_device *dev)
-{
- struct port_info *p = netdev_priv(dev);
-
- return p->rx_offload & T3_RX_CSUM;
-}
-
-static int set_rx_csum(struct net_device *dev, u32 data)
-{
- struct port_info *p = netdev_priv(dev);
-
- if (data) {
- p->rx_offload |= T3_RX_CSUM;
- } else {
- int i;
-
- p->rx_offload &= ~(T3_RX_CSUM | T3_LRO);
- for (i = p->first_qset; i < p->first_qset + p->nqsets; i++)
- set_qset_lro(dev, i, 0);
- }
- return 0;
-}
-
static void get_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
{
struct port_info *pi = netdev_priv(dev);
@@ -2101,20 +2062,15 @@ static const struct ethtool_ops cxgb_ethtool_ops = {
.set_eeprom = set_eeprom,
.get_pauseparam = get_pauseparam,
.set_pauseparam = set_pauseparam,
- .get_rx_csum = get_rx_csum,
- .set_rx_csum = set_rx_csum,
- .set_tx_csum = ethtool_op_set_tx_csum,
- .set_sg = ethtool_op_set_sg,
.get_link = ethtool_op_get_link,
.get_strings = get_strings,
- .phys_id = cxgb3_phys_id,
+ .set_phys_id = set_phys_id,
.nway_reset = restart_autoneg,
.get_sset_count = get_sset_count,
.get_ethtool_stats = get_stats,
.get_regs_len = get_regs_len,
.get_regs = get_regs,
.get_wol = get_wol,
- .set_tso = ethtool_op_set_tso,
};
static int in_range(int val, int lo, int hi)
@@ -2162,15 +2118,6 @@ static int cxgb_extension_ioctl(struct net_device *dev, void __user *useraddr)
MAX_RSPQ_ENTRIES))
return -EINVAL;
- if ((adapter->flags & FULL_INIT_DONE) && t.lro > 0)
- for_each_port(adapter, i) {
- pi = adap2pinfo(adapter, i);
- if (t.qset_idx >= pi->first_qset &&
- t.qset_idx < pi->first_qset + pi->nqsets &&
- !(pi->rx_offload & T3_RX_CSUM))
- return -EINVAL;
- }
-
if ((adapter->flags & FULL_INIT_DONE) &&
(t.rspq_size >= 0 || t.fl_size[0] >= 0 ||
t.fl_size[1] >= 0 || t.txq_size[0] >= 0 ||
@@ -2231,8 +2178,14 @@ static int cxgb_extension_ioctl(struct net_device *dev, void __user *useraddr)
}
}
}
- if (t.lro >= 0)
- set_qset_lro(dev, t.qset_idx, t.lro);
+
+ if (t.lro >= 0) {
+ if (t.lro)
+ dev->wanted_features |= NETIF_F_GRO;
+ else
+ dev->wanted_features &= ~NETIF_F_GRO;
+ netdev_update_features(dev);
+ }
break;
}
@@ -2266,7 +2219,7 @@ static int cxgb_extension_ioctl(struct net_device *dev, void __user *useraddr)
t.fl_size[0] = q->fl_size;
t.fl_size[1] = q->jumbo_size;
t.polling = q->polling;
- t.lro = q->lro;
+ t.lro = !!(dev->features & NETIF_F_GRO);
t.intr_lat = q->coalesce_usecs;
t.cong_thres = q->cong_thres;
t.qnum = q1;
@@ -3304,18 +3257,18 @@ static int __devinit init_one(struct pci_dev *pdev,
adapter->port[i] = netdev;
pi = netdev_priv(netdev);
pi->adapter = adapter;
- pi->rx_offload = T3_RX_CSUM | T3_LRO;
pi->port_id = i;
netif_carrier_off(netdev);
netdev->irq = pdev->irq;
netdev->mem_start = mmio_start;
netdev->mem_end = mmio_start + mmio_len - 1;
- netdev->features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO;
- netdev->features |= NETIF_F_GRO;
+ netdev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM |
+ NETIF_F_TSO | NETIF_F_RXCSUM;
+ netdev->features |= netdev->hw_features |
+ NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
if (pci_using_dac)
netdev->features |= NETIF_F_HIGHDMA;
- netdev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
netdev->netdev_ops = &cxgb_netdev_ops;
SET_ETHTOOL_OPS(netdev, &cxgb_ethtool_ops);
}
diff --git a/drivers/net/cxgb3/sge.c b/drivers/net/cxgb3/sge.c
index f9f6645b2e61..cba1401377ab 100644
--- a/drivers/net/cxgb3/sge.c
+++ b/drivers/net/cxgb3/sge.c
@@ -199,7 +199,7 @@ static inline void refill_rspq(struct adapter *adapter,
* need_skb_unmap - does the platform need unmapping of sk_buffs?
*
* Returns true if the platform needs sk_buff unmapping. The compiler
- * optimizes away unecessary code if this returns true.
+ * optimizes away unnecessary code if this returns true.
*/
static inline int need_skb_unmap(void)
{
@@ -2019,7 +2019,7 @@ static void rx_eth(struct adapter *adap, struct sge_rspq *rq,
skb_pull(skb, sizeof(*p) + pad);
skb->protocol = eth_type_trans(skb, adap->port[p->iff]);
pi = netdev_priv(skb->dev);
- if ((pi->rx_offload & T3_RX_CSUM) && p->csum_valid &&
+ if ((skb->dev->features & NETIF_F_RXCSUM) && p->csum_valid &&
p->csum == htons(0xffff) && !p->fragment) {
qs->port_stats[SGE_PSTAT_RX_CSUM_GOOD]++;
skb->ip_summed = CHECKSUM_UNNECESSARY;
@@ -2120,7 +2120,7 @@ static void lro_add_page(struct adapter *adap, struct sge_qset *qs,
offset = 2 + sizeof(struct cpl_rx_pkt);
cpl = qs->lro_va = sd->pg_chunk.va + 2;
- if ((pi->rx_offload & T3_RX_CSUM) &&
+ if ((qs->netdev->features & NETIF_F_RXCSUM) &&
cpl->csum_valid && cpl->csum == htons(0xffff)) {
skb->ip_summed = CHECKSUM_UNNECESSARY;
qs->port_stats[SGE_PSTAT_RX_CSUM_GOOD]++;
@@ -2285,7 +2285,8 @@ static int process_responses(struct adapter *adap, struct sge_qset *qs,
q->next_holdoff = q->holdoff_tmr;
while (likely(budget_left && is_new_response(r, q))) {
- int packet_complete, eth, ethpad = 2, lro = qs->lro_enabled;
+ int packet_complete, eth, ethpad = 2;
+ int lro = !!(qs->netdev->features & NETIF_F_GRO);
struct sk_buff *skb = NULL;
u32 len, flags;
__be32 rss_hi, rss_lo;
diff --git a/drivers/net/cxgb3/t3_hw.c b/drivers/net/cxgb3/t3_hw.c
index d55db6b38e7b..c688421da9c7 100644
--- a/drivers/net/cxgb3/t3_hw.c
+++ b/drivers/net/cxgb3/t3_hw.c
@@ -1386,11 +1386,11 @@ struct intr_info {
* @reg: the interrupt status register to process
* @mask: a mask to apply to the interrupt status
* @acts: table of interrupt actions
- * @stats: statistics counters tracking interrupt occurences
+ * @stats: statistics counters tracking interrupt occurrences
*
* A table driven interrupt handler that applies a set of masks to an
* interrupt status word and performs the corresponding actions if the
- * interrupts described by the mask have occured. The actions include
+ * interrupts described by the mask have occurred. The actions include
* optionally printing a warning or alert message, and optionally
* incrementing a stat counter. The table is terminated by an entry
* specifying mask 0. Returns the number of fatal interrupt conditions.
@@ -2783,7 +2783,7 @@ static void init_mtus(unsigned short mtus[])
{
/*
* See draft-mathis-plpmtud-00.txt for the values. The min is 88 so
- * it can accomodate max size TCP/IP headers when SACK and timestamps
+ * it can accommodate max size TCP/IP headers when SACK and timestamps
* are enabled and still have at least 8 bytes of payload.
*/
mtus[0] = 88;
diff --git a/drivers/net/cxgb4/cxgb4.h b/drivers/net/cxgb4/cxgb4.h
index 01d49eaa44d2..bc9982a4c1f4 100644
--- a/drivers/net/cxgb4/cxgb4.h
+++ b/drivers/net/cxgb4/cxgb4.h
@@ -290,7 +290,6 @@ struct port_info {
u8 port_id;
u8 tx_chan;
u8 lport; /* associated offload logical port */
- u8 rx_offload; /* CSO, etc */
u8 nqsets; /* # of qsets */
u8 first_qset; /* index of first qset */
u8 rss_mode;
@@ -298,11 +297,6 @@ struct port_info {
u16 *rss;
};
-/* port_info.rx_offload flags */
-enum {
- RX_CSO = 1 << 0,
-};
-
struct dentry;
struct work_struct;
diff --git a/drivers/net/cxgb4/cxgb4_main.c b/drivers/net/cxgb4/cxgb4_main.c
index 5352c8a23f4d..7e3cfbe89e3b 100644
--- a/drivers/net/cxgb4/cxgb4_main.c
+++ b/drivers/net/cxgb4/cxgb4_main.c
@@ -1336,15 +1336,20 @@ static int restart_autoneg(struct net_device *dev)
return 0;
}
-static int identify_port(struct net_device *dev, u32 data)
+static int identify_port(struct net_device *dev,
+ enum ethtool_phys_id_state state)
{
+ unsigned int val;
struct adapter *adap = netdev2adap(dev);
- if (data == 0)
- data = 2; /* default to 2 seconds */
+ if (state == ETHTOOL_ID_ACTIVE)
+ val = 0xffff;
+ else if (state == ETHTOOL_ID_INACTIVE)
+ val = 0;
+ else
+ return -EINVAL;
- return t4_identify_port(adap, adap->fn, netdev2pinfo(dev)->viid,
- data * 5);
+ return t4_identify_port(adap, adap->fn, netdev2pinfo(dev)->viid, val);
}
static unsigned int from_fw_linkcaps(unsigned int type, unsigned int caps)
@@ -1431,7 +1436,8 @@ static int get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
cmd->supported = from_fw_linkcaps(p->port_type, p->link_cfg.supported);
cmd->advertising = from_fw_linkcaps(p->port_type,
p->link_cfg.advertising);
- cmd->speed = netif_carrier_ok(dev) ? p->link_cfg.speed : 0;
+ ethtool_cmd_speed_set(cmd,
+ netif_carrier_ok(dev) ? p->link_cfg.speed : 0);
cmd->duplex = DUPLEX_FULL;
cmd->autoneg = p->link_cfg.autoneg;
cmd->maxtxpkt = 0;
@@ -1455,6 +1461,7 @@ static int set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
unsigned int cap;
struct port_info *p = netdev_priv(dev);
struct link_config *lc = &p->link_cfg;
+ u32 speed = ethtool_cmd_speed(cmd);
if (cmd->duplex != DUPLEX_FULL) /* only full-duplex supported */
return -EINVAL;
@@ -1465,16 +1472,16 @@ static int set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
* being requested.
*/
if (cmd->autoneg == AUTONEG_DISABLE &&
- (lc->supported & speed_to_caps(cmd->speed)))
- return 0;
+ (lc->supported & speed_to_caps(speed)))
+ return 0;
return -EINVAL;
}
if (cmd->autoneg == AUTONEG_DISABLE) {
- cap = speed_to_caps(cmd->speed);
+ cap = speed_to_caps(speed);
- if (!(lc->supported & cap) || cmd->speed == SPEED_1000 ||
- cmd->speed == SPEED_10000)
+ if (!(lc->supported & cap) || (speed == SPEED_1000) ||
+ (speed == SPEED_10000))
return -EINVAL;
lc->requested_speed = cap;
lc->advertising = 0;
@@ -1526,24 +1533,6 @@ static int set_pauseparam(struct net_device *dev,
return 0;
}
-static u32 get_rx_csum(struct net_device *dev)
-{
- struct port_info *p = netdev_priv(dev);
-
- return p->rx_offload & RX_CSO;
-}
-
-static int set_rx_csum(struct net_device *dev, u32 data)
-{
- struct port_info *p = netdev_priv(dev);
-
- if (data)
- p->rx_offload |= RX_CSO;
- else
- p->rx_offload &= ~RX_CSO;
- return 0;
-}
-
static void get_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
{
const struct port_info *pi = netdev_priv(dev);
@@ -1865,36 +1854,20 @@ static int set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
return err;
}
-#define TSO_FLAGS (NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_TSO_ECN)
-
-static int set_tso(struct net_device *dev, u32 value)
-{
- if (value)
- dev->features |= TSO_FLAGS;
- else
- dev->features &= ~TSO_FLAGS;
- return 0;
-}
-
-static int set_flags(struct net_device *dev, u32 flags)
+static int cxgb_set_features(struct net_device *dev, u32 features)
{
+ const struct port_info *pi = netdev_priv(dev);
+ u32 changed = dev->features ^ features;
int err;
- unsigned long old_feat = dev->features;
- err = ethtool_op_set_flags(dev, flags, ETH_FLAG_RXHASH |
- ETH_FLAG_RXVLAN | ETH_FLAG_TXVLAN);
- if (err)
- return err;
-
- if ((old_feat ^ dev->features) & NETIF_F_HW_VLAN_RX) {
- const struct port_info *pi = netdev_priv(dev);
+ if (!(changed & NETIF_F_HW_VLAN_RX))
+ return 0;
- err = t4_set_rxmode(pi->adapter, pi->adapter->fn, pi->viid, -1,
- -1, -1, -1, !!(flags & ETH_FLAG_RXVLAN),
- true);
- if (err)
- dev->features = old_feat;
- }
+ err = t4_set_rxmode(pi->adapter, pi->adapter->fn, pi->viid, -1,
+ -1, -1, -1,
+ !!(features & NETIF_F_HW_VLAN_RX), true);
+ if (unlikely(err))
+ dev->features = features ^ NETIF_F_HW_VLAN_RX;
return err;
}
@@ -2005,13 +1978,9 @@ static struct ethtool_ops cxgb_ethtool_ops = {
.set_eeprom = set_eeprom,
.get_pauseparam = get_pauseparam,
.set_pauseparam = set_pauseparam,
- .get_rx_csum = get_rx_csum,
- .set_rx_csum = set_rx_csum,
- .set_tx_csum = ethtool_op_set_tx_ipv6_csum,
- .set_sg = ethtool_op_set_sg,
.get_link = ethtool_op_get_link,
.get_strings = get_strings,
- .phys_id = identify_port,
+ .set_phys_id = identify_port,
.nway_reset = restart_autoneg,
.get_sset_count = get_sset_count,
.get_ethtool_stats = get_stats,
@@ -2019,8 +1988,6 @@ static struct ethtool_ops cxgb_ethtool_ops = {
.get_regs = get_regs,
.get_wol = get_wol,
.set_wol = set_wol,
- .set_tso = set_tso,
- .set_flags = set_flags,
.get_rxnfc = get_rxnfc,
.get_rxfh_indir = get_rss_table,
.set_rxfh_indir = set_rss_table,
@@ -2877,6 +2844,7 @@ static const struct net_device_ops cxgb4_netdev_ops = {
.ndo_get_stats64 = cxgb_get_stats,
.ndo_set_rx_mode = cxgb_set_rxmode,
.ndo_set_mac_address = cxgb_set_mac_addr,
+ .ndo_set_features = cxgb_set_features,
.ndo_validate_addr = eth_validate_addr,
.ndo_do_ioctl = cxgb_ioctl,
.ndo_change_mtu = cxgb_change_mtu,
@@ -3559,6 +3527,7 @@ static void free_some_resources(struct adapter *adapter)
t4_fw_bye(adapter, adapter->fn);
}
+#define TSO_FLAGS (NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_TSO_ECN)
#define VLAN_FEAT (NETIF_F_SG | NETIF_F_IP_CSUM | TSO_FLAGS | \
NETIF_F_IPV6_CSUM | NETIF_F_HIGHDMA)
@@ -3660,14 +3629,14 @@ static int __devinit init_one(struct pci_dev *pdev,
pi = netdev_priv(netdev);
pi->adapter = adapter;
pi->xact_addr_filt = -1;
- pi->rx_offload = RX_CSO;
pi->port_id = i;
netdev->irq = pdev->irq;
- netdev->features |= NETIF_F_SG | TSO_FLAGS;
- netdev->features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
- netdev->features |= NETIF_F_GRO | NETIF_F_RXHASH | highdma;
- netdev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
+ netdev->hw_features = NETIF_F_SG | TSO_FLAGS |
+ NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
+ NETIF_F_RXCSUM | NETIF_F_RXHASH |
+ NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
+ netdev->features |= netdev->hw_features | highdma;
netdev->vlan_features = netdev->features & VLAN_FEAT;
netdev->netdev_ops = &cxgb4_netdev_ops;
diff --git a/drivers/net/cxgb4/sge.c b/drivers/net/cxgb4/sge.c
index 311471b439a8..75a4b0fa19ee 100644
--- a/drivers/net/cxgb4/sge.c
+++ b/drivers/net/cxgb4/sge.c
@@ -1556,7 +1556,6 @@ int t4_ethrx_handler(struct sge_rspq *q, const __be64 *rsp,
{
bool csum_ok;
struct sk_buff *skb;
- struct port_info *pi;
const struct cpl_rx_pkt *pkt;
struct sge_eth_rxq *rxq = container_of(q, struct sge_eth_rxq, rspq);
@@ -1584,10 +1583,9 @@ int t4_ethrx_handler(struct sge_rspq *q, const __be64 *rsp,
if (skb->dev->features & NETIF_F_RXHASH)
skb->rxhash = (__force u32)pkt->rsshdr.hash_val;
- pi = netdev_priv(skb->dev);
rxq->stats.pkts++;
- if (csum_ok && (pi->rx_offload & RX_CSO) &&
+ if (csum_ok && (q->netdev->features & NETIF_F_RXCSUM) &&
(pkt->l2info & htonl(RXF_UDP | RXF_TCP))) {
if (!pkt->ip_frag) {
skb->ip_summed = CHECKSUM_UNNECESSARY;
diff --git a/drivers/net/cxgb4/t4_hw.c b/drivers/net/cxgb4/t4_hw.c
index b9fd8a6f2cc4..d1ec111aebd8 100644
--- a/drivers/net/cxgb4/t4_hw.c
+++ b/drivers/net/cxgb4/t4_hw.c
@@ -883,7 +883,7 @@ struct intr_info {
*
* A table driven interrupt handler that applies a set of masks to an
* interrupt status word and performs the corresponding actions if the
- * interrupts described by the mask have occured. The actions include
+ * interrupts described by the mask have occurred. The actions include
* optionally emitting a warning or alert message. The table is terminated
* by an entry specifying mask 0. Returns the number of fatal interrupt
* conditions.
diff --git a/drivers/net/cxgb4vf/adapter.h b/drivers/net/cxgb4vf/adapter.h
index 4766b4116b41..4fd821aadc8a 100644
--- a/drivers/net/cxgb4vf/adapter.h
+++ b/drivers/net/cxgb4vf/adapter.h
@@ -97,17 +97,11 @@ struct port_info {
u16 rss_size; /* size of VI's RSS table slice */
u8 pidx; /* index into adapter port[] */
u8 port_id; /* physical port ID */
- u8 rx_offload; /* CSO, etc. */
u8 nqsets; /* # of "Queue Sets" */
u8 first_qset; /* index of first "Queue Set" */
struct link_config link_cfg; /* physical port configuration */
};
-/* port_info.rx_offload flags */
-enum {
- RX_CSO = 1 << 0,
-};
-
/*
* Scatter Gather Engine resources for the "adapter". Our ingress and egress
* queues are organized into "Queue Sets" with one ingress and one egress
diff --git a/drivers/net/cxgb4vf/cxgb4vf_main.c b/drivers/net/cxgb4vf/cxgb4vf_main.c
index 6aad64df4dcb..e71c08e547e4 100644
--- a/drivers/net/cxgb4vf/cxgb4vf_main.c
+++ b/drivers/net/cxgb4vf/cxgb4vf_main.c
@@ -1167,7 +1167,8 @@ static int cxgb4vf_get_settings(struct net_device *dev,
cmd->supported = pi->link_cfg.supported;
cmd->advertising = pi->link_cfg.advertising;
- cmd->speed = netif_carrier_ok(dev) ? pi->link_cfg.speed : -1;
+ ethtool_cmd_speed_set(cmd,
+ netif_carrier_ok(dev) ? pi->link_cfg.speed : -1);
cmd->duplex = DUPLEX_FULL;
cmd->port = (cmd->supported & SUPPORTED_TP) ? PORT_TP : PORT_FIBRE;
@@ -1326,37 +1327,22 @@ static void cxgb4vf_get_pauseparam(struct net_device *dev,
}
/*
- * Return whether RX Checksum Offloading is currently enabled for the device.
- */
-static u32 cxgb4vf_get_rx_csum(struct net_device *dev)
-{
- struct port_info *pi = netdev_priv(dev);
-
- return (pi->rx_offload & RX_CSO) != 0;
-}
-
-/*
- * Turn RX Checksum Offloading on or off for the device.
+ * Identify the port by blinking the port's LED.
*/
-static int cxgb4vf_set_rx_csum(struct net_device *dev, u32 csum)
+static int cxgb4vf_phys_id(struct net_device *dev,
+ enum ethtool_phys_id_state state)
{
+ unsigned int val;
struct port_info *pi = netdev_priv(dev);
- if (csum)
- pi->rx_offload |= RX_CSO;
+ if (state == ETHTOOL_ID_ACTIVE)
+ val = 0xffff;
+ else if (state == ETHTOOL_ID_INACTIVE)
+ val = 0;
else
- pi->rx_offload &= ~RX_CSO;
- return 0;
-}
-
-/*
- * Identify the port by blinking the port's LED.
- */
-static int cxgb4vf_phys_id(struct net_device *dev, u32 id)
-{
- struct port_info *pi = netdev_priv(dev);
+ return -EINVAL;
- return t4vf_identify_port(pi->adapter, pi->viid, 5);
+ return t4vf_identify_port(pi->adapter, pi->viid, val);
}
/*
@@ -1560,18 +1546,6 @@ static void cxgb4vf_get_wol(struct net_device *dev,
*/
#define TSO_FLAGS (NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_TSO_ECN)
-/*
- * Set TCP Segmentation Offloading feature capabilities.
- */
-static int cxgb4vf_set_tso(struct net_device *dev, u32 tso)
-{
- if (tso)
- dev->features |= TSO_FLAGS;
- else
- dev->features &= ~TSO_FLAGS;
- return 0;
-}
-
static struct ethtool_ops cxgb4vf_ethtool_ops = {
.get_settings = cxgb4vf_get_settings,
.get_drvinfo = cxgb4vf_get_drvinfo,
@@ -1582,19 +1556,14 @@ static struct ethtool_ops cxgb4vf_ethtool_ops = {
.get_coalesce = cxgb4vf_get_coalesce,
.set_coalesce = cxgb4vf_set_coalesce,
.get_pauseparam = cxgb4vf_get_pauseparam,
- .get_rx_csum = cxgb4vf_get_rx_csum,
- .set_rx_csum = cxgb4vf_set_rx_csum,
- .set_tx_csum = ethtool_op_set_tx_ipv6_csum,
- .set_sg = ethtool_op_set_sg,
.get_link = ethtool_op_get_link,
.get_strings = cxgb4vf_get_strings,
- .phys_id = cxgb4vf_phys_id,
+ .set_phys_id = cxgb4vf_phys_id,
.get_sset_count = cxgb4vf_get_sset_count,
.get_ethtool_stats = cxgb4vf_get_ethtool_stats,
.get_regs_len = cxgb4vf_get_regs_len,
.get_regs = cxgb4vf_get_regs,
.get_wol = cxgb4vf_get_wol,
- .set_tso = cxgb4vf_set_tso,
};
/*
@@ -2629,19 +2598,19 @@ static int __devinit cxgb4vf_pci_probe(struct pci_dev *pdev,
* it.
*/
pi->xact_addr_filt = -1;
- pi->rx_offload = RX_CSO;
netif_carrier_off(netdev);
netdev->irq = pdev->irq;
- netdev->features = (NETIF_F_SG | TSO_FLAGS |
- NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
- NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX |
- NETIF_F_GRO);
+ netdev->hw_features = NETIF_F_SG | TSO_FLAGS |
+ NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
+ NETIF_F_HW_VLAN_TX | NETIF_F_RXCSUM;
+ netdev->vlan_features = NETIF_F_SG | TSO_FLAGS |
+ NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
+ NETIF_F_HIGHDMA;
+ netdev->features = netdev->hw_features |
+ NETIF_F_HW_VLAN_RX;
if (pci_using_dac)
netdev->features |= NETIF_F_HIGHDMA;
- netdev->vlan_features =
- (netdev->features &
- ~(NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX));
#ifdef HAVE_NET_DEVICE_OPS
netdev->netdev_ops = &cxgb4vf_netdev_ops;
@@ -2738,7 +2707,7 @@ static int __devinit cxgb4vf_pci_probe(struct pci_dev *pdev,
cfg_queues(adapter);
/*
- * Print a short notice on the existance and configuration of the new
+ * Print a short notice on the existence and configuration of the new
* VF network device ...
*/
for_each_port(adapter, pidx) {
diff --git a/drivers/net/cxgb4vf/sge.c b/drivers/net/cxgb4vf/sge.c
index e0b3d1bc2fdf..5182960e29fd 100644
--- a/drivers/net/cxgb4vf/sge.c
+++ b/drivers/net/cxgb4vf/sge.c
@@ -224,8 +224,8 @@ static inline bool is_buf_mapped(const struct rx_sw_desc *sdesc)
/**
* need_skb_unmap - does the platform need unmapping of sk_buffs?
*
- * Returns true if the platfrom needs sk_buff unmapping. The compiler
- * optimizes away unecessary code if this returns true.
+ * Returns true if the platform needs sk_buff unmapping. The compiler
+ * optimizes away unnecessary code if this returns true.
*/
static inline int need_skb_unmap(void)
{
@@ -267,7 +267,7 @@ static inline unsigned int fl_cap(const struct sge_fl *fl)
*
* Tests specified Free List to see whether the number of buffers
* available to the hardware has falled below our "starvation"
- * threshhold.
+ * threshold.
*/
static inline bool fl_starving(const struct sge_fl *fl)
{
@@ -1149,7 +1149,7 @@ int t4vf_eth_xmit(struct sk_buff *skb, struct net_device *dev)
if (unlikely(credits < ETHTXQ_STOP_THRES)) {
/*
* After we're done injecting the Work Request for this
- * packet, we'll be below our "stop threshhold" so stop the TX
+ * packet, we'll be below our "stop threshold" so stop the TX
* Queue now and schedule a request for an SGE Egress Queue
* Update message. The queue will get started later on when
* the firmware processes this Work Request and sends us an
@@ -1555,8 +1555,8 @@ int t4vf_ethrx_handler(struct sge_rspq *rspq, const __be64 *rsp,
pi = netdev_priv(skb->dev);
rxq->stats.pkts++;
- if (csum_ok && (pi->rx_offload & RX_CSO) && !pkt->err_vec &&
- (be32_to_cpu(pkt->l2info) & (RXF_UDP|RXF_TCP))) {
+ if (csum_ok && (rspq->netdev->features & NETIF_F_RXCSUM) &&
+ !pkt->err_vec && (be32_to_cpu(pkt->l2info) & (RXF_UDP|RXF_TCP))) {
if (!pkt->ip_frag)
skb->ip_summed = CHECKSUM_UNNECESSARY;
else {
diff --git a/drivers/net/davinci_emac.c b/drivers/net/davinci_emac.c
index baca6bfcb089..807b6bb200eb 100644
--- a/drivers/net/davinci_emac.c
+++ b/drivers/net/davinci_emac.c
@@ -94,14 +94,14 @@ MODULE_VERSION(EMAC_MODULE_VERSION);
static const char emac_version_string[] = "TI DaVinci EMAC Linux v6.1";
/* Configuration items */
-#define EMAC_DEF_PASS_CRC (0) /* Do not pass CRC upto frames */
+#define EMAC_DEF_PASS_CRC (0) /* Do not pass CRC up to frames */
#define EMAC_DEF_QOS_EN (0) /* EMAC proprietary QoS disabled */
#define EMAC_DEF_NO_BUFF_CHAIN (0) /* No buffer chain */
#define EMAC_DEF_MACCTRL_FRAME_EN (0) /* Discard Maccontrol frames */
#define EMAC_DEF_SHORT_FRAME_EN (0) /* Discard short frames */
#define EMAC_DEF_ERROR_FRAME_EN (0) /* Discard error frames */
-#define EMAC_DEF_PROM_EN (0) /* Promiscous disabled */
-#define EMAC_DEF_PROM_CH (0) /* Promiscous channel is 0 */
+#define EMAC_DEF_PROM_EN (0) /* Promiscuous disabled */
+#define EMAC_DEF_PROM_CH (0) /* Promiscuous channel is 0 */
#define EMAC_DEF_BCAST_EN (1) /* Broadcast enabled */
#define EMAC_DEF_BCAST_CH (0) /* Broadcast channel is 0 */
#define EMAC_DEF_MCAST_EN (1) /* Multicast enabled */
@@ -1013,7 +1013,7 @@ static void emac_rx_handler(void *token, int len, int status)
return;
}
- /* recycle on recieve error */
+ /* recycle on receive error */
if (status < 0) {
ndev->stats.rx_errors++;
goto recycle;
diff --git a/drivers/net/depca.c b/drivers/net/depca.c
index 8b0084d17c8c..17654059922d 100644
--- a/drivers/net/depca.c
+++ b/drivers/net/depca.c
@@ -331,18 +331,18 @@ static struct {
"DE422",\
""}
-static char* __initdata depca_signature[] = DEPCA_SIGNATURE;
+static const char* const depca_signature[] __devinitconst = DEPCA_SIGNATURE;
enum depca_type {
DEPCA, de100, de101, de200, de201, de202, de210, de212, de422, unknown
};
-static char depca_string[] = "depca";
+static const char depca_string[] = "depca";
static int depca_device_remove (struct device *device);
#ifdef CONFIG_EISA
-static struct eisa_device_id depca_eisa_ids[] = {
+static const struct eisa_device_id depca_eisa_ids[] __devinitconst = {
{ "DEC4220", de422 },
{ "" }
};
@@ -367,19 +367,19 @@ static struct eisa_driver depca_eisa_driver = {
#define DE210_ID 0x628d
#define DE212_ID 0x6def
-static short depca_mca_adapter_ids[] = {
+static const short depca_mca_adapter_ids[] __devinitconst = {
DE210_ID,
DE212_ID,
0x0000
};
-static char *depca_mca_adapter_name[] = {
+static const char *depca_mca_adapter_name[] = {
"DEC EtherWORKS MC Adapter (DE210)",
"DEC EtherWORKS MC Adapter (DE212)",
NULL
};
-static enum depca_type depca_mca_adapter_type[] = {
+static const enum depca_type depca_mca_adapter_type[] = {
de210,
de212,
0
@@ -541,10 +541,9 @@ static void SetMulticastFilter(struct net_device *dev);
static int load_packet(struct net_device *dev, struct sk_buff *skb);
static void depca_dbg_open(struct net_device *dev);
-static u_char de1xx_irq[] __initdata = { 2, 3, 4, 5, 7, 9, 0 };
-static u_char de2xx_irq[] __initdata = { 5, 9, 10, 11, 15, 0 };
-static u_char de422_irq[] __initdata = { 5, 9, 10, 11, 0 };
-static u_char *depca_irq;
+static const u_char de1xx_irq[] __devinitconst = { 2, 3, 4, 5, 7, 9, 0 };
+static const u_char de2xx_irq[] __devinitconst = { 5, 9, 10, 11, 15, 0 };
+static const u_char de422_irq[] __devinitconst = { 5, 9, 10, 11, 0 };
static int irq;
static int io;
@@ -580,7 +579,7 @@ static const struct net_device_ops depca_netdev_ops = {
.ndo_validate_addr = eth_validate_addr,
};
-static int __init depca_hw_init (struct net_device *dev, struct device *device)
+static int __devinit depca_hw_init (struct net_device *dev, struct device *device)
{
struct depca_private *lp;
int i, j, offset, netRAM, mem_len, status = 0;
@@ -748,6 +747,7 @@ static int __init depca_hw_init (struct net_device *dev, struct device *device)
if (dev->irq < 2) {
unsigned char irqnum;
unsigned long irq_mask, delay;
+ const u_char *depca_irq;
irq_mask = probe_irq_on();
@@ -770,6 +770,7 @@ static int __init depca_hw_init (struct net_device *dev, struct device *device)
break;
default:
+ depca_irq = NULL;
break; /* Not reached */
}
@@ -1302,7 +1303,7 @@ static void SetMulticastFilter(struct net_device *dev)
}
}
-static int __init depca_common_init (u_long ioaddr, struct net_device **devp)
+static int __devinit depca_common_init (u_long ioaddr, struct net_device **devp)
{
int status = 0;
@@ -1333,7 +1334,7 @@ static int __init depca_common_init (u_long ioaddr, struct net_device **devp)
/*
** Microchannel bus I/O device probe
*/
-static int __init depca_mca_probe(struct device *device)
+static int __devinit depca_mca_probe(struct device *device)
{
unsigned char pos[2];
unsigned char where;
@@ -1457,7 +1458,7 @@ static int __init depca_mca_probe(struct device *device)
** ISA bus I/O device probe
*/
-static void __init depca_platform_probe (void)
+static void __devinit depca_platform_probe (void)
{
int i;
struct platform_device *pldev;
@@ -1497,7 +1498,7 @@ static void __init depca_platform_probe (void)
}
}
-static enum depca_type __init depca_shmem_probe (ulong *mem_start)
+static enum depca_type __devinit depca_shmem_probe (ulong *mem_start)
{
u_long mem_base[] = DEPCA_RAM_BASE_ADDRESSES;
enum depca_type adapter = unknown;
@@ -1558,7 +1559,7 @@ static int __devinit depca_isa_probe (struct platform_device *device)
*/
#ifdef CONFIG_EISA
-static int __init depca_eisa_probe (struct device *device)
+static int __devinit depca_eisa_probe (struct device *device)
{
enum depca_type adapter = unknown;
struct eisa_device *edev;
@@ -1629,7 +1630,7 @@ static int __devexit depca_device_remove (struct device *device)
** and Boot (readb) ROM. This will also give us a clue to the network RAM
** base address.
*/
-static int __init DepcaSignature(char *name, u_long base_addr)
+static int __devinit DepcaSignature(char *name, u_long base_addr)
{
u_int i, j, k;
void __iomem *ptr;
diff --git a/drivers/net/dl2k.c b/drivers/net/dl2k.c
index c05db6046050..c445457b66d5 100644
--- a/drivers/net/dl2k.c
+++ b/drivers/net/dl2k.c
@@ -1189,10 +1189,10 @@ static int rio_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
cmd->transceiver = XCVR_INTERNAL;
}
if ( np->link_status ) {
- cmd->speed = np->speed;
+ ethtool_cmd_speed_set(cmd, np->speed);
cmd->duplex = np->full_duplex ? DUPLEX_FULL : DUPLEX_HALF;
} else {
- cmd->speed = -1;
+ ethtool_cmd_speed_set(cmd, -1);
cmd->duplex = -1;
}
if ( np->an_enable)
@@ -1219,31 +1219,20 @@ static int rio_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
} else {
np->an_enable = 0;
if (np->speed == 1000) {
- cmd->speed = SPEED_100;
+ ethtool_cmd_speed_set(cmd, SPEED_100);
cmd->duplex = DUPLEX_FULL;
printk("Warning!! Can't disable Auto negotiation in 1000Mbps, change to Manual 100Mbps, Full duplex.\n");
}
- switch(cmd->speed + cmd->duplex) {
-
- case SPEED_10 + DUPLEX_HALF:
- np->speed = 10;
- np->full_duplex = 0;
- break;
-
- case SPEED_10 + DUPLEX_FULL:
+ switch (ethtool_cmd_speed(cmd)) {
+ case SPEED_10:
np->speed = 10;
- np->full_duplex = 1;
+ np->full_duplex = (cmd->duplex == DUPLEX_FULL);
break;
- case SPEED_100 + DUPLEX_HALF:
+ case SPEED_100:
np->speed = 100;
- np->full_duplex = 0;
- break;
- case SPEED_100 + DUPLEX_FULL:
- np->speed = 100;
- np->full_duplex = 1;
+ np->full_duplex = (cmd->duplex == DUPLEX_FULL);
break;
- case SPEED_1000 + DUPLEX_HALF:/* not supported */
- case SPEED_1000 + DUPLEX_FULL:/* not supported */
+ case SPEED_1000: /* not supported */
default:
return -EINVAL;
}
diff --git a/drivers/net/dm9000.c b/drivers/net/dm9000.c
index b7af5bab9937..fbaff3584bd4 100644
--- a/drivers/net/dm9000.c
+++ b/drivers/net/dm9000.c
@@ -131,8 +131,6 @@ typedef struct board_info {
u32 msg_enable;
u32 wake_state;
- int rx_csum;
- int can_csum;
int ip_summed;
} board_info_t;
@@ -470,47 +468,20 @@ static int dm9000_nway_reset(struct net_device *dev)
return mii_nway_restart(&dm->mii);
}
-static uint32_t dm9000_get_rx_csum(struct net_device *dev)
+static int dm9000_set_features(struct net_device *dev, u32 features)
{
board_info_t *dm = to_dm9000_board(dev);
- return dm->rx_csum;
-}
-
-static int dm9000_set_rx_csum_unlocked(struct net_device *dev, uint32_t data)
-{
- board_info_t *dm = to_dm9000_board(dev);
-
- if (dm->can_csum) {
- dm->rx_csum = data;
- iow(dm, DM9000_RCSR, dm->rx_csum ? RCSR_CSUM : 0);
+ u32 changed = dev->features ^ features;
+ unsigned long flags;
+ if (!(changed & NETIF_F_RXCSUM))
return 0;
- }
-
- return -EOPNOTSUPP;
-}
-
-static int dm9000_set_rx_csum(struct net_device *dev, uint32_t data)
-{
- board_info_t *dm = to_dm9000_board(dev);
- unsigned long flags;
- int ret;
spin_lock_irqsave(&dm->lock, flags);
- ret = dm9000_set_rx_csum_unlocked(dev, data);
+ iow(dm, DM9000_RCSR, (features & NETIF_F_RXCSUM) ? RCSR_CSUM : 0);
spin_unlock_irqrestore(&dm->lock, flags);
- return ret;
-}
-
-static int dm9000_set_tx_csum(struct net_device *dev, uint32_t data)
-{
- board_info_t *dm = to_dm9000_board(dev);
- int ret = -EOPNOTSUPP;
-
- if (dm->can_csum)
- ret = ethtool_op_set_tx_csum(dev, data);
- return ret;
+ return 0;
}
static u32 dm9000_get_link(struct net_device *dev)
@@ -643,10 +614,6 @@ static const struct ethtool_ops dm9000_ethtool_ops = {
.get_eeprom_len = dm9000_get_eeprom_len,
.get_eeprom = dm9000_get_eeprom,
.set_eeprom = dm9000_set_eeprom,
- .get_rx_csum = dm9000_get_rx_csum,
- .set_rx_csum = dm9000_set_rx_csum,
- .get_tx_csum = ethtool_op_get_tx_csum,
- .set_tx_csum = dm9000_set_tx_csum,
};
static void dm9000_show_carrier(board_info_t *db,
@@ -800,7 +767,9 @@ dm9000_init_dm9000(struct net_device *dev)
db->io_mode = ior(db, DM9000_ISR) >> 6; /* ISR bit7:6 keeps I/O mode */
/* Checksum mode */
- dm9000_set_rx_csum_unlocked(dev, db->rx_csum);
+ if (dev->hw_features & NETIF_F_RXCSUM)
+ iow(db, DM9000_RCSR,
+ (dev->features & NETIF_F_RXCSUM) ? RCSR_CSUM : 0);
iow(db, DM9000_GPCR, GPCR_GEP_CNTL); /* Let GPIO0 output */
@@ -1049,7 +1018,7 @@ dm9000_rx(struct net_device *dev)
/* Pass to upper layer */
skb->protocol = eth_type_trans(skb, dev);
- if (db->rx_csum) {
+ if (dev->features & NETIF_F_RXCSUM) {
if ((((rxbyte & 0x1c) << 3) & rxbyte) == 0)
skb->ip_summed = CHECKSUM_UNNECESSARY;
else
@@ -1358,6 +1327,7 @@ static const struct net_device_ops dm9000_netdev_ops = {
.ndo_set_multicast_list = dm9000_hash_table,
.ndo_do_ioctl = dm9000_ioctl,
.ndo_change_mtu = eth_change_mtu,
+ .ndo_set_features = dm9000_set_features,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = eth_mac_addr,
#ifdef CONFIG_NET_POLL_CONTROLLER
@@ -1551,9 +1521,8 @@ dm9000_probe(struct platform_device *pdev)
/* dm9000a/b are capable of hardware checksum offload */
if (db->type == TYPE_DM9000A || db->type == TYPE_DM9000B) {
- db->can_csum = 1;
- db->rx_csum = 1;
- ndev->features |= NETIF_F_IP_CSUM;
+ ndev->hw_features = NETIF_F_RXCSUM | NETIF_F_IP_CSUM;
+ ndev->features |= ndev->hw_features;
}
/* from this point we assume that we have found a DM9000 */
diff --git a/drivers/net/dummy.c b/drivers/net/dummy.c
index ff2d29b17858..39cf9b9bd673 100644
--- a/drivers/net/dummy.c
+++ b/drivers/net/dummy.c
@@ -168,10 +168,6 @@ static int __init dummy_init_one(void)
if (!dev_dummy)
return -ENOMEM;
- err = dev_alloc_name(dev_dummy, dev_dummy->name);
- if (err < 0)
- goto err;
-
dev_dummy->rtnl_link_ops = &dummy_link_ops;
err = register_netdevice(dev_dummy);
if (err < 0)
diff --git a/drivers/net/e100.c b/drivers/net/e100.c
index b0aa9e68990a..e336c7937f05 100644
--- a/drivers/net/e100.c
+++ b/drivers/net/e100.c
@@ -593,7 +593,6 @@ struct nic {
enum phy phy;
struct params params;
struct timer_list watchdog;
- struct timer_list blink_timer;
struct mii_if_info mii;
struct work_struct tx_timeout_task;
enum loopback loopback;
@@ -618,7 +617,6 @@ struct nic {
u32 rx_tco_frames;
u32 rx_over_length_errors;
- u16 leds;
u16 eeprom_wc;
__le16 eeprom[256];
spinlock_t mdio_lock;
@@ -1512,7 +1510,7 @@ static int e100_phy_init(struct nic *nic)
static int e100_hw_init(struct nic *nic)
{
- int err;
+ int err = 0;
e100_hw_reset(nic);
@@ -1668,7 +1666,8 @@ static void e100_adjust_adaptive_ifs(struct nic *nic, int speed, int duplex)
static void e100_watchdog(unsigned long data)
{
struct nic *nic = (struct nic *)data;
- struct ethtool_cmd cmd;
+ struct ethtool_cmd cmd = { .cmd = ETHTOOL_GSET };
+ u32 speed;
netif_printk(nic, timer, KERN_DEBUG, nic->netdev,
"right now = %ld\n", jiffies);
@@ -1676,10 +1675,11 @@ static void e100_watchdog(unsigned long data)
/* mii library handles link maintenance tasks */
mii_ethtool_gset(&nic->mii, &cmd);
+ speed = ethtool_cmd_speed(&cmd);
if (mii_link_ok(&nic->mii) && !netif_carrier_ok(nic->netdev)) {
netdev_info(nic->netdev, "NIC Link is Up %u Mbps %s Duplex\n",
- cmd.speed == SPEED_100 ? 100 : 10,
+ speed == SPEED_100 ? 100 : 10,
cmd.duplex == DUPLEX_FULL ? "Full" : "Half");
} else if (!mii_link_ok(&nic->mii) && netif_carrier_ok(nic->netdev)) {
netdev_info(nic->netdev, "NIC Link is Down\n");
@@ -1698,13 +1698,13 @@ static void e100_watchdog(unsigned long data)
spin_unlock_irq(&nic->cmd_lock);
e100_update_stats(nic);
- e100_adjust_adaptive_ifs(nic, cmd.speed, cmd.duplex);
+ e100_adjust_adaptive_ifs(nic, speed, cmd.duplex);
if (nic->mac <= mac_82557_D100_C)
/* Issue a multicast command to workaround a 557 lock up */
e100_set_multicast_list(nic->netdev);
- if (nic->flags & ich && cmd.speed==SPEED_10 && cmd.duplex==DUPLEX_HALF)
+ if (nic->flags & ich && speed == SPEED_10 && cmd.duplex == DUPLEX_HALF)
/* Need SW workaround for ICH[x] 10Mbps/half duplex Tx hang. */
nic->flags |= ich_10h_workaround;
else
@@ -2351,30 +2351,6 @@ err_clean_rx:
#define E100_82552_LED_OVERRIDE 0x19
#define E100_82552_LED_ON 0x000F /* LEDTX and LED_RX both on */
#define E100_82552_LED_OFF 0x000A /* LEDTX and LED_RX both off */
-static void e100_blink_led(unsigned long data)
-{
- struct nic *nic = (struct nic *)data;
- enum led_state {
- led_on = 0x01,
- led_off = 0x04,
- led_on_559 = 0x05,
- led_on_557 = 0x07,
- };
- u16 led_reg = MII_LED_CONTROL;
-
- if (nic->phy == phy_82552_v) {
- led_reg = E100_82552_LED_OVERRIDE;
-
- nic->leds = (nic->leds == E100_82552_LED_ON) ?
- E100_82552_LED_OFF : E100_82552_LED_ON;
- } else {
- nic->leds = (nic->leds & led_on) ? led_off :
- (nic->mac < mac_82559_D101M) ? led_on_557 :
- led_on_559;
- }
- mdio_write(nic->netdev, nic->mii.phy_id, led_reg, nic->leds);
- mod_timer(&nic->blink_timer, jiffies + HZ / 4);
-}
static int e100_get_settings(struct net_device *netdev, struct ethtool_cmd *cmd)
{
@@ -2598,19 +2574,38 @@ static void e100_diag_test(struct net_device *netdev,
msleep_interruptible(4 * 1000);
}
-static int e100_phys_id(struct net_device *netdev, u32 data)
+static int e100_set_phys_id(struct net_device *netdev,
+ enum ethtool_phys_id_state state)
{
struct nic *nic = netdev_priv(netdev);
+ enum led_state {
+ led_on = 0x01,
+ led_off = 0x04,
+ led_on_559 = 0x05,
+ led_on_557 = 0x07,
+ };
u16 led_reg = (nic->phy == phy_82552_v) ? E100_82552_LED_OVERRIDE :
- MII_LED_CONTROL;
+ MII_LED_CONTROL;
+ u16 leds = 0;
- if (!data || data > (u32)(MAX_SCHEDULE_TIMEOUT / HZ))
- data = (u32)(MAX_SCHEDULE_TIMEOUT / HZ);
- mod_timer(&nic->blink_timer, jiffies);
- msleep_interruptible(data * 1000);
- del_timer_sync(&nic->blink_timer);
- mdio_write(netdev, nic->mii.phy_id, led_reg, 0);
+ switch (state) {
+ case ETHTOOL_ID_ACTIVE:
+ return 2;
+
+ case ETHTOOL_ID_ON:
+ leds = (nic->phy == phy_82552_v) ? E100_82552_LED_ON :
+ (nic->mac < mac_82559_D101M) ? led_on_557 : led_on_559;
+ break;
+
+ case ETHTOOL_ID_OFF:
+ leds = (nic->phy == phy_82552_v) ? E100_82552_LED_OFF : led_off;
+ break;
+
+ case ETHTOOL_ID_INACTIVE:
+ break;
+ }
+ mdio_write(netdev, nic->mii.phy_id, led_reg, leds);
return 0;
}
@@ -2691,7 +2686,7 @@ static const struct ethtool_ops e100_ethtool_ops = {
.set_ringparam = e100_set_ringparam,
.self_test = e100_diag_test,
.get_strings = e100_get_strings,
- .phys_id = e100_phys_id,
+ .set_phys_id = e100_set_phys_id,
.get_ethtool_stats = e100_get_ethtool_stats,
.get_sset_count = e100_get_sset_count,
};
@@ -2832,9 +2827,6 @@ static int __devinit e100_probe(struct pci_dev *pdev,
init_timer(&nic->watchdog);
nic->watchdog.function = e100_watchdog;
nic->watchdog.data = (unsigned long)nic;
- init_timer(&nic->blink_timer);
- nic->blink_timer.function = e100_blink_led;
- nic->blink_timer.data = (unsigned long)nic;
INIT_WORK(&nic->tx_timeout_task, e100_tx_timeout_task);
diff --git a/drivers/net/e1000/e1000.h b/drivers/net/e1000/e1000.h
index a881dd0093bd..8676899120c3 100644
--- a/drivers/net/e1000/e1000.h
+++ b/drivers/net/e1000/e1000.h
@@ -238,9 +238,6 @@ struct e1000_adapter {
struct work_struct reset_task;
u8 fc_autoneg;
- struct timer_list blink_timer;
- unsigned long led_status;
-
/* TX */
struct e1000_tx_ring *tx_ring; /* One per active queue */
unsigned int restart_queue;
@@ -349,7 +346,7 @@ extern int e1000_up(struct e1000_adapter *adapter);
extern void e1000_down(struct e1000_adapter *adapter);
extern void e1000_reinit_locked(struct e1000_adapter *adapter);
extern void e1000_reset(struct e1000_adapter *adapter);
-extern int e1000_set_spd_dplx(struct e1000_adapter *adapter, u16 spddplx);
+extern int e1000_set_spd_dplx(struct e1000_adapter *adapter, u32 spd, u8 dplx);
extern int e1000_setup_all_rx_resources(struct e1000_adapter *adapter);
extern int e1000_setup_all_tx_resources(struct e1000_adapter *adapter);
extern void e1000_free_all_rx_resources(struct e1000_adapter *adapter);
diff --git a/drivers/net/e1000/e1000_ethtool.c b/drivers/net/e1000/e1000_ethtool.c
index f4d0922ec65b..ec0fa426cce2 100644
--- a/drivers/net/e1000/e1000_ethtool.c
+++ b/drivers/net/e1000/e1000_ethtool.c
@@ -158,9 +158,9 @@ static int e1000_get_settings(struct net_device *netdev,
e1000_get_speed_and_duplex(hw, &adapter->link_speed,
&adapter->link_duplex);
- ecmd->speed = adapter->link_speed;
+ ethtool_cmd_speed_set(ecmd, adapter->link_speed);
- /* unfortunatly FULL_DUPLEX != DUPLEX_FULL
+ /* unfortunately FULL_DUPLEX != DUPLEX_FULL
* and HALF_DUPLEX != DUPLEX_HALF */
if (adapter->link_duplex == FULL_DUPLEX)
@@ -168,7 +168,7 @@ static int e1000_get_settings(struct net_device *netdev,
else
ecmd->duplex = DUPLEX_HALF;
} else {
- ecmd->speed = -1;
+ ethtool_cmd_speed_set(ecmd, -1);
ecmd->duplex = -1;
}
@@ -197,11 +197,13 @@ static int e1000_set_settings(struct net_device *netdev,
ADVERTISED_TP |
ADVERTISED_Autoneg;
ecmd->advertising = hw->autoneg_advertised;
- } else
- if (e1000_set_spd_dplx(adapter, ecmd->speed + ecmd->duplex)) {
+ } else {
+ u32 speed = ethtool_cmd_speed(ecmd);
+ if (e1000_set_spd_dplx(adapter, speed, ecmd->duplex)) {
clear_bit(__E1000_RESETTING, &adapter->flags);
return -EINVAL;
}
+ }
/* reset the link */
@@ -1753,46 +1755,28 @@ static int e1000_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
return 0;
}
-/* toggle LED 4 times per second = 2 "blinks" per second */
-#define E1000_ID_INTERVAL (HZ/4)
-
-/* bit defines for adapter->led_status */
-#define E1000_LED_ON 0
-
-static void e1000_led_blink_callback(unsigned long data)
+static int e1000_set_phys_id(struct net_device *netdev,
+ enum ethtool_phys_id_state state)
{
- struct e1000_adapter *adapter = (struct e1000_adapter *) data;
+ struct e1000_adapter *adapter = netdev_priv(netdev);
struct e1000_hw *hw = &adapter->hw;
- if (test_and_change_bit(E1000_LED_ON, &adapter->led_status))
- e1000_led_off(hw);
- else
- e1000_led_on(hw);
+ switch (state) {
+ case ETHTOOL_ID_ACTIVE:
+ e1000_setup_led(hw);
+ return 2;
- mod_timer(&adapter->blink_timer, jiffies + E1000_ID_INTERVAL);
-}
-
-static int e1000_phys_id(struct net_device *netdev, u32 data)
-{
- struct e1000_adapter *adapter = netdev_priv(netdev);
- struct e1000_hw *hw = &adapter->hw;
+ case ETHTOOL_ID_ON:
+ e1000_led_on(hw);
+ break;
- if (!data)
- data = INT_MAX;
+ case ETHTOOL_ID_OFF:
+ e1000_led_off(hw);
+ break;
- if (!adapter->blink_timer.function) {
- init_timer(&adapter->blink_timer);
- adapter->blink_timer.function = e1000_led_blink_callback;
- adapter->blink_timer.data = (unsigned long)adapter;
+ case ETHTOOL_ID_INACTIVE:
+ e1000_cleanup_led(hw);
}
- e1000_setup_led(hw);
- mod_timer(&adapter->blink_timer, jiffies);
- msleep_interruptible(data * 1000);
- del_timer_sync(&adapter->blink_timer);
-
- e1000_led_off(hw);
- clear_bit(E1000_LED_ON, &adapter->led_status);
- e1000_cleanup_led(hw);
return 0;
}
@@ -1929,7 +1913,7 @@ static const struct ethtool_ops e1000_ethtool_ops = {
.set_tso = e1000_set_tso,
.self_test = e1000_diag_test,
.get_strings = e1000_get_strings,
- .phys_id = e1000_phys_id,
+ .set_phys_id = e1000_set_phys_id,
.get_ethtool_stats = e1000_get_ethtool_stats,
.get_sset_count = e1000_get_sset_count,
.get_coalesce = e1000_get_coalesce,
diff --git a/drivers/net/e1000/e1000_hw.h b/drivers/net/e1000/e1000_hw.h
index c70b23d52284..5c9a8403668b 100644
--- a/drivers/net/e1000/e1000_hw.h
+++ b/drivers/net/e1000/e1000_hw.h
@@ -1026,7 +1026,7 @@ extern void __iomem *ce4100_gbe_mdio_base_virt;
#define E1000_KUMCTRLSTA 0x00034 /* MAC-PHY interface - RW */
#define E1000_MDPHYA 0x0003C /* PHY address - RW */
-#define E1000_MANC2H 0x05860 /* Managment Control To Host - RW */
+#define E1000_MANC2H 0x05860 /* Management Control To Host - RW */
#define E1000_SW_FW_SYNC 0x05B5C /* Software-Firmware Synchronization - RW */
#define E1000_GCR 0x05B00 /* PCI-Ex Control */
diff --git a/drivers/net/e1000/e1000_main.c b/drivers/net/e1000/e1000_main.c
index bfab14092d2c..c18cb8e883dd 100644
--- a/drivers/net/e1000/e1000_main.c
+++ b/drivers/net/e1000/e1000_main.c
@@ -96,7 +96,6 @@ int e1000_up(struct e1000_adapter *adapter);
void e1000_down(struct e1000_adapter *adapter);
void e1000_reinit_locked(struct e1000_adapter *adapter);
void e1000_reset(struct e1000_adapter *adapter);
-int e1000_set_spd_dplx(struct e1000_adapter *adapter, u16 spddplx);
int e1000_setup_all_tx_resources(struct e1000_adapter *adapter);
int e1000_setup_all_rx_resources(struct e1000_adapter *adapter);
void e1000_free_all_tx_resources(struct e1000_adapter *adapter);
@@ -205,7 +204,7 @@ static struct pci_driver e1000_driver = {
.probe = e1000_probe,
.remove = __devexit_p(e1000_remove),
#ifdef CONFIG_PM
- /* Power Managment Hooks */
+ /* Power Management Hooks */
.suspend = e1000_suspend,
.resume = e1000_resume,
#endif
@@ -4385,7 +4384,6 @@ static int e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr,
struct mii_ioctl_data *data = if_mii(ifr);
int retval;
u16 mii_reg;
- u16 spddplx;
unsigned long flags;
if (hw->media_type != e1000_media_type_copper)
@@ -4424,17 +4422,18 @@ static int e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr,
hw->autoneg = 1;
hw->autoneg_advertised = 0x2F;
} else {
+ u32 speed;
if (mii_reg & 0x40)
- spddplx = SPEED_1000;
+ speed = SPEED_1000;
else if (mii_reg & 0x2000)
- spddplx = SPEED_100;
+ speed = SPEED_100;
else
- spddplx = SPEED_10;
- spddplx += (mii_reg & 0x100)
- ? DUPLEX_FULL :
- DUPLEX_HALF;
- retval = e1000_set_spd_dplx(adapter,
- spddplx);
+ speed = SPEED_10;
+ retval = e1000_set_spd_dplx(
+ adapter, speed,
+ ((mii_reg & 0x100)
+ ? DUPLEX_FULL :
+ DUPLEX_HALF));
if (retval)
return retval;
}
@@ -4596,20 +4595,24 @@ static void e1000_restore_vlan(struct e1000_adapter *adapter)
}
}
-int e1000_set_spd_dplx(struct e1000_adapter *adapter, u16 spddplx)
+int e1000_set_spd_dplx(struct e1000_adapter *adapter, u32 spd, u8 dplx)
{
struct e1000_hw *hw = &adapter->hw;
hw->autoneg = 0;
+ /* Make sure dplx is at most 1 bit and lsb of speed is not set
+ * for the switch() below to work */
+ if ((spd & 1) || (dplx & ~1))
+ goto err_inval;
+
/* Fiber NICs only allow 1000 gbps Full duplex */
if ((hw->media_type == e1000_media_type_fiber) &&
- spddplx != (SPEED_1000 + DUPLEX_FULL)) {
- e_err(probe, "Unsupported Speed/Duplex configuration\n");
- return -EINVAL;
- }
+ spd != SPEED_1000 &&
+ dplx != DUPLEX_FULL)
+ goto err_inval;
- switch (spddplx) {
+ switch (spd + dplx) {
case SPEED_10 + DUPLEX_HALF:
hw->forced_speed_duplex = e1000_10_half;
break;
@@ -4628,10 +4631,13 @@ int e1000_set_spd_dplx(struct e1000_adapter *adapter, u16 spddplx)
break;
case SPEED_1000 + DUPLEX_HALF: /* not supported */
default:
- e_err(probe, "Unsupported Speed/Duplex configuration\n");
- return -EINVAL;
+ goto err_inval;
}
return 0;
+
+err_inval:
+ e_err(probe, "Unsupported Speed/Duplex configuration\n");
+ return -EINVAL;
}
static int __e1000_shutdown(struct pci_dev *pdev, bool *enable_wake)
diff --git a/drivers/net/e1000e/82571.c b/drivers/net/e1000e/82571.c
index 89a69035e538..8295f2192439 100644
--- a/drivers/net/e1000e/82571.c
+++ b/drivers/net/e1000e/82571.c
@@ -300,6 +300,7 @@ static s32 e1000_init_mac_params_82571(struct e1000_adapter *adapter)
func->set_lan_id = e1000_set_lan_id_single_port;
func->check_mng_mode = e1000e_check_mng_mode_generic;
func->led_on = e1000e_led_on_generic;
+ func->blink_led = e1000e_blink_led_generic;
/* FWSM register */
mac->has_fwsm = true;
@@ -320,6 +321,7 @@ static s32 e1000_init_mac_params_82571(struct e1000_adapter *adapter)
default:
func->check_mng_mode = e1000e_check_mng_mode_generic;
func->led_on = e1000e_led_on_generic;
+ func->blink_led = e1000e_blink_led_generic;
/* FWSM register */
mac->has_fwsm = true;
@@ -431,9 +433,6 @@ static s32 e1000_get_variants_82571(struct e1000_adapter *adapter)
case e1000_82573:
case e1000_82574:
case e1000_82583:
- /* Disable ASPM L0s due to hardware errata */
- e1000e_disable_aspm(adapter->pdev, PCIE_LINK_STATE_L0S);
-
if (pdev->device == E1000_DEV_ID_82573L) {
adapter->flags |= FLAG_HAS_JUMBO_FRAMES;
adapter->max_hw_frame_size = DEFAULT_JUMBO;
@@ -594,7 +593,7 @@ static s32 e1000_get_hw_semaphore_82573(struct e1000_hw *hw)
extcnf_ctrl |= E1000_EXTCNF_CTRL_MDIO_SW_OWNERSHIP;
- msleep(2);
+ usleep_range(2000, 4000);
i++;
} while (i < MDIO_OWNERSHIP_TIMEOUT);
@@ -816,7 +815,7 @@ static s32 e1000_update_nvm_checksum_82571(struct e1000_hw *hw)
/* Check for pending operations. */
for (i = 0; i < E1000_FLASH_UPDATES; i++) {
- msleep(1);
+ usleep_range(1000, 2000);
if ((er32(EECD) & E1000_EECD_FLUPD) == 0)
break;
}
@@ -840,7 +839,7 @@ static s32 e1000_update_nvm_checksum_82571(struct e1000_hw *hw)
ew32(EECD, eecd);
for (i = 0; i < E1000_FLASH_UPDATES; i++) {
- msleep(1);
+ usleep_range(1000, 2000);
if ((er32(EECD) & E1000_EECD_FLUPD) == 0)
break;
}
@@ -930,7 +929,7 @@ static s32 e1000_get_cfg_done_82571(struct e1000_hw *hw)
if (er32(EEMNGCTL) &
E1000_NVM_CFG_DONE_PORT_0)
break;
- msleep(1);
+ usleep_range(1000, 2000);
timeout--;
}
if (!timeout) {
@@ -1037,7 +1036,7 @@ static s32 e1000_reset_hw_82571(struct e1000_hw *hw)
ew32(TCTL, E1000_TCTL_PSP);
e1e_flush();
- msleep(10);
+ usleep_range(10000, 20000);
/*
* Must acquire the MDIO ownership before MAC reset.
@@ -2066,7 +2065,8 @@ struct e1000_info e1000_82573_info = {
| FLAG_HAS_SMART_POWER_DOWN
| FLAG_HAS_AMT
| FLAG_HAS_SWSM_ON_LOAD,
- .flags2 = FLAG2_DISABLE_ASPM_L1,
+ .flags2 = FLAG2_DISABLE_ASPM_L1
+ | FLAG2_DISABLE_ASPM_L0S,
.pba = 20,
.max_hw_frame_size = ETH_FRAME_LEN + ETH_FCS_LEN,
.get_variants = e1000_get_variants_82571,
@@ -2086,7 +2086,8 @@ struct e1000_info e1000_82574_info = {
| FLAG_HAS_SMART_POWER_DOWN
| FLAG_HAS_AMT
| FLAG_HAS_CTRLEXT_ON_LOAD,
- .flags2 = FLAG2_CHECK_PHY_HANG,
+ .flags2 = FLAG2_CHECK_PHY_HANG
+ | FLAG2_DISABLE_ASPM_L0S,
.pba = 32,
.max_hw_frame_size = DEFAULT_JUMBO,
.get_variants = e1000_get_variants_82571,
@@ -2104,6 +2105,7 @@ struct e1000_info e1000_82583_info = {
| FLAG_HAS_SMART_POWER_DOWN
| FLAG_HAS_AMT
| FLAG_HAS_CTRLEXT_ON_LOAD,
+ .flags2 = FLAG2_DISABLE_ASPM_L0S,
.pba = 32,
.max_hw_frame_size = ETH_FRAME_LEN + ETH_FCS_LEN,
.get_variants = e1000_get_variants_82571,
diff --git a/drivers/net/e1000e/e1000.h b/drivers/net/e1000e/e1000.h
index 00bf595ebd67..9549879e66a0 100644
--- a/drivers/net/e1000e/e1000.h
+++ b/drivers/net/e1000e/e1000.h
@@ -31,6 +31,7 @@
#ifndef _E1000_H_
#define _E1000_H_
+#include <linux/bitops.h>
#include <linux/types.h>
#include <linux/timer.h>
#include <linux/workqueue.h>
@@ -39,6 +40,7 @@
#include <linux/pci.h>
#include <linux/pci-aspm.h>
#include <linux/crc32.h>
+#include <linux/if_vlan.h>
#include "hw.h"
@@ -280,7 +282,7 @@ struct e1000_adapter {
const struct e1000_info *ei;
- struct vlan_group *vlgrp;
+ unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
u32 bd_number;
u32 rx_buffer_len;
u16 mng_vlan_id;
@@ -389,13 +391,10 @@ struct e1000_adapter {
bool fc_autoneg;
- unsigned long led_status;
-
unsigned int flags;
unsigned int flags2;
struct work_struct downshift_task;
struct work_struct update_phy_task;
- struct work_struct led_blink_task;
struct work_struct print_hang_task;
bool idle_check;
@@ -456,6 +455,7 @@ struct e1000_info {
#define FLAG2_HAS_PHY_STATS (1 << 4)
#define FLAG2_HAS_EEE (1 << 5)
#define FLAG2_DMA_BURST (1 << 6)
+#define FLAG2_DISABLE_ASPM_L0S (1 << 7)
#define FLAG2_DISABLE_AIM (1 << 8)
#define FLAG2_CHECK_PHY_HANG (1 << 9)
@@ -484,7 +484,6 @@ extern const char e1000e_driver_version[];
extern void e1000e_check_options(struct e1000_adapter *adapter);
extern void e1000e_set_ethtool_ops(struct net_device *netdev);
-extern void e1000e_led_blink_task(struct work_struct *work);
extern int e1000e_up(struct e1000_adapter *adapter);
extern void e1000e_down(struct e1000_adapter *adapter);
@@ -502,7 +501,6 @@ extern void e1000e_set_interrupt_capability(struct e1000_adapter *adapter);
extern void e1000e_reset_interrupt_capability(struct e1000_adapter *adapter);
extern void e1000e_get_hw_control(struct e1000_adapter *adapter);
extern void e1000e_release_hw_control(struct e1000_adapter *adapter);
-extern void e1000e_disable_aspm(struct pci_dev *pdev, u16 state);
extern unsigned int copybreak;
@@ -573,7 +571,7 @@ extern s32 e1000e_valid_led_default(struct e1000_hw *hw, u16 *data);
extern void e1000e_config_collision_dist(struct e1000_hw *hw);
extern s32 e1000e_config_fc_after_link_up(struct e1000_hw *hw);
extern s32 e1000e_force_mac_fc(struct e1000_hw *hw);
-extern s32 e1000e_blink_led(struct e1000_hw *hw);
+extern s32 e1000e_blink_led_generic(struct e1000_hw *hw);
extern void e1000_write_vfta_generic(struct e1000_hw *hw, u32 offset, u32 value);
extern s32 e1000_check_alt_mac_addr_generic(struct e1000_hw *hw);
extern void e1000e_reset_adaptive(struct e1000_hw *hw);
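The e1000.h hunk above replaces the vlan_group pointer with a fixed bitmap of active VLAN IDs, which is all the driver needs once the 8021q core no longer hands out a group. A minimal sketch of that representation (hypothetical structure and helpers, not the driver's own):

#include <linux/bitops.h>
#include <linux/if_vlan.h>	/* VLAN_N_VID */
#include <linux/types.h>

struct vlan_tracker {
	/* one bit per possible VLAN ID (4096 of them) */
	unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
};

static void vlan_tracker_add(struct vlan_tracker *t, u16 vid)
{
	set_bit(vid, t->active_vlans);		/* .ndo_vlan_rx_add_vid */
}

static void vlan_tracker_kill(struct vlan_tracker *t, u16 vid)
{
	clear_bit(vid, t->active_vlans);	/* .ndo_vlan_rx_kill_vid */
}

static bool vlan_tracker_present(const struct vlan_tracker *t, u16 vid)
{
	return test_bit(vid, t->active_vlans);
}

The bitmap costs 512 bytes per adapter and turns the restore-after-reset loop into a straight bit scan (see the netdev.c hunk further down).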
diff --git a/drivers/net/e1000e/es2lan.c b/drivers/net/e1000e/es2lan.c
index 2fefa820302b..f4bbeb22f51f 100644
--- a/drivers/net/e1000e/es2lan.c
+++ b/drivers/net/e1000e/es2lan.c
@@ -612,7 +612,7 @@ static s32 e1000_get_cfg_done_80003es2lan(struct e1000_hw *hw)
while (timeout) {
if (er32(EEMNGCTL) & mask)
break;
- msleep(1);
+ usleep_range(1000, 2000);
timeout--;
}
if (!timeout) {
@@ -802,7 +802,7 @@ static s32 e1000_reset_hw_80003es2lan(struct e1000_hw *hw)
ew32(TCTL, E1000_TCTL_PSP);
e1e_flush();
- msleep(10);
+ usleep_range(10000, 20000);
ctrl = er32(CTRL);
@@ -1434,6 +1434,7 @@ static void e1000_clear_hw_cntrs_80003es2lan(struct e1000_hw *hw)
static struct e1000_mac_operations es2_mac_ops = {
.read_mac_addr = e1000_read_mac_addr_80003es2lan,
.id_led_init = e1000e_id_led_init,
+ .blink_led = e1000e_blink_led_generic,
.check_mng_mode = e1000e_check_mng_mode_generic,
/* check_for_link dependent on media type */
.cleanup_led = e1000e_cleanup_led_generic,
diff --git a/drivers/net/e1000e/ethtool.c b/drivers/net/e1000e/ethtool.c
index 07f09e96e453..859d0d3af6c9 100644
--- a/drivers/net/e1000e/ethtool.c
+++ b/drivers/net/e1000e/ethtool.c
@@ -122,6 +122,7 @@ static int e1000_get_settings(struct net_device *netdev,
{
struct e1000_adapter *adapter = netdev_priv(netdev);
struct e1000_hw *hw = &adapter->hw;
+ u32 speed;
if (hw->phy.media_type == e1000_media_type_copper) {
@@ -159,23 +160,23 @@ static int e1000_get_settings(struct net_device *netdev,
ecmd->transceiver = XCVR_EXTERNAL;
}
- ecmd->speed = -1;
+ speed = -1;
ecmd->duplex = -1;
if (netif_running(netdev)) {
if (netif_carrier_ok(netdev)) {
- ecmd->speed = adapter->link_speed;
+ speed = adapter->link_speed;
ecmd->duplex = adapter->link_duplex - 1;
}
} else {
u32 status = er32(STATUS);
if (status & E1000_STATUS_LU) {
if (status & E1000_STATUS_SPEED_1000)
- ecmd->speed = 1000;
+ speed = SPEED_1000;
else if (status & E1000_STATUS_SPEED_100)
- ecmd->speed = 100;
+ speed = SPEED_100;
else
- ecmd->speed = 10;
+ speed = SPEED_10;
if (status & E1000_STATUS_FD)
ecmd->duplex = DUPLEX_FULL;
@@ -184,6 +185,7 @@ static int e1000_get_settings(struct net_device *netdev,
}
}
+ ethtool_cmd_speed_set(ecmd, speed);
ecmd->autoneg = ((hw->phy.media_type == e1000_media_type_fiber) ||
hw->mac.autoneg) ? AUTONEG_ENABLE : AUTONEG_DISABLE;
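Writing the link speed through ethtool_cmd_speed_set() rather than assigning ecmd->speed directly is what lets values above 65535 Mb/s survive: the accessor splits the u32 across the legacy 16-bit speed field and speed_hi. A small sketch of the reporting side with an assumed link-state pair (not e1000e's own fields):

#include <linux/ethtool.h>
#include <linux/types.h>

/* Report speed/duplex into an ethtool_cmd; -1 is the conventional
 * "unknown" value for both fields when there is no link. */
static void report_link(struct ethtool_cmd *ecmd, bool link_up, u32 mbps)
{
	if (link_up) {
		ethtool_cmd_speed_set(ecmd, mbps);  /* fills speed + speed_hi */
		ecmd->duplex = DUPLEX_FULL;
	} else {
		ethtool_cmd_speed_set(ecmd, -1);
		ecmd->duplex = -1;
	}
}

The matching read accessor is ethtool_cmd_speed(), used below in e1000_set_settings() and in the enc28j60 and enic conversions.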
@@ -198,20 +200,25 @@ static int e1000_get_settings(struct net_device *netdev,
return 0;
}
-static int e1000_set_spd_dplx(struct e1000_adapter *adapter, u16 spddplx)
+static int e1000_set_spd_dplx(struct e1000_adapter *adapter, u32 spd, u8 dplx)
{
struct e1000_mac_info *mac = &adapter->hw.mac;
mac->autoneg = 0;
+ /* Make sure dplx is at most 1 bit and lsb of speed is not set
+ * for the switch() below to work */
+ if ((spd & 1) || (dplx & ~1))
+ goto err_inval;
+
/* Fiber NICs only allow 1000 gbps Full duplex */
if ((adapter->hw.phy.media_type == e1000_media_type_fiber) &&
- spddplx != (SPEED_1000 + DUPLEX_FULL)) {
- e_err("Unsupported Speed/Duplex configuration\n");
- return -EINVAL;
+ (spd != SPEED_1000 ||
+ dplx != DUPLEX_FULL)) {
+ goto err_inval;
}
- switch (spddplx) {
+ switch (spd + dplx) {
case SPEED_10 + DUPLEX_HALF:
mac->forced_speed_duplex = ADVERTISE_10_HALF;
break;
@@ -230,10 +237,13 @@ static int e1000_set_spd_dplx(struct e1000_adapter *adapter, u16 spddplx)
break;
case SPEED_1000 + DUPLEX_HALF: /* not supported */
default:
- e_err("Unsupported Speed/Duplex configuration\n");
- return -EINVAL;
+ goto err_inval;
}
return 0;
+
+err_inval:
+ e_err("Unsupported Speed/Duplex configuration\n");
+ return -EINVAL;
}
static int e1000_set_settings(struct net_device *netdev,
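The switch (spd + dplx) dispatch only works because every SPEED_* constant is even and DUPLEX_HALF/DUPLEX_FULL are 0/1, so the sum decodes uniquely; the new guard at the top of e1000_set_spd_dplx() enforces exactly that before the switch is reached. The same check in isolation (hypothetical helper name):

#include <linux/ethtool.h>
#include <linux/types.h>

/* A (speed + duplex) sum is unambiguous only if the speed has bit 0
 * clear and the duplex is 0 or 1; anything else must be rejected
 * before indexing a switch on the sum. */
static bool spd_dplx_valid(u32 spd, u8 dplx)
{
	return !(spd & 1) && !(dplx & ~1);
}

spd_dplx_valid(SPEED_100, DUPLEX_FULL) holds, while an odd speed such as 101, or a duplex value of 2, falls through to the err_inval path.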
@@ -253,7 +263,7 @@ static int e1000_set_settings(struct net_device *netdev,
}
while (test_and_set_bit(__E1000_RESETTING, &adapter->state))
- msleep(1);
+ usleep_range(1000, 2000);
if (ecmd->autoneg == AUTONEG_ENABLE) {
hw->mac.autoneg = 1;
@@ -269,7 +279,8 @@ static int e1000_set_settings(struct net_device *netdev,
if (adapter->fc_autoneg)
hw->fc.requested_mode = e1000_fc_default;
} else {
- if (e1000_set_spd_dplx(adapter, ecmd->speed + ecmd->duplex)) {
+ u32 speed = ethtool_cmd_speed(ecmd);
+ if (e1000_set_spd_dplx(adapter, speed, ecmd->duplex)) {
clear_bit(__E1000_RESETTING, &adapter->state);
return -EINVAL;
}
@@ -317,7 +328,7 @@ static int e1000_set_pauseparam(struct net_device *netdev,
adapter->fc_autoneg = pause->autoneg;
while (test_and_set_bit(__E1000_RESETTING, &adapter->state))
- msleep(1);
+ usleep_range(1000, 2000);
if (adapter->fc_autoneg == AUTONEG_ENABLE) {
hw->fc.requested_mode = e1000_fc_default;
@@ -673,7 +684,7 @@ static int e1000_set_ringparam(struct net_device *netdev,
return -EINVAL;
while (test_and_set_bit(__E1000_RESETTING, &adapter->state))
- msleep(1);
+ usleep_range(1000, 2000);
if (netif_running(adapter->netdev))
e1000e_down(adapter);
@@ -952,7 +963,7 @@ static int e1000_intr_test(struct e1000_adapter *adapter, u64 *data)
/* Disable all the interrupts */
ew32(IMC, 0xFFFFFFFF);
- msleep(10);
+ usleep_range(10000, 20000);
/* Test each interrupt */
for (i = 0; i < 10; i++) {
@@ -984,7 +995,7 @@ static int e1000_intr_test(struct e1000_adapter *adapter, u64 *data)
adapter->test_icr = 0;
ew32(IMC, mask);
ew32(ICS, mask);
- msleep(10);
+ usleep_range(10000, 20000);
if (adapter->test_icr & mask) {
*data = 3;
@@ -1002,7 +1013,7 @@ static int e1000_intr_test(struct e1000_adapter *adapter, u64 *data)
adapter->test_icr = 0;
ew32(IMS, mask);
ew32(ICS, mask);
- msleep(10);
+ usleep_range(10000, 20000);
if (!(adapter->test_icr & mask)) {
*data = 4;
@@ -1020,7 +1031,7 @@ static int e1000_intr_test(struct e1000_adapter *adapter, u64 *data)
adapter->test_icr = 0;
ew32(IMC, ~mask & 0x00007FFF);
ew32(ICS, ~mask & 0x00007FFF);
- msleep(10);
+ usleep_range(10000, 20000);
if (adapter->test_icr) {
*data = 5;
@@ -1031,7 +1042,7 @@ static int e1000_intr_test(struct e1000_adapter *adapter, u64 *data)
/* Disable all the interrupts */
ew32(IMC, 0xFFFFFFFF);
- msleep(10);
+ usleep_range(10000, 20000);
/* Unhook test interrupt handler */
free_irq(irq, netdev);
@@ -1406,7 +1417,7 @@ static int e1000_set_82571_fiber_loopback(struct e1000_adapter *adapter)
*/
#define E1000_SERDES_LB_ON 0x410
ew32(SCTL, E1000_SERDES_LB_ON);
- msleep(10);
+ usleep_range(10000, 20000);
return 0;
}
@@ -1501,7 +1512,7 @@ static void e1000_loopback_cleanup(struct e1000_adapter *adapter)
hw->phy.media_type == e1000_media_type_internal_serdes) {
#define E1000_SERDES_LB_OFF 0x400
ew32(SCTL, E1000_SERDES_LB_OFF);
- msleep(10);
+ usleep_range(10000, 20000);
break;
}
/* Fall Through */
@@ -1851,64 +1862,35 @@ static int e1000_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
return 0;
}
-/* toggle LED 4 times per second = 2 "blinks" per second */
-#define E1000_ID_INTERVAL (HZ/4)
-
-/* bit defines for adapter->led_status */
-#define E1000_LED_ON 0
-
-void e1000e_led_blink_task(struct work_struct *work)
-{
- struct e1000_adapter *adapter = container_of(work,
- struct e1000_adapter, led_blink_task);
-
- if (test_and_change_bit(E1000_LED_ON, &adapter->led_status))
- adapter->hw.mac.ops.led_off(&adapter->hw);
- else
- adapter->hw.mac.ops.led_on(&adapter->hw);
-}
-
-static void e1000_led_blink_callback(unsigned long data)
-{
- struct e1000_adapter *adapter = (struct e1000_adapter *) data;
-
- schedule_work(&adapter->led_blink_task);
- mod_timer(&adapter->blink_timer, jiffies + E1000_ID_INTERVAL);
-}
-
-static int e1000_phys_id(struct net_device *netdev, u32 data)
+static int e1000_set_phys_id(struct net_device *netdev,
+ enum ethtool_phys_id_state state)
{
struct e1000_adapter *adapter = netdev_priv(netdev);
struct e1000_hw *hw = &adapter->hw;
- if (!data)
- data = INT_MAX;
+ switch (state) {
+ case ETHTOOL_ID_ACTIVE:
+ if (!hw->mac.ops.blink_led)
+ return 2; /* cycle on/off twice per second */
- if ((hw->phy.type == e1000_phy_ife) ||
- (hw->mac.type == e1000_pchlan) ||
- (hw->mac.type == e1000_pch2lan) ||
- (hw->mac.type == e1000_82583) ||
- (hw->mac.type == e1000_82574)) {
- if (!adapter->blink_timer.function) {
- init_timer(&adapter->blink_timer);
- adapter->blink_timer.function =
- e1000_led_blink_callback;
- adapter->blink_timer.data = (unsigned long) adapter;
- }
- mod_timer(&adapter->blink_timer, jiffies);
- msleep_interruptible(data * 1000);
- del_timer_sync(&adapter->blink_timer);
+ hw->mac.ops.blink_led(hw);
+ break;
+
+ case ETHTOOL_ID_INACTIVE:
if (hw->phy.type == e1000_phy_ife)
e1e_wphy(hw, IFE_PHY_SPECIAL_CONTROL_LED, 0);
- } else {
- e1000e_blink_led(hw);
- msleep_interruptible(data * 1000);
- }
+ hw->mac.ops.led_off(hw);
+ hw->mac.ops.cleanup_led(hw);
+ break;
- hw->mac.ops.led_off(hw);
- clear_bit(E1000_LED_ON, &adapter->led_status);
- hw->mac.ops.cleanup_led(hw);
+ case ETHTOOL_ID_ON:
+ adapter->hw.mac.ops.led_on(&adapter->hw);
+ break;
+ case ETHTOOL_ID_OFF:
+ adapter->hw.mac.ops.led_off(&adapter->hw);
+ break;
+ }
return 0;
}
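The old phys_id callback had to run its own timer, work item, and msleep loop to blink the LED; set_phys_id() inverts that, making the driver a small state machine driven by the ethtool core. Returning a positive count from ETHTOOL_ID_ACTIVE asks the core to call back with alternating ETHTOOL_ID_ON/ETHTOOL_ID_OFF at that many cycles per second; returning 0 means the hardware blinks on its own until ETHTOOL_ID_INACTIVE. A minimal sketch of the contract with stubbed LED helpers (the helpers are assumptions, not e1000e code):

#include <linux/ethtool.h>
#include <linux/netdevice.h>

static void demo_led_on(struct net_device *netdev) { }
static void demo_led_off(struct net_device *netdev) { }
static void demo_led_restore(struct net_device *netdev) { }

static int demo_set_phys_id(struct net_device *netdev,
			    enum ethtool_phys_id_state state)
{
	switch (state) {
	case ETHTOOL_ID_ACTIVE:
		return 2;	/* no HW blink: toggle on/off twice a second */
	case ETHTOOL_ID_ON:
		demo_led_on(netdev);
		break;
	case ETHTOOL_ID_OFF:
		demo_led_off(netdev);
		break;
	case ETHTOOL_ID_INACTIVE:
		demo_led_restore(netdev);	/* undo any LED overrides */
		break;
	}
	return 0;
}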
@@ -2020,6 +2002,31 @@ static void e1000_get_strings(struct net_device *netdev, u32 stringset,
}
}
+static int e1000e_set_flags(struct net_device *netdev, u32 data)
+{
+ struct e1000_adapter *adapter = netdev_priv(netdev);
+ bool need_reset = false;
+ int rc;
+
+ need_reset = (data & ETH_FLAG_RXVLAN) !=
+ (netdev->features & NETIF_F_HW_VLAN_RX);
+
+ rc = ethtool_op_set_flags(netdev, data, ETH_FLAG_RXVLAN |
+ ETH_FLAG_TXVLAN);
+
+ if (rc)
+ return rc;
+
+ if (need_reset) {
+ if (netif_running(netdev))
+ e1000e_reinit_locked(adapter);
+ else
+ e1000e_reset(adapter);
+ }
+
+ return 0;
+}
+
static const struct ethtool_ops e1000_ethtool_ops = {
.get_settings = e1000_get_settings,
.set_settings = e1000_set_settings,
@@ -2049,12 +2056,13 @@ static const struct ethtool_ops e1000_ethtool_ops = {
.set_tso = e1000_set_tso,
.self_test = e1000_diag_test,
.get_strings = e1000_get_strings,
- .phys_id = e1000_phys_id,
+ .set_phys_id = e1000_set_phys_id,
.get_ethtool_stats = e1000_get_ethtool_stats,
.get_sset_count = e1000e_get_sset_count,
.get_coalesce = e1000_get_coalesce,
.set_coalesce = e1000_set_coalesce,
.get_flags = ethtool_op_get_flags,
+ .set_flags = e1000e_set_flags,
};
void e1000e_set_ethtool_ops(struct net_device *netdev)
diff --git a/drivers/net/e1000e/hw.h b/drivers/net/e1000e/hw.h
index 307e1ec22417..6c2fa8327f5c 100644
--- a/drivers/net/e1000e/hw.h
+++ b/drivers/net/e1000e/hw.h
@@ -756,6 +756,7 @@ struct e1000_host_mng_command_info {
/* Function pointers and static data for the MAC. */
struct e1000_mac_operations {
s32 (*id_led_init)(struct e1000_hw *);
+ s32 (*blink_led)(struct e1000_hw *);
bool (*check_mng_mode)(struct e1000_hw *);
s32 (*check_for_link)(struct e1000_hw *);
s32 (*cleanup_led)(struct e1000_hw *);
diff --git a/drivers/net/e1000e/ich8lan.c b/drivers/net/e1000e/ich8lan.c
index ce1dbfdca112..3369d1f6a39c 100644
--- a/drivers/net/e1000e/ich8lan.c
+++ b/drivers/net/e1000e/ich8lan.c
@@ -338,7 +338,7 @@ static s32 e1000_init_phy_params_pchlan(struct e1000_hw *hw)
/* Ungate automatic PHY configuration on non-managed 82579 */
if ((hw->mac.type == e1000_pch2lan) &&
!(fwsm & E1000_ICH_FWSM_FW_VALID)) {
- msleep(10);
+ usleep_range(10000, 20000);
e1000_gate_hw_phy_config_ich8lan(hw, false);
}
@@ -427,7 +427,7 @@ static s32 e1000_init_phy_params_ich8lan(struct e1000_hw *hw)
phy->id = 0;
while ((e1000_phy_unknown == e1000e_get_phy_type_from_id(phy->id)) &&
(i++ < 100)) {
- msleep(1);
+ usleep_range(1000, 2000);
ret_val = e1000e_get_phy_id(hw);
if (ret_val)
return ret_val;
@@ -564,6 +564,8 @@ static s32 e1000_init_mac_params_ich8lan(struct e1000_adapter *adapter)
mac->ops.check_mng_mode = e1000_check_mng_mode_ich8lan;
/* ID LED init */
mac->ops.id_led_init = e1000e_id_led_init;
+ /* blink LED */
+ mac->ops.blink_led = e1000e_blink_led_generic;
/* setup LED */
mac->ops.setup_led = e1000e_setup_led_generic;
/* cleanup LED */
@@ -767,6 +769,8 @@ static s32 e1000_get_variants_ich8lan(struct e1000_adapter *adapter)
(!(er32(CTRL_EXT) & E1000_CTRL_EXT_LSECCK)))) {
adapter->flags &= ~FLAG_HAS_JUMBO_FRAMES;
adapter->max_hw_frame_size = ETH_FRAME_LEN + ETH_FCS_LEN;
+
+ hw->mac.ops.blink_led = NULL;
}
if ((adapter->hw.mac.type == e1000_ich8lan) &&
@@ -1704,7 +1708,7 @@ static s32 e1000_post_phy_reset_ich8lan(struct e1000_hw *hw)
goto out;
/* Allow time for h/w to get to quiescent state after reset */
- msleep(10);
+ usleep_range(10000, 20000);
/* Perform any necessary post-reset workarounds */
switch (hw->mac.type) {
@@ -1737,7 +1741,7 @@ static s32 e1000_post_phy_reset_ich8lan(struct e1000_hw *hw)
if (hw->mac.type == e1000_pch2lan) {
/* Ungate automatic PHY configuration on non-managed 82579 */
if (!(er32(FWSM) & E1000_ICH_FWSM_FW_VALID)) {
- msleep(10);
+ usleep_range(10000, 20000);
e1000_gate_hw_phy_config_ich8lan(hw, false);
}
@@ -2532,7 +2536,7 @@ release:
*/
if (!ret_val) {
e1000e_reload_nvm(hw);
- msleep(10);
+ usleep_range(10000, 20000);
}
out:
@@ -3009,7 +3013,7 @@ static s32 e1000_reset_hw_ich8lan(struct e1000_hw *hw)
ew32(TCTL, E1000_TCTL_PSP);
e1e_flush();
- msleep(10);
+ usleep_range(10000, 20000);
/* Workaround for ICH8 bit corruption issue in FIFO memory */
if (hw->mac.type == e1000_ich8lan) {
diff --git a/drivers/net/e1000e/lib.c b/drivers/net/e1000e/lib.c
index 96921de5df2e..dd8ab05b5590 100644
--- a/drivers/net/e1000e/lib.c
+++ b/drivers/net/e1000e/lib.c
@@ -144,7 +144,7 @@ void e1000_write_vfta_generic(struct e1000_hw *hw, u32 offset, u32 value)
* @hw: pointer to the HW structure
* @rar_count: receive address registers
*
- * Setups the receive address registers by setting the base receive address
+ * Setup the receive address registers by setting the base receive address
* register to the devices MAC address and clearing all the other receive
* address registers to 0.
**/
@@ -868,7 +868,7 @@ static s32 e1000_poll_fiber_serdes_link_generic(struct e1000_hw *hw)
* milliseconds even if the other end is doing it in SW).
*/
for (i = 0; i < FIBER_LINK_UP_LIMIT; i++) {
- msleep(10);
+ usleep_range(10000, 20000);
status = er32(STATUS);
if (status & E1000_STATUS_LU)
break;
@@ -930,7 +930,7 @@ s32 e1000e_setup_fiber_serdes_link(struct e1000_hw *hw)
ew32(CTRL, ctrl);
e1e_flush();
- msleep(1);
+ usleep_range(1000, 2000);
/*
* For these adapters, the SW definable pin 1 is set when the optics
@@ -1181,7 +1181,7 @@ s32 e1000e_config_fc_after_link_up(struct e1000_hw *hw)
* of pause frames. In this case, we had to advertise
* FULL flow control because we could not advertise Rx
* ONLY. Hence, we must now check to see if we need to
- * turn OFF the TRANSMISSION of PAUSE frames.
+ * turn OFF the TRANSMISSION of PAUSE frames.
*/
if (hw->fc.requested_mode == e1000_fc_full) {
hw->fc.current_mode = e1000_fc_full;
@@ -1385,7 +1385,7 @@ s32 e1000e_get_auto_rd_done(struct e1000_hw *hw)
while (i < AUTO_READ_DONE_TIMEOUT) {
if (er32(EECD) & E1000_EECD_AUTO_RD)
break;
- msleep(1);
+ usleep_range(1000, 2000);
i++;
}
@@ -1530,12 +1530,12 @@ s32 e1000e_cleanup_led_generic(struct e1000_hw *hw)
}
/**
- * e1000e_blink_led - Blink LED
+ * e1000e_blink_led_generic - Blink LED
* @hw: pointer to the HW structure
*
* Blink the LEDs which are set to be on.
**/
-s32 e1000e_blink_led(struct e1000_hw *hw)
+s32 e1000e_blink_led_generic(struct e1000_hw *hw)
{
u32 ledctl_blink = 0;
u32 i;
@@ -2087,8 +2087,6 @@ s32 e1000e_write_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data)
if (ret_val)
return ret_val;
- msleep(10);
-
while (widx < words) {
u8 write_opcode = NVM_WRITE_OPCODE_SPI;
@@ -2132,7 +2130,7 @@ s32 e1000e_write_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data)
}
}
- msleep(10);
+ usleep_range(10000, 20000);
nvm->ops.release(hw);
return 0;
}
diff --git a/drivers/net/e1000e/netdev.c b/drivers/net/e1000e/netdev.c
index a39d4a4d871c..0939040305fa 100644
--- a/drivers/net/e1000e/netdev.c
+++ b/drivers/net/e1000e/netdev.c
@@ -58,6 +58,8 @@
char e1000e_driver_name[] = "e1000e";
const char e1000e_driver_version[] = DRV_VERSION;
+static void e1000e_disable_aspm(struct pci_dev *pdev, u16 state);
+
static const struct e1000_info *e1000_info_tbl[] = {
[board_82571] = &e1000_82571_info,
[board_82572] = &e1000_82572_info,
@@ -459,13 +461,13 @@ static void e1000_receive_skb(struct e1000_adapter *adapter,
struct net_device *netdev, struct sk_buff *skb,
u8 status, __le16 vlan)
{
+ u16 tag = le16_to_cpu(vlan);
skb->protocol = eth_type_trans(skb, netdev);
- if (adapter->vlgrp && (status & E1000_RXD_STAT_VP))
- vlan_gro_receive(&adapter->napi, adapter->vlgrp,
- le16_to_cpu(vlan), skb);
- else
- napi_gro_receive(&adapter->napi, skb);
+ if (status & E1000_RXD_STAT_VP)
+ __vlan_hwaccel_put_tag(skb, tag);
+
+ napi_gro_receive(&adapter->napi, skb);
}
/**
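Without a vlan_group, the receive path no longer branches into vlan_gro_receive(); it stamps the hardware-stripped tag into the skb and hands everything to the regular GRO entry point, letting the stack resolve the VLAN device. The pattern for this kernel generation (two-argument __vlan_hwaccel_put_tag; the surrounding names are assumed):

#include <linux/if_vlan.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>

/* Hypothetical rx completion: tag_present/tag would come from the
 * receive descriptor, after eth_type_trans() has set skb->protocol. */
static void demo_rx_deliver(struct napi_struct *napi, struct sk_buff *skb,
			    bool tag_present, u16 tag)
{
	if (tag_present)
		__vlan_hwaccel_put_tag(skb, tag);

	napi_gro_receive(napi, skb);
}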
@@ -2433,6 +2435,8 @@ static void e1000_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
vfta |= (1 << (vid & 0x1F));
hw->mac.ops.write_vfta(hw, index, vfta);
}
+
+ set_bit(vid, adapter->active_vlans);
}
static void e1000_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
@@ -2441,13 +2445,6 @@ static void e1000_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
struct e1000_hw *hw = &adapter->hw;
u32 vfta, index;
- if (!test_bit(__E1000_DOWN, &adapter->state))
- e1000_irq_disable(adapter);
- vlan_group_set_device(adapter->vlgrp, vid, NULL);
-
- if (!test_bit(__E1000_DOWN, &adapter->state))
- e1000_irq_enable(adapter);
-
if ((adapter->hw.mng_cookie.status &
E1000_MNG_DHCP_COOKIE_STATUS_VLAN) &&
(vid == adapter->mng_vlan_id)) {
@@ -2463,93 +2460,105 @@ static void e1000_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
vfta &= ~(1 << (vid & 0x1F));
hw->mac.ops.write_vfta(hw, index, vfta);
}
+
+ clear_bit(vid, adapter->active_vlans);
}
-static void e1000_update_mng_vlan(struct e1000_adapter *adapter)
+/**
+ * e1000e_vlan_filter_disable - helper to disable hw VLAN filtering
+ * @adapter: board private structure to initialize
+ **/
+static void e1000e_vlan_filter_disable(struct e1000_adapter *adapter)
{
struct net_device *netdev = adapter->netdev;
- u16 vid = adapter->hw.mng_cookie.vlan_id;
- u16 old_vid = adapter->mng_vlan_id;
-
- if (!adapter->vlgrp)
- return;
+ struct e1000_hw *hw = &adapter->hw;
+ u32 rctl;
- if (!vlan_group_get_device(adapter->vlgrp, vid)) {
- adapter->mng_vlan_id = E1000_MNG_VLAN_NONE;
- if (adapter->hw.mng_cookie.status &
- E1000_MNG_DHCP_COOKIE_STATUS_VLAN) {
- e1000_vlan_rx_add_vid(netdev, vid);
- adapter->mng_vlan_id = vid;
+ if (adapter->flags & FLAG_HAS_HW_VLAN_FILTER) {
+ /* disable VLAN receive filtering */
+ rctl = er32(RCTL);
+ rctl &= ~(E1000_RCTL_VFE | E1000_RCTL_CFIEN);
+ ew32(RCTL, rctl);
+
+ if (adapter->mng_vlan_id != (u16)E1000_MNG_VLAN_NONE) {
+ e1000_vlan_rx_kill_vid(netdev, adapter->mng_vlan_id);
+ adapter->mng_vlan_id = E1000_MNG_VLAN_NONE;
}
-
- if ((old_vid != (u16)E1000_MNG_VLAN_NONE) &&
- (vid != old_vid) &&
- !vlan_group_get_device(adapter->vlgrp, old_vid))
- e1000_vlan_rx_kill_vid(netdev, old_vid);
- } else {
- adapter->mng_vlan_id = vid;
}
}
+/**
+ * e1000e_vlan_filter_enable - helper to enable HW VLAN filtering
+ * @adapter: board private structure to initialize
+ **/
+static void e1000e_vlan_filter_enable(struct e1000_adapter *adapter)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ u32 rctl;
-static void e1000_vlan_rx_register(struct net_device *netdev,
- struct vlan_group *grp)
+ if (adapter->flags & FLAG_HAS_HW_VLAN_FILTER) {
+ /* enable VLAN receive filtering */
+ rctl = er32(RCTL);
+ rctl |= E1000_RCTL_VFE;
+ rctl &= ~E1000_RCTL_CFIEN;
+ ew32(RCTL, rctl);
+ }
+}
+
+/**
+ * e1000e_vlan_strip_disable - helper to disable HW VLAN stripping
+ * @adapter: board private structure to initialize
+ **/
+static void e1000e_vlan_strip_disable(struct e1000_adapter *adapter)
{
- struct e1000_adapter *adapter = netdev_priv(netdev);
struct e1000_hw *hw = &adapter->hw;
- u32 ctrl, rctl;
+ u32 ctrl;
- if (!test_bit(__E1000_DOWN, &adapter->state))
- e1000_irq_disable(adapter);
- adapter->vlgrp = grp;
+ /* disable VLAN tag insert/strip */
+ ctrl = er32(CTRL);
+ ctrl &= ~E1000_CTRL_VME;
+ ew32(CTRL, ctrl);
+}
- if (grp) {
- /* enable VLAN tag insert/strip */
- ctrl = er32(CTRL);
- ctrl |= E1000_CTRL_VME;
- ew32(CTRL, ctrl);
+/**
+ * e1000e_vlan_strip_enable - helper to enable HW VLAN stripping
+ * @adapter: board private structure to initialize
+ **/
+static void e1000e_vlan_strip_enable(struct e1000_adapter *adapter)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ u32 ctrl;
- if (adapter->flags & FLAG_HAS_HW_VLAN_FILTER) {
- /* enable VLAN receive filtering */
- rctl = er32(RCTL);
- rctl &= ~E1000_RCTL_CFIEN;
- ew32(RCTL, rctl);
- e1000_update_mng_vlan(adapter);
- }
- } else {
- /* disable VLAN tag insert/strip */
- ctrl = er32(CTRL);
- ctrl &= ~E1000_CTRL_VME;
- ew32(CTRL, ctrl);
+ /* enable VLAN tag insert/strip */
+ ctrl = er32(CTRL);
+ ctrl |= E1000_CTRL_VME;
+ ew32(CTRL, ctrl);
+}
- if (adapter->flags & FLAG_HAS_HW_VLAN_FILTER) {
- if (adapter->mng_vlan_id !=
- (u16)E1000_MNG_VLAN_NONE) {
- e1000_vlan_rx_kill_vid(netdev,
- adapter->mng_vlan_id);
- adapter->mng_vlan_id = E1000_MNG_VLAN_NONE;
- }
- }
+static void e1000_update_mng_vlan(struct e1000_adapter *adapter)
+{
+ struct net_device *netdev = adapter->netdev;
+ u16 vid = adapter->hw.mng_cookie.vlan_id;
+ u16 old_vid = adapter->mng_vlan_id;
+
+ if (adapter->hw.mng_cookie.status &
+ E1000_MNG_DHCP_COOKIE_STATUS_VLAN) {
+ e1000_vlan_rx_add_vid(netdev, vid);
+ adapter->mng_vlan_id = vid;
}
- if (!test_bit(__E1000_DOWN, &adapter->state))
- e1000_irq_enable(adapter);
+ if ((old_vid != (u16)E1000_MNG_VLAN_NONE) && (vid != old_vid))
+ e1000_vlan_rx_kill_vid(netdev, old_vid);
}
static void e1000_restore_vlan(struct e1000_adapter *adapter)
{
u16 vid;
- e1000_vlan_rx_register(adapter->netdev, adapter->vlgrp);
-
- if (!adapter->vlgrp)
- return;
+ e1000_vlan_rx_add_vid(adapter->netdev, 0);
- for (vid = 0; vid < VLAN_N_VID; vid++) {
- if (!vlan_group_get_device(adapter->vlgrp, vid))
- continue;
+ for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID)
e1000_vlan_rx_add_vid(adapter->netdev, vid);
- }
}
static void e1000_init_manageability_pt(struct e1000_adapter *adapter)
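Restoring VLAN filters after a reset is now a scan over the bitmap instead of probing every vlan_group slot; for_each_set_bit() visits only the IDs that are actually in use. A sketch of the iteration with a hypothetical re-add callback:

#include <linux/bitops.h>
#include <linux/if_vlan.h>
#include <linux/types.h>

static void demo_restore_vlans(const unsigned long *active_vlans,
			       void (*add_vid)(u16 vid))
{
	u16 vid;

	/* cost scales with the number of set bits, not with VLAN_N_VID */
	for_each_set_bit(vid, active_vlans, VLAN_N_VID)
		add_vid(vid);
}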
@@ -2902,7 +2911,7 @@ static void e1000_configure_rx(struct e1000_adapter *adapter)
rctl = er32(RCTL);
ew32(RCTL, rctl & ~E1000_RCTL_EN);
e1e_flush();
- msleep(10);
+ usleep_range(10000, 20000);
if (adapter->flags2 & FLAG2_DMA_BURST) {
/*
@@ -3039,6 +3048,8 @@ static void e1000_set_multi(struct net_device *netdev)
if (netdev->flags & IFF_PROMISC) {
rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE);
rctl &= ~E1000_RCTL_VFE;
+ /* Do not hardware filter VLANs in promisc mode */
+ e1000e_vlan_filter_disable(adapter);
} else {
if (netdev->flags & IFF_ALLMULTI) {
rctl |= E1000_RCTL_MPE;
@@ -3046,8 +3057,7 @@ static void e1000_set_multi(struct net_device *netdev)
} else {
rctl &= ~(E1000_RCTL_UPE | E1000_RCTL_MPE);
}
- if (adapter->flags & FLAG_HAS_HW_VLAN_FILTER)
- rctl |= E1000_RCTL_VFE;
+ e1000e_vlan_filter_enable(adapter);
}
ew32(RCTL, rctl);
@@ -3072,6 +3082,11 @@ static void e1000_set_multi(struct net_device *netdev)
*/
e1000_update_mc_addr_list(hw, NULL, 0);
}
+
+ if (netdev->features & NETIF_F_HW_VLAN_RX)
+ e1000e_vlan_strip_enable(adapter);
+ else
+ e1000e_vlan_strip_disable(adapter);
}
/**
@@ -3383,7 +3398,7 @@ void e1000e_down(struct e1000_adapter *adapter)
ew32(TCTL, tctl);
/* flush both disables and wait for them to finish */
e1e_flush();
- msleep(10);
+ usleep_range(10000, 20000);
napi_disable(&adapter->napi);
e1000_irq_disable(adapter);
@@ -3418,7 +3433,7 @@ void e1000e_reinit_locked(struct e1000_adapter *adapter)
{
might_sleep();
while (test_and_set_bit(__E1000_RESETTING, &adapter->state))
- msleep(1);
+ usleep_range(1000, 2000);
e1000e_down(adapter);
e1000e_up(adapter);
clear_bit(__E1000_RESETTING, &adapter->state);
@@ -3721,10 +3736,8 @@ static int e1000_close(struct net_device *netdev)
* kill manageability vlan ID if supported, but not if a vlan with
* the same ID is registered on the host OS (let 8021q kill it)
*/
- if ((adapter->hw.mng_cookie.status &
- E1000_MNG_DHCP_COOKIE_STATUS_VLAN) &&
- !(adapter->vlgrp &&
- vlan_group_get_device(adapter->vlgrp, adapter->mng_vlan_id)))
+ if (adapter->hw.mng_cookie.status &
+ E1000_MNG_DHCP_COOKIE_STATUS_VLAN)
e1000_vlan_rx_kill_vid(netdev, adapter->mng_vlan_id);
/*
@@ -4328,7 +4341,6 @@ static void e1000_watchdog_task(struct work_struct *work)
link_up:
spin_lock(&adapter->stats64_lock);
e1000e_update_stats(adapter);
- spin_unlock(&adapter->stats64_lock);
mac->tx_packet_delta = adapter->stats.tpt - adapter->tpt_old;
adapter->tpt_old = adapter->stats.tpt;
@@ -4339,6 +4351,7 @@ link_up:
adapter->gorc_old = adapter->stats.gorc;
adapter->gotc = adapter->stats.gotc - adapter->gotc_old;
adapter->gotc_old = adapter->stats.gotc;
+ spin_unlock(&adapter->stats64_lock);
e1000e_update_adaptive(&adapter->hw);
@@ -4886,7 +4899,7 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
if (skb->protocol == htons(ETH_P_IP))
tx_flags |= E1000_TX_FLAGS_IPV4;
- /* if count is 0 then mapping error has occured */
+ /* if count is 0 then mapping error has occurred */
count = e1000_tx_map(adapter, skb, first, max_per_txd, nr_frags, mss);
if (count) {
e1000_tx_queue(adapter, tx_flags, count);
@@ -5028,7 +5041,7 @@ static int e1000_change_mtu(struct net_device *netdev, int new_mtu)
}
while (test_and_set_bit(__E1000_RESETTING, &adapter->state))
- msleep(1);
+ usleep_range(1000, 2000);
/* e1000e_down -> e1000e_reset dependent on max_frame_size & mtu */
adapter->max_frame_size = max_frame;
e_info("changing MTU from %d to %d\n", netdev->mtu, new_mtu);
@@ -5373,7 +5386,7 @@ static void __e1000e_disable_aspm(struct pci_dev *pdev, u16 state)
pci_write_config_word(pdev->bus->self, pos + PCI_EXP_LNKCTL, reg16);
}
#endif
-void e1000e_disable_aspm(struct pci_dev *pdev, u16 state)
+static void e1000e_disable_aspm(struct pci_dev *pdev, u16 state)
{
dev_info(&pdev->dev, "Disabling ASPM %s %s\n",
(state & PCIE_LINK_STATE_L0S) ? "L0s" : "",
@@ -5393,13 +5406,19 @@ static int __e1000_resume(struct pci_dev *pdev)
struct net_device *netdev = pci_get_drvdata(pdev);
struct e1000_adapter *adapter = netdev_priv(netdev);
struct e1000_hw *hw = &adapter->hw;
+ u16 aspm_disable_flag = 0;
u32 err;
+ if (adapter->flags2 & FLAG2_DISABLE_ASPM_L0S)
+ aspm_disable_flag = PCIE_LINK_STATE_L0S;
+ if (adapter->flags2 & FLAG2_DISABLE_ASPM_L1)
+ aspm_disable_flag |= PCIE_LINK_STATE_L1;
+ if (aspm_disable_flag)
+ e1000e_disable_aspm(pdev, aspm_disable_flag);
+
pci_set_power_state(pdev, PCI_D0);
pci_restore_state(pdev);
pci_save_state(pdev);
- if (adapter->flags2 & FLAG2_DISABLE_ASPM_L1)
- e1000e_disable_aspm(pdev, PCIE_LINK_STATE_L1);
e1000e_set_interrupt_capability(adapter);
if (netif_running(netdev)) {
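Resume, slot reset, and probe now build one mask from the per-board FLAG2 bits and make a single e1000e_disable_aspm() call, which is why the unconditional L0s disable could be dropped from the 82571 family code above. The aggregation in isolation (assumes the FLAG2_* definitions from the e1000.h hunk; the helper name is hypothetical):

#include <linux/pci.h>
#include <linux/pci-aspm.h>

/* Translate board quirk bits into a PCIe ASPM link-state mask. */
static u16 demo_aspm_disable_mask(unsigned int flags2)
{
	u16 mask = 0;

	if (flags2 & FLAG2_DISABLE_ASPM_L0S)	/* (1 << 7), per e1000.h */
		mask |= PCIE_LINK_STATE_L0S;
	if (flags2 & FLAG2_DISABLE_ASPM_L1)
		mask |= PCIE_LINK_STATE_L1;

	return mask;
}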
@@ -5643,11 +5662,17 @@ static pci_ers_result_t e1000_io_slot_reset(struct pci_dev *pdev)
struct net_device *netdev = pci_get_drvdata(pdev);
struct e1000_adapter *adapter = netdev_priv(netdev);
struct e1000_hw *hw = &adapter->hw;
+ u16 aspm_disable_flag = 0;
int err;
pci_ers_result_t result;
+ if (adapter->flags2 & FLAG2_DISABLE_ASPM_L0S)
+ aspm_disable_flag = PCIE_LINK_STATE_L0S;
if (adapter->flags2 & FLAG2_DISABLE_ASPM_L1)
- e1000e_disable_aspm(pdev, PCIE_LINK_STATE_L1);
+ aspm_disable_flag |= PCIE_LINK_STATE_L1;
+ if (aspm_disable_flag)
+ e1000e_disable_aspm(pdev, aspm_disable_flag);
+
err = pci_enable_device_mem(pdev);
if (err) {
dev_err(&pdev->dev,
@@ -5714,7 +5739,7 @@ static void e1000_print_device_info(struct e1000_adapter *adapter)
u8 pba_str[E1000_PBANUM_LENGTH];
/* print bus type/speed/width info */
- e_info("(PCI Express:2.5GB/s:%s) %pM\n",
+ e_info("(PCI Express:2.5GT/s:%s) %pM\n",
/* bus width */
((hw->bus.width == e1000_bus_width_pcie_x4) ? "Width x4" :
"Width x1"),
@@ -5759,7 +5784,6 @@ static const struct net_device_ops e1000e_netdev_ops = {
.ndo_tx_timeout = e1000_tx_timeout,
.ndo_validate_addr = eth_validate_addr,
- .ndo_vlan_rx_register = e1000_vlan_rx_register,
.ndo_vlan_rx_add_vid = e1000_vlan_rx_add_vid,
.ndo_vlan_rx_kill_vid = e1000_vlan_rx_kill_vid,
#ifdef CONFIG_NET_POLL_CONTROLLER
@@ -5789,12 +5813,17 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
resource_size_t flash_start, flash_len;
static int cards_found;
+ u16 aspm_disable_flag = 0;
int i, err, pci_using_dac;
u16 eeprom_data = 0;
u16 eeprom_apme_mask = E1000_EEPROM_APME;
+ if (ei->flags2 & FLAG2_DISABLE_ASPM_L0S)
+ aspm_disable_flag = PCIE_LINK_STATE_L0S;
if (ei->flags2 & FLAG2_DISABLE_ASPM_L1)
- e1000e_disable_aspm(pdev, PCIE_LINK_STATE_L1);
+ aspm_disable_flag |= PCIE_LINK_STATE_L1;
+ if (aspm_disable_flag)
+ e1000e_disable_aspm(pdev, aspm_disable_flag);
err = pci_enable_device_mem(pdev);
if (err)
@@ -5991,7 +6020,6 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
INIT_WORK(&adapter->downshift_task, e1000e_downshift_workaround);
INIT_WORK(&adapter->update_phy_task, e1000e_update_phy_task);
INIT_WORK(&adapter->print_hang_task, e1000_print_hw_hang);
- INIT_WORK(&adapter->led_blink_task, e1000e_led_blink_task);
/* Initialize link parameters. User can change them with ethtool */
adapter->hw.mac.autoneg = 1;
@@ -6124,7 +6152,6 @@ static void __devexit e1000_remove(struct pci_dev *pdev)
cancel_work_sync(&adapter->watchdog_task);
cancel_work_sync(&adapter->downshift_task);
cancel_work_sync(&adapter->update_phy_task);
- cancel_work_sync(&adapter->led_blink_task);
cancel_work_sync(&adapter->print_hang_task);
if (!(netdev->flags & IFF_UP))
diff --git a/drivers/net/e1000e/phy.c b/drivers/net/e1000e/phy.c
index 6ae31fcfb629..484774c13c21 100644
--- a/drivers/net/e1000e/phy.c
+++ b/drivers/net/e1000e/phy.c
@@ -2372,7 +2372,7 @@ s32 e1000e_determine_phy_address(struct e1000_hw *hw)
ret_val = 0;
goto out;
}
- msleep(1);
+ usleep_range(1000, 2000);
i++;
} while (i < 10);
}
@@ -2740,7 +2740,7 @@ void e1000_power_down_phy_copper(struct e1000_hw *hw)
e1e_rphy(hw, PHY_CONTROL, &mii_reg);
mii_reg |= MII_CR_POWER_DOWN;
e1e_wphy(hw, PHY_CONTROL, mii_reg);
- msleep(1);
+ usleep_range(1000, 2000);
}
/**
diff --git a/drivers/net/eepro.c b/drivers/net/eepro.c
index eb35951a2442..dfeb006035df 100644
--- a/drivers/net/eepro.c
+++ b/drivers/net/eepro.c
@@ -1703,7 +1703,7 @@ static int eepro_ethtool_get_settings(struct net_device *dev,
cmd->advertising |= ADVERTISED_AUI;
}
- cmd->speed = SPEED_10;
+ ethtool_cmd_speed_set(cmd, SPEED_10);
if (dev->if_port == TPE && lp->word[1] & ee_Duplex) {
cmd->duplex = DUPLEX_FULL;
diff --git a/drivers/net/ehea/ehea_ethtool.c b/drivers/net/ehea/ehea_ethtool.c
index 3e2e734fecb7..7f642aef5e82 100644
--- a/drivers/net/ehea/ehea_ethtool.c
+++ b/drivers/net/ehea/ehea_ethtool.c
@@ -34,6 +34,7 @@
static int ehea_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
struct ehea_port *port = netdev_priv(dev);
+ u32 speed;
int ret;
ret = ehea_sense_port_attr(port);
@@ -43,27 +44,44 @@ static int ehea_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
if (netif_carrier_ok(dev)) {
switch (port->port_speed) {
- case EHEA_SPEED_10M: cmd->speed = SPEED_10; break;
- case EHEA_SPEED_100M: cmd->speed = SPEED_100; break;
- case EHEA_SPEED_1G: cmd->speed = SPEED_1000; break;
- case EHEA_SPEED_10G: cmd->speed = SPEED_10000; break;
+ case EHEA_SPEED_10M:
+ speed = SPEED_10;
+ break;
+ case EHEA_SPEED_100M:
+ speed = SPEED_100;
+ break;
+ case EHEA_SPEED_1G:
+ speed = SPEED_1000;
+ break;
+ case EHEA_SPEED_10G:
+ speed = SPEED_10000;
+ break;
+ default:
+ speed = -1;
+ break; /* BUG */
}
cmd->duplex = port->full_duplex == 1 ?
DUPLEX_FULL : DUPLEX_HALF;
} else {
- cmd->speed = -1;
+ speed = ~0;
cmd->duplex = -1;
}
+ ethtool_cmd_speed_set(cmd, speed);
- cmd->supported = (SUPPORTED_10000baseT_Full | SUPPORTED_1000baseT_Full
- | SUPPORTED_100baseT_Full | SUPPORTED_100baseT_Half
- | SUPPORTED_10baseT_Full | SUPPORTED_10baseT_Half
- | SUPPORTED_Autoneg | SUPPORTED_FIBRE);
-
- cmd->advertising = (ADVERTISED_10000baseT_Full | ADVERTISED_Autoneg
- | ADVERTISED_FIBRE);
+ if (cmd->speed == SPEED_10000) {
+ cmd->supported = (SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE);
+ cmd->advertising = (ADVERTISED_10000baseT_Full | ADVERTISED_FIBRE);
+ cmd->port = PORT_FIBRE;
+ } else {
+ cmd->supported = (SUPPORTED_1000baseT_Full | SUPPORTED_100baseT_Full
+ | SUPPORTED_100baseT_Half | SUPPORTED_10baseT_Full
+ | SUPPORTED_10baseT_Half | SUPPORTED_Autoneg
+ | SUPPORTED_TP);
+ cmd->advertising = (ADVERTISED_1000baseT_Full | ADVERTISED_Autoneg
+ | ADVERTISED_TP);
+ cmd->port = PORT_TP;
+ }
- cmd->port = PORT_FIBRE;
cmd->autoneg = port->autoneg == 1 ? AUTONEG_ENABLE : AUTONEG_DISABLE;
return 0;
@@ -162,11 +180,6 @@ static void ehea_set_msglevel(struct net_device *dev, u32 value)
port->msg_enable = value;
}
-static u32 ehea_get_rx_csum(struct net_device *dev)
-{
- return 1;
-}
-
static char ehea_ethtool_stats_keys[][ETH_GSTRING_LEN] = {
{"sig_comp_iv"},
{"swqe_refill_th"},
@@ -263,34 +276,16 @@ static void ehea_get_ethtool_stats(struct net_device *dev,
}
-static int ehea_set_flags(struct net_device *dev, u32 data)
-{
- /* Avoid changing the VLAN flags */
- if ((data & (ETH_FLAG_RXVLAN | ETH_FLAG_TXVLAN)) !=
- (ethtool_op_get_flags(dev) & (ETH_FLAG_RXVLAN |
- ETH_FLAG_TXVLAN))){
- return -EINVAL;
- }
-
- return ethtool_op_set_flags(dev, data, ETH_FLAG_LRO
- | ETH_FLAG_TXVLAN
- | ETH_FLAG_RXVLAN);
-}
-
const struct ethtool_ops ehea_ethtool_ops = {
.get_settings = ehea_get_settings,
.get_drvinfo = ehea_get_drvinfo,
.get_msglevel = ehea_get_msglevel,
.set_msglevel = ehea_set_msglevel,
.get_link = ethtool_op_get_link,
- .set_tso = ethtool_op_set_tso,
.get_strings = ehea_get_strings,
.get_sset_count = ehea_get_sset_count,
.get_ethtool_stats = ehea_get_ethtool_stats,
- .get_rx_csum = ehea_get_rx_csum,
.set_settings = ehea_set_settings,
- .get_flags = ethtool_op_get_flags,
- .set_flags = ehea_set_flags,
.nway_reset = ehea_nway_reset, /* Restart autonegotiation */
};
diff --git a/drivers/net/ehea/ehea_main.c b/drivers/net/ehea/ehea_main.c
index f75d3144b8a5..a004bbcf72e6 100644
--- a/drivers/net/ehea/ehea_main.c
+++ b/drivers/net/ehea/ehea_main.c
@@ -3040,11 +3040,14 @@ static void ehea_rereg_mrs(void)
if (dev->flags & IFF_UP) {
mutex_lock(&port->port_lock);
- port_napi_enable(port);
ret = ehea_restart_qps(dev);
- check_sqs(port);
- if (!ret)
+ if (!ret) {
+ check_sqs(port);
+ port_napi_enable(port);
netif_wake_queue(dev);
+ } else {
+ netdev_err(dev, "Unable to restart QPS\n");
+ }
mutex_unlock(&port->port_lock);
}
}
@@ -3262,10 +3265,12 @@ struct ehea_port *ehea_setup_single_port(struct ehea_adapter *adapter,
dev->netdev_ops = &ehea_netdev_ops;
ehea_set_ethtool_ops(dev);
+ dev->hw_features = NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_TSO
+ | NETIF_F_IP_CSUM | NETIF_F_HW_VLAN_TX | NETIF_F_LRO;
dev->features = NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_TSO
| NETIF_F_HIGHDMA | NETIF_F_IP_CSUM | NETIF_F_HW_VLAN_TX
| NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER
- | NETIF_F_LLTX;
+ | NETIF_F_LLTX | NETIF_F_RXCSUM;
dev->watchdog_timeo = EHEA_WATCH_DOG_TIMEOUT;
if (use_lro)
diff --git a/drivers/net/enc28j60.c b/drivers/net/enc28j60.c
index 907b05a1c659..2837ce209cd7 100644
--- a/drivers/net/enc28j60.c
+++ b/drivers/net/enc28j60.c
@@ -1488,7 +1488,7 @@ enc28j60_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
cmd->supported = SUPPORTED_10baseT_Half
| SUPPORTED_10baseT_Full
| SUPPORTED_TP;
- cmd->speed = SPEED_10;
+ ethtool_cmd_speed_set(cmd, SPEED_10);
cmd->duplex = priv->full_duplex ? DUPLEX_FULL : DUPLEX_HALF;
cmd->port = PORT_TP;
cmd->autoneg = AUTONEG_DISABLE;
@@ -1499,7 +1499,8 @@ enc28j60_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
static int
enc28j60_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
- return enc28j60_setlink(dev, cmd->autoneg, cmd->speed, cmd->duplex);
+ return enc28j60_setlink(dev, cmd->autoneg,
+ ethtool_cmd_speed(cmd), cmd->duplex);
}
static u32 enc28j60_get_msglevel(struct net_device *dev)
diff --git a/drivers/net/enc28j60_hw.h b/drivers/net/enc28j60_hw.h
index 1a0b20969f80..25b41de49f0e 100644
--- a/drivers/net/enc28j60_hw.h
+++ b/drivers/net/enc28j60_hw.h
@@ -303,7 +303,7 @@
/* maximum ethernet frame length */
#define MAX_FRAMELEN 1518
-/* Prefered half duplex: LEDA: Link status LEDB: Rx/Tx activity */
+/* Preferred half duplex: LEDA: Link status LEDB: Rx/Tx activity */
#define ENC28J60_LAMPS_MODE 0x3476
#endif
diff --git a/drivers/net/enic/Makefile b/drivers/net/enic/Makefile
index 2e573be16c13..9d4974bba247 100644
--- a/drivers/net/enic/Makefile
+++ b/drivers/net/enic/Makefile
@@ -1,5 +1,5 @@
obj-$(CONFIG_ENIC) := enic.o
enic-y := enic_main.o vnic_cq.o vnic_intr.o vnic_wq.o \
- enic_res.o enic_dev.o vnic_dev.o vnic_rq.o vnic_vic.o
+ enic_res.o enic_dev.o enic_pp.o vnic_dev.o vnic_rq.o vnic_vic.o
diff --git a/drivers/net/enic/enic.h b/drivers/net/enic/enic.h
index 3a3c3c8a3a9b..38b351c7b979 100644
--- a/drivers/net/enic/enic.h
+++ b/drivers/net/enic/enic.h
@@ -32,7 +32,7 @@
#define DRV_NAME "enic"
#define DRV_DESCRIPTION "Cisco VIC Ethernet NIC Driver"
-#define DRV_VERSION "2.1.1.12"
+#define DRV_VERSION "2.1.1.13"
#define DRV_COPYRIGHT "Copyright 2008-2011 Cisco Systems, Inc"
#define ENIC_BARS_MAX 6
@@ -84,7 +84,6 @@ struct enic {
unsigned int flags;
unsigned int mc_count;
unsigned int uc_count;
- int csum_rx_enabled;
u32 port_mtu;
u32 rx_coalesce_usecs;
u32 tx_coalesce_usecs;
@@ -120,4 +119,6 @@ static inline struct device *enic_get_dev(struct enic *enic)
return &(enic->pdev->dev);
}
+void enic_reset_addr_lists(struct enic *enic);
+
#endif /* _ENIC_H_ */
diff --git a/drivers/net/enic/enic_dev.c b/drivers/net/enic/enic_dev.c
index 37ad3a1c82ee..90687b14e60f 100644
--- a/drivers/net/enic/enic_dev.c
+++ b/drivers/net/enic/enic_dev.c
@@ -177,24 +177,24 @@ int enic_vnic_dev_deinit(struct enic *enic)
return err;
}
-int enic_dev_init_prov(struct enic *enic, struct vic_provinfo *vp)
+int enic_dev_init_prov2(struct enic *enic, struct vic_provinfo *vp)
{
int err;
spin_lock(&enic->devcmd_lock);
- err = vnic_dev_init_prov(enic->vdev,
+ err = vnic_dev_init_prov2(enic->vdev,
(u8 *)vp, vic_provinfo_size(vp));
spin_unlock(&enic->devcmd_lock);
return err;
}
-int enic_dev_init_done(struct enic *enic, int *done, int *error)
+int enic_dev_deinit_done(struct enic *enic, int *status)
{
int err;
spin_lock(&enic->devcmd_lock);
- err = vnic_dev_init_done(enic->vdev, done, error);
+ err = vnic_dev_deinit_done(enic->vdev, status);
spin_unlock(&enic->devcmd_lock);
return err;
@@ -219,3 +219,57 @@ void enic_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
enic_del_vlan(enic, vid);
spin_unlock(&enic->devcmd_lock);
}
+
+int enic_dev_enable2(struct enic *enic, int active)
+{
+ int err;
+
+ spin_lock(&enic->devcmd_lock);
+ err = vnic_dev_enable2(enic->vdev, active);
+ spin_unlock(&enic->devcmd_lock);
+
+ return err;
+}
+
+int enic_dev_enable2_done(struct enic *enic, int *status)
+{
+ int err;
+
+ spin_lock(&enic->devcmd_lock);
+ err = vnic_dev_enable2_done(enic->vdev, status);
+ spin_unlock(&enic->devcmd_lock);
+
+ return err;
+}
+
+int enic_dev_status_to_errno(int devcmd_status)
+{
+ switch (devcmd_status) {
+ case ERR_SUCCESS:
+ return 0;
+ case ERR_EINVAL:
+ return -EINVAL;
+ case ERR_EFAULT:
+ return -EFAULT;
+ case ERR_EPERM:
+ return -EPERM;
+ case ERR_EBUSY:
+ return -EBUSY;
+ case ERR_ECMDUNKNOWN:
+ case ERR_ENOTSUPPORTED:
+ return -EOPNOTSUPP;
+ case ERR_EBADSTATE:
+ return -EINVAL;
+ case ERR_ENOMEM:
+ return -ENOMEM;
+ case ERR_ETIMEDOUT:
+ return -ETIMEDOUT;
+ case ERR_ELINKDOWN:
+ return -ENETDOWN;
+ case ERR_EINPROGRESS:
+ return -EINPROGRESS;
+ case ERR_EMAXRES:
+ default:
+ return (devcmd_status < 0) ? devcmd_status : -1;
+ }
+}
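enic_dev_status_to_errno() lets callers collapse the firmware devcmd status space into ordinary kernel error numbers at the call site; enic_pp.c below uses exactly this shape. A short usage sketch (hypothetical caller inside the driver):

/* Issue a devcmd-backed enable and return a normal errno to the stack. */
static int demo_enable_port(struct enic *enic)
{
	int err;

	err = enic_dev_status_to_errno(enic_dev_enable2(enic, 1 /* active */));
	if (err)
		return err;	/* e.g. ERR_EBUSY becomes -EBUSY */

	return 0;
}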
diff --git a/drivers/net/enic/enic_dev.h b/drivers/net/enic/enic_dev.h
index 495f57fcb887..d5f681337626 100644
--- a/drivers/net/enic/enic_dev.h
+++ b/drivers/net/enic/enic_dev.h
@@ -35,7 +35,10 @@ int enic_dev_set_ig_vlan_rewrite_mode(struct enic *enic);
int enic_dev_enable(struct enic *enic);
int enic_dev_disable(struct enic *enic);
int enic_vnic_dev_deinit(struct enic *enic);
-int enic_dev_init_prov(struct enic *enic, struct vic_provinfo *vp);
-int enic_dev_init_done(struct enic *enic, int *done, int *error);
+int enic_dev_init_prov2(struct enic *enic, struct vic_provinfo *vp);
+int enic_dev_deinit_done(struct enic *enic, int *status);
+int enic_dev_enable2(struct enic *enic, int arg);
+int enic_dev_enable2_done(struct enic *enic, int *status);
+int enic_dev_status_to_errno(int devcmd_status);
#endif /* _ENIC_DEV_H_ */
diff --git a/drivers/net/enic/enic_main.c b/drivers/net/enic/enic_main.c
index 8b9cad5e9712..3d99b0f1a236 100644
--- a/drivers/net/enic/enic_main.c
+++ b/drivers/net/enic/enic_main.c
@@ -45,6 +45,7 @@
#include "enic_res.h"
#include "enic.h"
#include "enic_dev.h"
+#include "enic_pp.h"
#define ENIC_NOTIFY_TIMER_PERIOD (2 * HZ)
#define WQ_ENET_MAX_DESC_LEN (1 << WQ_ENET_LEN_BITS)
@@ -179,10 +180,10 @@ static int enic_get_settings(struct net_device *netdev,
ecmd->transceiver = XCVR_EXTERNAL;
if (netif_carrier_ok(netdev)) {
- ecmd->speed = vnic_dev_port_speed(enic->vdev);
+ ethtool_cmd_speed_set(ecmd, vnic_dev_port_speed(enic->vdev));
ecmd->duplex = DUPLEX_FULL;
} else {
- ecmd->speed = -1;
+ ethtool_cmd_speed_set(ecmd, -1);
ecmd->duplex = -1;
}
@@ -250,56 +251,6 @@ static void enic_get_ethtool_stats(struct net_device *netdev,
*(data++) = ((u64 *)&vstats->rx)[enic_rx_stats[i].offset];
}
-static u32 enic_get_rx_csum(struct net_device *netdev)
-{
- struct enic *enic = netdev_priv(netdev);
- return enic->csum_rx_enabled;
-}
-
-static int enic_set_rx_csum(struct net_device *netdev, u32 data)
-{
- struct enic *enic = netdev_priv(netdev);
-
- if (data && !ENIC_SETTING(enic, RXCSUM))
- return -EINVAL;
-
- enic->csum_rx_enabled = !!data;
-
- return 0;
-}
-
-static int enic_set_tx_csum(struct net_device *netdev, u32 data)
-{
- struct enic *enic = netdev_priv(netdev);
-
- if (data && !ENIC_SETTING(enic, TXCSUM))
- return -EINVAL;
-
- if (data)
- netdev->features |= NETIF_F_HW_CSUM;
- else
- netdev->features &= ~NETIF_F_HW_CSUM;
-
- return 0;
-}
-
-static int enic_set_tso(struct net_device *netdev, u32 data)
-{
- struct enic *enic = netdev_priv(netdev);
-
- if (data && !ENIC_SETTING(enic, TSO))
- return -EINVAL;
-
- if (data)
- netdev->features |=
- NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_TSO_ECN;
- else
- netdev->features &=
- ~(NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_TSO_ECN);
-
- return 0;
-}
-
static u32 enic_get_msglevel(struct net_device *netdev)
{
struct enic *enic = netdev_priv(netdev);
@@ -387,17 +338,8 @@ static const struct ethtool_ops enic_ethtool_ops = {
.get_strings = enic_get_strings,
.get_sset_count = enic_get_sset_count,
.get_ethtool_stats = enic_get_ethtool_stats,
- .get_rx_csum = enic_get_rx_csum,
- .set_rx_csum = enic_set_rx_csum,
- .get_tx_csum = ethtool_op_get_tx_csum,
- .set_tx_csum = enic_set_tx_csum,
- .get_sg = ethtool_op_get_sg,
- .set_sg = ethtool_op_set_sg,
- .get_tso = ethtool_op_get_tso,
- .set_tso = enic_set_tso,
.get_coalesce = enic_get_coalesce,
.set_coalesce = enic_set_coalesce,
- .get_flags = ethtool_op_get_flags,
};
static void enic_free_wq_buf(struct vnic_wq *wq, struct vnic_wq_buf *buf)
@@ -874,7 +816,7 @@ static struct net_device_stats *enic_get_stats(struct net_device *netdev)
return net_stats;
}
-static void enic_reset_addr_lists(struct enic *enic)
+void enic_reset_addr_lists(struct enic *enic)
{
enic->mc_count = 0;
enic->uc_count = 0;
@@ -1112,157 +1054,77 @@ static int enic_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
return -EINVAL;
}
-static int enic_set_port_profile(struct enic *enic, u8 *mac)
-{
- struct vic_provinfo *vp;
- u8 oui[3] = VIC_PROVINFO_CISCO_OUI;
- u16 os_type = VIC_GENERIC_PROV_OS_TYPE_LINUX;
- char uuid_str[38];
- char client_mac_str[18];
- u8 *client_mac;
- int err;
-
- err = enic_vnic_dev_deinit(enic);
- if (err)
- return err;
-
- enic_reset_addr_lists(enic);
-
- switch (enic->pp.request) {
-
- case PORT_REQUEST_ASSOCIATE:
-
- if (!(enic->pp.set & ENIC_SET_NAME) || !strlen(enic->pp.name))
- return -EINVAL;
-
- if (!is_valid_ether_addr(mac))
- return -EADDRNOTAVAIL;
-
- vp = vic_provinfo_alloc(GFP_KERNEL, oui,
- VIC_PROVINFO_GENERIC_TYPE);
- if (!vp)
- return -ENOMEM;
-
- vic_provinfo_add_tlv(vp,
- VIC_GENERIC_PROV_TLV_PORT_PROFILE_NAME_STR,
- strlen(enic->pp.name) + 1, enic->pp.name);
-
- if (!is_zero_ether_addr(enic->pp.mac_addr))
- client_mac = enic->pp.mac_addr;
- else
- client_mac = mac;
-
- vic_provinfo_add_tlv(vp,
- VIC_GENERIC_PROV_TLV_CLIENT_MAC_ADDR,
- ETH_ALEN, client_mac);
-
- sprintf(client_mac_str, "%pM", client_mac);
- vic_provinfo_add_tlv(vp,
- VIC_GENERIC_PROV_TLV_CLUSTER_PORT_UUID_STR,
- sizeof(client_mac_str), client_mac_str);
-
- if (enic->pp.set & ENIC_SET_INSTANCE) {
- sprintf(uuid_str, "%pUB", enic->pp.instance_uuid);
- vic_provinfo_add_tlv(vp,
- VIC_GENERIC_PROV_TLV_CLIENT_UUID_STR,
- sizeof(uuid_str), uuid_str);
- }
-
- if (enic->pp.set & ENIC_SET_HOST) {
- sprintf(uuid_str, "%pUB", enic->pp.host_uuid);
- vic_provinfo_add_tlv(vp,
- VIC_GENERIC_PROV_TLV_HOST_UUID_STR,
- sizeof(uuid_str), uuid_str);
- }
-
- os_type = htons(os_type);
- vic_provinfo_add_tlv(vp,
- VIC_GENERIC_PROV_TLV_OS_TYPE,
- sizeof(os_type), &os_type);
-
- err = enic_dev_init_prov(enic, vp);
- vic_provinfo_free(vp);
- if (err)
- return err;
- break;
-
- case PORT_REQUEST_DISASSOCIATE:
- break;
-
- default:
- return -EINVAL;
- }
-
- /* Set flag to indicate that the port assoc/disassoc
- * request has been sent out to fw
- */
- enic->pp.set |= ENIC_PORT_REQUEST_APPLIED;
-
- return 0;
-}
-
static int enic_set_vf_port(struct net_device *netdev, int vf,
struct nlattr *port[])
{
struct enic *enic = netdev_priv(netdev);
- struct enic_port_profile new_pp;
- int err = 0;
+ struct enic_port_profile prev_pp;
+ int err = 0, restore_pp = 1;
- memset(&new_pp, 0, sizeof(new_pp));
+ /* don't support VFs, yet */
+ if (vf != PORT_SELF_VF)
+ return -EOPNOTSUPP;
- if (port[IFLA_PORT_REQUEST]) {
- new_pp.set |= ENIC_SET_REQUEST;
- new_pp.request = nla_get_u8(port[IFLA_PORT_REQUEST]);
- }
+ if (!port[IFLA_PORT_REQUEST])
+ return -EOPNOTSUPP;
+
+ memcpy(&prev_pp, &enic->pp, sizeof(enic->pp));
+ memset(&enic->pp, 0, sizeof(enic->pp));
+
+ enic->pp.set |= ENIC_SET_REQUEST;
+ enic->pp.request = nla_get_u8(port[IFLA_PORT_REQUEST]);
if (port[IFLA_PORT_PROFILE]) {
- new_pp.set |= ENIC_SET_NAME;
- memcpy(new_pp.name, nla_data(port[IFLA_PORT_PROFILE]),
+ enic->pp.set |= ENIC_SET_NAME;
+ memcpy(enic->pp.name, nla_data(port[IFLA_PORT_PROFILE]),
PORT_PROFILE_MAX);
}
if (port[IFLA_PORT_INSTANCE_UUID]) {
- new_pp.set |= ENIC_SET_INSTANCE;
- memcpy(new_pp.instance_uuid,
+ enic->pp.set |= ENIC_SET_INSTANCE;
+ memcpy(enic->pp.instance_uuid,
nla_data(port[IFLA_PORT_INSTANCE_UUID]), PORT_UUID_MAX);
}
if (port[IFLA_PORT_HOST_UUID]) {
- new_pp.set |= ENIC_SET_HOST;
- memcpy(new_pp.host_uuid,
+ enic->pp.set |= ENIC_SET_HOST;
+ memcpy(enic->pp.host_uuid,
nla_data(port[IFLA_PORT_HOST_UUID]), PORT_UUID_MAX);
}
- /* don't support VFs, yet */
- if (vf != PORT_SELF_VF)
- return -EOPNOTSUPP;
-
- if (!(new_pp.set & ENIC_SET_REQUEST))
- return -EOPNOTSUPP;
-
- if (new_pp.request == PORT_REQUEST_ASSOCIATE) {
- /* Special case handling */
- if (!is_zero_ether_addr(enic->pp.vf_mac))
- memcpy(new_pp.mac_addr, enic->pp.vf_mac, ETH_ALEN);
+ /* Special case handling: mac came from IFLA_VF_MAC */
+ if (!is_zero_ether_addr(prev_pp.vf_mac))
+ memcpy(enic->pp.mac_addr, prev_pp.vf_mac, ETH_ALEN);
if (is_zero_ether_addr(netdev->dev_addr))
random_ether_addr(netdev->dev_addr);
- }
- memcpy(&enic->pp, &new_pp, sizeof(struct enic_port_profile));
+ err = enic_process_set_pp_request(enic, &prev_pp, &restore_pp);
+ if (err) {
+ if (restore_pp) {
+ /* Things are still the way they were: Implicit
+ * DISASSOCIATE failed
+ */
+ memcpy(&enic->pp, &prev_pp, sizeof(enic->pp));
+ } else {
+ memset(&enic->pp, 0, sizeof(enic->pp));
+ memset(netdev->dev_addr, 0, ETH_ALEN);
+ }
+ } else {
+ /* Set flag to indicate that the port assoc/disassoc
+ * request has been sent out to fw
+ */
+ enic->pp.set |= ENIC_PORT_REQUEST_APPLIED;
- err = enic_set_port_profile(enic, netdev->dev_addr);
- if (err)
- goto set_port_profile_cleanup;
+ /* If DISASSOCIATE, clean up all assigned/saved mac addresses */
+ if (enic->pp.request == PORT_REQUEST_DISASSOCIATE) {
+ memset(enic->pp.mac_addr, 0, ETH_ALEN);
+ memset(netdev->dev_addr, 0, ETH_ALEN);
+ }
+ }
-set_port_profile_cleanup:
memset(enic->pp.vf_mac, 0, ETH_ALEN);
- if (err || enic->pp.request == PORT_REQUEST_DISASSOCIATE) {
- memset(netdev->dev_addr, 0, ETH_ALEN);
- memset(enic->pp.mac_addr, 0, ETH_ALEN);
- }
-
return err;
}
@@ -1270,34 +1132,15 @@ static int enic_get_vf_port(struct net_device *netdev, int vf,
struct sk_buff *skb)
{
struct enic *enic = netdev_priv(netdev);
- int err, error, done;
u16 response = PORT_PROFILE_RESPONSE_SUCCESS;
+ int err;
if (!(enic->pp.set & ENIC_PORT_REQUEST_APPLIED))
return -ENODATA;
- err = enic_dev_init_done(enic, &done, &error);
+ err = enic_process_get_pp_request(enic, enic->pp.request, &response);
if (err)
- error = err;
-
- switch (error) {
- case ERR_SUCCESS:
- if (!done)
- response = PORT_PROFILE_RESPONSE_INPROGRESS;
- break;
- case ERR_EINVAL:
- response = PORT_PROFILE_RESPONSE_INVALID;
- break;
- case ERR_EBADSTATE:
- response = PORT_PROFILE_RESPONSE_BADSTATE;
- break;
- case ERR_ENOMEM:
- response = PORT_PROFILE_RESPONSE_INSUFFICIENT_RESOURCES;
- break;
- default:
- response = PORT_PROFILE_RESPONSE_ERROR;
- break;
- }
+ return err;
NLA_PUT_U16(skb, IFLA_PORT_REQUEST, enic->pp.request);
NLA_PUT_U16(skb, IFLA_PORT_RESPONSE, response);
@@ -1407,7 +1250,7 @@ static void enic_rq_indicate_buf(struct vnic_rq *rq,
skb_put(skb, bytes_written);
skb->protocol = eth_type_trans(skb, netdev);
- if (enic->csum_rx_enabled && !csum_not_calc) {
+ if ((netdev->features & NETIF_F_RXCSUM) && !csum_not_calc) {
skb->csum = htons(checksum);
skb->ip_summed = CHECKSUM_COMPLETE;
}
@@ -2536,17 +2379,18 @@ static int __devinit enic_probe(struct pci_dev *pdev,
dev_info(dev, "loopback tag=0x%04x\n", enic->loop_tag);
}
if (ENIC_SETTING(enic, TXCSUM))
- netdev->features |= NETIF_F_SG | NETIF_F_HW_CSUM;
+ netdev->hw_features |= NETIF_F_SG | NETIF_F_HW_CSUM;
if (ENIC_SETTING(enic, TSO))
- netdev->features |= NETIF_F_TSO |
+ netdev->hw_features |= NETIF_F_TSO |
NETIF_F_TSO6 | NETIF_F_TSO_ECN;
- if (ENIC_SETTING(enic, LRO))
- netdev->features |= NETIF_F_GRO;
+ if (ENIC_SETTING(enic, RXCSUM))
+ netdev->hw_features |= NETIF_F_RXCSUM;
+
+ netdev->features |= netdev->hw_features;
+
if (using_dac)
netdev->features |= NETIF_F_HIGHDMA;
- enic->csum_rx_enabled = ENIC_SETTING(enic, RXCSUM);
-
err = register_netdev(netdev);
if (err) {
dev_err(dev, "Cannot register net device, aborting\n");
diff --git a/drivers/net/enic/enic_pp.c b/drivers/net/enic/enic_pp.c
new file mode 100644
index 000000000000..ffaa75dd1ded
--- /dev/null
+++ b/drivers/net/enic/enic_pp.c
@@ -0,0 +1,264 @@
+/*
+ * Copyright 2011 Cisco Systems, Inc. All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/errno.h>
+#include <linux/types.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/rtnetlink.h>
+#include <net/ip.h>
+
+#include "vnic_vic.h"
+#include "enic_res.h"
+#include "enic.h"
+#include "enic_dev.h"
+
+static int enic_set_port_profile(struct enic *enic)
+{
+ struct net_device *netdev = enic->netdev;
+ struct vic_provinfo *vp;
+ const u8 oui[3] = VIC_PROVINFO_CISCO_OUI;
+ const u16 os_type = htons(VIC_GENERIC_PROV_OS_TYPE_LINUX);
+ char uuid_str[38];
+ char client_mac_str[18];
+ u8 *client_mac;
+ int err;
+
+ if (!(enic->pp.set & ENIC_SET_NAME) || !strlen(enic->pp.name))
+ return -EINVAL;
+
+ vp = vic_provinfo_alloc(GFP_KERNEL, oui,
+ VIC_PROVINFO_GENERIC_TYPE);
+ if (!vp)
+ return -ENOMEM;
+
+ VIC_PROVINFO_ADD_TLV(vp,
+ VIC_GENERIC_PROV_TLV_PORT_PROFILE_NAME_STR,
+ strlen(enic->pp.name) + 1, enic->pp.name);
+
+ if (!is_zero_ether_addr(enic->pp.mac_addr))
+ client_mac = enic->pp.mac_addr;
+ else
+ client_mac = netdev->dev_addr;
+
+ VIC_PROVINFO_ADD_TLV(vp,
+ VIC_GENERIC_PROV_TLV_CLIENT_MAC_ADDR,
+ ETH_ALEN, client_mac);
+
+ snprintf(client_mac_str, sizeof(client_mac_str), "%pM", client_mac);
+ VIC_PROVINFO_ADD_TLV(vp,
+ VIC_GENERIC_PROV_TLV_CLUSTER_PORT_UUID_STR,
+ sizeof(client_mac_str), client_mac_str);
+
+ if (enic->pp.set & ENIC_SET_INSTANCE) {
+ sprintf(uuid_str, "%pUB", enic->pp.instance_uuid);
+ VIC_PROVINFO_ADD_TLV(vp,
+ VIC_GENERIC_PROV_TLV_CLIENT_UUID_STR,
+ sizeof(uuid_str), uuid_str);
+ }
+
+ if (enic->pp.set & ENIC_SET_HOST) {
+ sprintf(uuid_str, "%pUB", enic->pp.host_uuid);
+ VIC_PROVINFO_ADD_TLV(vp,
+ VIC_GENERIC_PROV_TLV_HOST_UUID_STR,
+ sizeof(uuid_str), uuid_str);
+ }
+
+ VIC_PROVINFO_ADD_TLV(vp,
+ VIC_GENERIC_PROV_TLV_OS_TYPE,
+ sizeof(os_type), &os_type);
+
+ err = enic_dev_status_to_errno(enic_dev_init_prov2(enic, vp));
+
+add_tlv_failure:
+ vic_provinfo_free(vp);
+
+ return err;
+}
+
+static int enic_unset_port_profile(struct enic *enic)
+{
+ int err;
+
+ err = enic_vnic_dev_deinit(enic);
+ if (err)
+ return enic_dev_status_to_errno(err);
+
+ enic_reset_addr_lists(enic);
+
+ return 0;
+}
+
+static int enic_are_pp_different(struct enic_port_profile *pp1,
+ struct enic_port_profile *pp2)
+{
+ return strcmp(pp1->name, pp2->name) | !!memcmp(pp1->instance_uuid,
+ pp2->instance_uuid, PORT_UUID_MAX) |
+ !!memcmp(pp1->host_uuid, pp2->host_uuid, PORT_UUID_MAX) |
+ !!memcmp(pp1->mac_addr, pp2->mac_addr, ETH_ALEN);
+}
+
+static int enic_pp_preassociate(struct enic *enic,
+ struct enic_port_profile *prev_pp, int *restore_pp);
+static int enic_pp_disassociate(struct enic *enic,
+ struct enic_port_profile *prev_pp, int *restore_pp);
+static int enic_pp_preassociate_rr(struct enic *enic,
+ struct enic_port_profile *prev_pp, int *restore_pp);
+static int enic_pp_associate(struct enic *enic,
+ struct enic_port_profile *prev_pp, int *restore_pp);
+
+static int (*enic_pp_handlers[])(struct enic *enic,
+ struct enic_port_profile *prev_state, int *restore_pp) = {
+ [PORT_REQUEST_PREASSOCIATE] = enic_pp_preassociate,
+ [PORT_REQUEST_PREASSOCIATE_RR] = enic_pp_preassociate_rr,
+ [PORT_REQUEST_ASSOCIATE] = enic_pp_associate,
+ [PORT_REQUEST_DISASSOCIATE] = enic_pp_disassociate,
+};
+
+static const int enic_pp_handlers_count =
+ sizeof(enic_pp_handlers)/sizeof(*enic_pp_handlers);
+
+static int enic_pp_preassociate(struct enic *enic,
+ struct enic_port_profile *prev_pp, int *restore_pp)
+{
+ return -EOPNOTSUPP;
+}
+
+static int enic_pp_disassociate(struct enic *enic,
+ struct enic_port_profile *prev_pp, int *restore_pp)
+{
+ return enic_unset_port_profile(enic);
+}
+
+static int enic_pp_preassociate_rr(struct enic *enic,
+ struct enic_port_profile *prev_pp, int *restore_pp)
+{
+ int err;
+ int active = 0;
+
+ if (enic->pp.request != PORT_REQUEST_ASSOCIATE) {
+ /* If this pre-associate is not part of an associate,
+ we always disassociate first */
+ err = enic_pp_handlers[PORT_REQUEST_DISASSOCIATE](enic,
+ prev_pp, restore_pp);
+ if (err)
+ return err;
+
+ *restore_pp = 0;
+ }
+
+ *restore_pp = 0;
+
+ err = enic_set_port_profile(enic);
+ if (err)
+ return err;
+
+ /* If this pre-associate is not part of an associate, enable now (standby) */
+ if (enic->pp.request != PORT_REQUEST_ASSOCIATE)
+ err = enic_dev_status_to_errno(enic_dev_enable2(enic, active));
+
+ return err;
+}
+
+static int enic_pp_associate(struct enic *enic,
+ struct enic_port_profile *prev_pp, int *restore_pp)
+{
+ int err;
+ int active = 1;
+
+ /* Check if a pre-associate was called before */
+ if (prev_pp->request != PORT_REQUEST_PREASSOCIATE_RR ||
+ (prev_pp->request == PORT_REQUEST_PREASSOCIATE_RR &&
+ enic_are_pp_different(prev_pp, &enic->pp))) {
+ err = enic_pp_handlers[PORT_REQUEST_DISASSOCIATE](
+ enic, prev_pp, restore_pp);
+ if (err)
+ return err;
+
+ *restore_pp = 0;
+ }
+
+ err = enic_pp_handlers[PORT_REQUEST_PREASSOCIATE_RR](
+ enic, prev_pp, restore_pp);
+ if (err)
+ return err;
+
+ *restore_pp = 0;
+
+ return enic_dev_status_to_errno(enic_dev_enable2(enic, active));
+}
+
+int enic_process_set_pp_request(struct enic *enic,
+ struct enic_port_profile *prev_pp, int *restore_pp)
+{
+ if (enic->pp.request < enic_pp_handlers_count
+ && enic_pp_handlers[enic->pp.request])
+ return enic_pp_handlers[enic->pp.request](enic,
+ prev_pp, restore_pp);
+ else
+ return -EOPNOTSUPP;
+}
+
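+/* Map the devcmd completion status for the given port request onto a
+ * PORT_PROFILE_RESPONSE_* code for the caller. */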
+int enic_process_get_pp_request(struct enic *enic, int request,
+ u16 *response)
+{
+ int err, status = ERR_SUCCESS;
+
+ switch (request) {
+
+ case PORT_REQUEST_PREASSOCIATE_RR:
+ case PORT_REQUEST_ASSOCIATE:
+ err = enic_dev_enable2_done(enic, &status);
+ break;
+
+ case PORT_REQUEST_DISASSOCIATE:
+ err = enic_dev_deinit_done(enic, &status);
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ if (err)
+ status = err;
+
+ switch (status) {
+ case ERR_SUCCESS:
+ *response = PORT_PROFILE_RESPONSE_SUCCESS;
+ break;
+ case ERR_EINVAL:
+ *response = PORT_PROFILE_RESPONSE_INVALID;
+ break;
+ case ERR_EBADSTATE:
+ *response = PORT_PROFILE_RESPONSE_BADSTATE;
+ break;
+ case ERR_ENOMEM:
+ *response = PORT_PROFILE_RESPONSE_INSUFFICIENT_RESOURCES;
+ break;
+ case ERR_EINPROGRESS:
+ *response = PORT_PROFILE_RESPONSE_INPROGRESS;
+ break;
+ default:
+ *response = PORT_PROFILE_RESPONSE_ERROR;
+ break;
+ }
+
+ return 0;
+}
diff --git a/drivers/net/enic/enic_pp.h b/drivers/net/enic/enic_pp.h
new file mode 100644
index 000000000000..699e365a944d
--- /dev/null
+++ b/drivers/net/enic/enic_pp.h
@@ -0,0 +1,27 @@
+/*
+ * Copyright 2011 Cisco Systems, Inc. All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+
+#ifndef _ENIC_PP_H_
+#define _ENIC_PP_H_
+
+int enic_process_set_pp_request(struct enic *enic,
+ struct enic_port_profile *prev_pp, int *restore_pp);
+int enic_process_get_pp_request(struct enic *enic, int request,
+ u16 *response);
+
+#endif /* _ENIC_PP_H_ */
diff --git a/drivers/net/enic/enic_res.c b/drivers/net/enic/enic_res.c
index f111a37419ce..6e5c6356e7df 100644
--- a/drivers/net/enic/enic_res.c
+++ b/drivers/net/enic/enic_res.c
@@ -98,9 +98,9 @@ int enic_get_vnic_config(struct enic *enic)
"vNIC MAC addr %pM wq/rq %d/%d mtu %d\n",
enic->mac_addr, c->wq_desc_count, c->rq_desc_count, c->mtu);
dev_info(enic_get_dev(enic), "vNIC csum tx/rx %d/%d "
- "tso/lro %d/%d intr timer %d usec rss %d\n",
+ "tso %d intr timer %d usec rss %d\n",
ENIC_SETTING(enic, TXCSUM), ENIC_SETTING(enic, RXCSUM),
- ENIC_SETTING(enic, TSO), ENIC_SETTING(enic, LRO),
+ ENIC_SETTING(enic, TSO),
c->intr_timer_usec, ENIC_SETTING(enic, RSS));
return 0;
diff --git a/drivers/net/enic/vnic_dev.c b/drivers/net/enic/vnic_dev.c
index c089b362a36f..68f24ae860ae 100644
--- a/drivers/net/enic/vnic_dev.c
+++ b/drivers/net/enic/vnic_dev.c
@@ -786,48 +786,6 @@ int vnic_dev_init(struct vnic_dev *vdev, int arg)
return r;
}
-int vnic_dev_init_done(struct vnic_dev *vdev, int *done, int *err)
-{
- u64 a0 = 0, a1 = 0;
- int wait = 1000;
- int ret;
-
- *done = 0;
-
- ret = vnic_dev_cmd(vdev, CMD_INIT_STATUS, &a0, &a1, wait);
- if (ret)
- return ret;
-
- *done = (a0 == 0);
-
- *err = (a0 == 0) ? (int)a1:0;
-
- return 0;
-}
-
-int vnic_dev_init_prov(struct vnic_dev *vdev, u8 *buf, u32 len)
-{
- u64 a0, a1 = len;
- int wait = 1000;
- dma_addr_t prov_pa;
- void *prov_buf;
- int ret;
-
- prov_buf = pci_alloc_consistent(vdev->pdev, len, &prov_pa);
- if (!prov_buf)
- return -ENOMEM;
-
- memcpy(prov_buf, buf, len);
-
- a0 = prov_pa;
-
- ret = vnic_dev_cmd(vdev, CMD_INIT_PROV_INFO, &a0, &a1, wait);
-
- pci_free_consistent(vdev->pdev, len, prov_buf, prov_pa);
-
- return ret;
-}
-
int vnic_dev_deinit(struct vnic_dev *vdev)
{
u64 a0 = 0, a1 = 0;
@@ -927,4 +885,59 @@ err_out:
return NULL;
}
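+/* Write provisioning info to the firmware without enabling the vNIC
+ * (CMD_INIT_PROV_INFO2); the vNIC is enabled later via CMD_ENABLE2. */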
+int vnic_dev_init_prov2(struct vnic_dev *vdev, u8 *buf, u32 len)
+{
+ u64 a0, a1 = len;
+ int wait = 1000;
+ dma_addr_t prov_pa;
+ void *prov_buf;
+ int ret;
+
+ prov_buf = pci_alloc_consistent(vdev->pdev, len, &prov_pa);
+ if (!prov_buf)
+ return -ENOMEM;
+ memcpy(prov_buf, buf, len);
+
+ a0 = prov_pa;
+
+ ret = vnic_dev_cmd(vdev, CMD_INIT_PROV_INFO2, &a0, &a1, wait);
+
+ pci_free_consistent(vdev->pdev, len, prov_buf, prov_pa);
+
+ return ret;
+}
+
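+/* Move the vNIC to active or standby state (CMD_ENABLE2). */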
+int vnic_dev_enable2(struct vnic_dev *vdev, int active)
+{
+ u64 a0, a1 = 0;
+ int wait = 1000;
+
+ a0 = (active ? CMD_ENABLE2_ACTIVE : 0);
+
+ return vnic_dev_cmd(vdev, CMD_ENABLE2, &a0, &a1, wait);
+}
+
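+/* Poll the completion status of a previously issued devcmd (CMD_STATUS). */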
+static int vnic_dev_cmd_status(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd,
+ int *status)
+{
+ u64 a0 = cmd, a1 = 0;
+ int wait = 1000;
+ int ret;
+
+ ret = vnic_dev_cmd(vdev, CMD_STATUS, &a0, &a1, wait);
+ if (!ret)
+ *status = (int)a0;
+
+ return ret;
+}
+
+int vnic_dev_enable2_done(struct vnic_dev *vdev, int *status)
+{
+ return vnic_dev_cmd_status(vdev, CMD_ENABLE2, status);
+}
+
+int vnic_dev_deinit_done(struct vnic_dev *vdev, int *status)
+{
+ return vnic_dev_cmd_status(vdev, CMD_DEINIT, status);
+}
diff --git a/drivers/net/enic/vnic_dev.h b/drivers/net/enic/vnic_dev.h
index e837546213a8..cf482a2c9dd9 100644
--- a/drivers/net/enic/vnic_dev.h
+++ b/drivers/net/enic/vnic_dev.h
@@ -108,8 +108,6 @@ int vnic_dev_disable(struct vnic_dev *vdev);
int vnic_dev_open(struct vnic_dev *vdev, int arg);
int vnic_dev_open_done(struct vnic_dev *vdev, int *done);
int vnic_dev_init(struct vnic_dev *vdev, int arg);
-int vnic_dev_init_done(struct vnic_dev *vdev, int *done, int *err);
-int vnic_dev_init_prov(struct vnic_dev *vdev, u8 *buf, u32 len);
int vnic_dev_deinit(struct vnic_dev *vdev);
int vnic_dev_hang_reset(struct vnic_dev *vdev, int arg);
int vnic_dev_hang_reset_done(struct vnic_dev *vdev, int *done);
@@ -122,5 +120,9 @@ int vnic_dev_set_ig_vlan_rewrite_mode(struct vnic_dev *vdev,
struct vnic_dev *vnic_dev_register(struct vnic_dev *vdev,
void *priv, struct pci_dev *pdev, struct vnic_dev_bar *bar,
unsigned int num_bars);
+int vnic_dev_init_prov2(struct vnic_dev *vdev, u8 *buf, u32 len);
+int vnic_dev_enable2(struct vnic_dev *vdev, int active);
+int vnic_dev_enable2_done(struct vnic_dev *vdev, int *status);
+int vnic_dev_deinit_done(struct vnic_dev *vdev, int *status);
#endif /* _VNIC_DEV_H_ */
diff --git a/drivers/net/enic/vnic_devcmd.h b/drivers/net/enic/vnic_devcmd.h
index d833a071bac5..c5569bfb47ac 100644
--- a/drivers/net/enic/vnic_devcmd.h
+++ b/drivers/net/enic/vnic_devcmd.h
@@ -267,17 +267,62 @@ enum vnic_devcmd_cmd {
/*
* As for BY_BDF except a0 is index of hvnlink subordinate vnic
- * or SR-IOV virtual vnic */
+ * or SR-IOV virtual vnic
+ */
CMD_PROXY_BY_INDEX = _CMDC(_CMD_DIR_RW, _CMD_VTYPE_ALL, 43),
/*
- * in: (u64)a0=paddr of buffer to put latest VIC VIF-CONFIG-INFO TLV in
- * (u32)a1=length of buffer in a0
- * out: (u64)a0=paddr of buffer with latest VIC VIF-CONFIG-INFO TLV
- * (u32)a1=actual length of latest VIC VIF-CONFIG-INFO TLV */
+ * For HPP toggle:
+ * adapter-info-get
+ * in: (u64)a0=physical address of buffer passed in from caller.
+ * (u16)a1=size of buffer specified in a0.
+ * out: (u64)a0=physical address of buffer passed in from caller.
+ * (u16)a1=actual bytes from VIF-CONFIG-INFO TLV, or
+ * 0 if no VIF-CONFIG-INFO TLV was ever received. */
CMD_CONFIG_INFO_GET = _CMDC(_CMD_DIR_RW, _CMD_VTYPE_ALL, 44),
+
+ /* init_prov_info2:
+ * Variant of CMD_INIT_PROV_INFO, where it will not try to enable
+ * the vnic until CMD_ENABLE2 is issued.
+ * (u64)a0=paddr of vnic_devcmd_provinfo
+ * (u32)a1=sizeof provision info */
+ CMD_INIT_PROV_INFO2 = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 47),
+
+ /* enable2:
+ * (u32)a0=0 ==> standby
+ * =CMD_ENABLE2_ACTIVE ==> active
+ */
+ CMD_ENABLE2 = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 48),
+
+ /*
+ * cmd_status:
+ * Returns the status of the specified command
+ * Input:
+ * a0 = command for which status is being queried.
+ * Possible values are:
+ * CMD_SOFT_RESET
+ * CMD_HANG_RESET
+ * CMD_OPEN
+ * CMD_INIT
+ * CMD_INIT_PROV_INFO
+ * CMD_DEINIT
+ * CMD_INIT_PROV_INFO2
+ * CMD_ENABLE2
+ * Output:
+ * if status == STAT_ERROR
+ * a0 = ERR_ENOTSUPPORTED - status for command in a0 is
+ * not supported
+ * if status == STAT_NONE
+ * a0 = status of the devcmd specified in a0 as follows.
+ * ERR_SUCCESS - command in a0 completed successfully
+ * ERR_EINPROGRESS - command in a0 is still in progress
+ */
+ CMD_STATUS = _CMDC(_CMD_DIR_RW, _CMD_VTYPE_ALL, 49),
};
+/* CMD_ENABLE2 flags */
+#define CMD_ENABLE2_ACTIVE 0x1
+
/* flags for CMD_OPEN */
#define CMD_OPENF_OPROM 0x1 /* open coming from option rom */
@@ -315,6 +360,8 @@ enum vnic_devcmd_error {
ERR_ETIMEDOUT = 8,
ERR_ELINKDOWN = 9,
ERR_EMAXRES = 10,
+ ERR_ENOTSUPPORTED = 11,
+ ERR_EINPROGRESS = 12,
};
/*
diff --git a/drivers/net/enic/vnic_vic.c b/drivers/net/enic/vnic_vic.c
index 4725b79de0ef..24ef8cd40545 100644
--- a/drivers/net/enic/vnic_vic.c
+++ b/drivers/net/enic/vnic_vic.c
@@ -23,7 +23,8 @@
#include "vnic_vic.h"
-struct vic_provinfo *vic_provinfo_alloc(gfp_t flags, u8 *oui, u8 type)
+struct vic_provinfo *vic_provinfo_alloc(gfp_t flags, const u8 *oui,
+ const u8 type)
{
struct vic_provinfo *vp;
@@ -47,7 +48,7 @@ void vic_provinfo_free(struct vic_provinfo *vp)
}
int vic_provinfo_add_tlv(struct vic_provinfo *vp, u16 type, u16 length,
- void *value)
+ const void *value)
{
struct vic_provinfo_tlv *tlv;
diff --git a/drivers/net/enic/vnic_vic.h b/drivers/net/enic/vnic_vic.h
index f700f5d9e81d..9ef81f148351 100644
--- a/drivers/net/enic/vnic_vic.h
+++ b/drivers/net/enic/vnic_vic.h
@@ -47,6 +47,7 @@ enum vic_generic_prov_os_type {
VIC_GENERIC_PROV_OS_TYPE_ESX = 1,
VIC_GENERIC_PROV_OS_TYPE_LINUX = 2,
VIC_GENERIC_PROV_OS_TYPE_WINDOWS = 3,
+ VIC_GENERIC_PROV_OS_TYPE_SOLARIS = 4,
};
struct vic_provinfo {
@@ -61,14 +62,22 @@ struct vic_provinfo {
} tlv[0];
} __packed;
+#define VIC_PROVINFO_ADD_TLV(vp, tlvtype, tlvlen, data) \
+ do { \
+ err = vic_provinfo_add_tlv(vp, tlvtype, tlvlen, data); \
+ if (err) \
+ goto add_tlv_failure; \
+ } while (0)
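+/* Callers of VIC_PROVINFO_ADD_TLV() must declare a local 'err' and provide
+ * an add_tlv_failure label for the error path. */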
+
#define VIC_PROVINFO_MAX_DATA 1385
#define VIC_PROVINFO_MAX_TLV_DATA (VIC_PROVINFO_MAX_DATA - \
sizeof(struct vic_provinfo))
-struct vic_provinfo *vic_provinfo_alloc(gfp_t flags, u8 *oui, u8 type);
+struct vic_provinfo *vic_provinfo_alloc(gfp_t flags, const u8 *oui,
+ const u8 type);
void vic_provinfo_free(struct vic_provinfo *vp);
int vic_provinfo_add_tlv(struct vic_provinfo *vp, u16 type, u16 length,
- void *value);
+ const void *value);
size_t vic_provinfo_size(struct vic_provinfo *vp);
#endif /* _VNIC_VIC_H_ */
diff --git a/drivers/net/eth16i.c b/drivers/net/eth16i.c
index fb717be511f6..12d28e9d0cb7 100644
--- a/drivers/net/eth16i.c
+++ b/drivers/net/eth16i.c
@@ -13,7 +13,7 @@
This driver supports following cards :
- ICL EtherTeam 16i
- ICL EtherTeam 32 EISA
- (Uses true 32 bit transfers rather than 16i compability mode)
+ (Uses true 32 bit transfers rather than 16i compatibility mode)
Example Module usage:
insmod eth16i.o io=0x2a0 mediatype=bnc
diff --git a/drivers/net/ethoc.c b/drivers/net/ethoc.c
index db0290f05bdf..a83dd312c3ac 100644
--- a/drivers/net/ethoc.c
+++ b/drivers/net/ethoc.c
@@ -542,7 +542,7 @@ static irqreturn_t ethoc_interrupt(int irq, void *dev_id)
/* Figure out what triggered the interrupt...
* The tricky bit here is that the interrupt source bits get
- * set in INT_SOURCE for an event irregardless of whether that
+ * set in INT_SOURCE for an event regardless of whether that
* event is masked or not. Thus, in order to figure out what
* triggered the interrupt, we need to remove the sources
* for all events that are currently masked. This behaviour
diff --git a/drivers/net/ewrk3.c b/drivers/net/ewrk3.c
index 380d0614a89a..b5f6173130f4 100644
--- a/drivers/net/ewrk3.c
+++ b/drivers/net/ewrk3.c
@@ -1545,7 +1545,7 @@ static int ewrk3_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
}
ecmd->supported |= SUPPORTED_10baseT_Half;
- ecmd->speed = SPEED_10;
+ ethtool_cmd_speed_set(ecmd, SPEED_10);
ecmd->duplex = DUPLEX_HALF;
return 0;
}
@@ -1604,55 +1604,47 @@ static u32 ewrk3_get_link(struct net_device *dev)
return !(cmr & CMR_LINK);
}
-static int ewrk3_phys_id(struct net_device *dev, u32 data)
+static int ewrk3_set_phys_id(struct net_device *dev,
+ enum ethtool_phys_id_state state)
{
struct ewrk3_private *lp = netdev_priv(dev);
unsigned long iobase = dev->base_addr;
- unsigned long flags;
u8 cr;
- int count;
-
- /* Toggle LED 4x per second */
- count = data << 2;
- spin_lock_irqsave(&lp->hw_lock, flags);
-
- /* Bail if a PHYS_ID is already in progress */
- if (lp->led_mask == 0) {
- spin_unlock_irqrestore(&lp->hw_lock, flags);
- return -EBUSY;
- }
+ spin_lock_irq(&lp->hw_lock);
- /* Prevent ISR from twiddling the LED */
- lp->led_mask = 0;
+ switch (state) {
+ case ETHTOOL_ID_ACTIVE:
+ /* Prevent ISR from twiddling the LED */
+ lp->led_mask = 0;
+ spin_unlock_irq(&lp->hw_lock);
+ return 2; /* cycle on/off twice per second */
- while (count--) {
- /* Toggle the LED */
+ case ETHTOOL_ID_ON:
cr = inb(EWRK3_CR);
- outb(cr ^ CR_LED, EWRK3_CR);
+ outb(cr | CR_LED, EWRK3_CR);
+ break;
- /* Wait a little while */
- spin_unlock_irqrestore(&lp->hw_lock, flags);
- msleep(250);
- spin_lock_irqsave(&lp->hw_lock, flags);
+ case ETHTOOL_ID_OFF:
+ cr = inb(EWRK3_CR);
+ outb(cr & ~CR_LED, EWRK3_CR);
+ break;
- /* Exit if we got a signal */
- if (signal_pending(current))
- break;
+ case ETHTOOL_ID_INACTIVE:
+ lp->led_mask = CR_LED;
+ cr = inb(EWRK3_CR);
+ outb(cr & ~CR_LED, EWRK3_CR);
}
+ spin_unlock_irq(&lp->hw_lock);
- lp->led_mask = CR_LED;
- cr = inb(EWRK3_CR);
- outb(cr & ~CR_LED, EWRK3_CR);
- spin_unlock_irqrestore(&lp->hw_lock, flags);
- return signal_pending(current) ? -ERESTARTSYS : 0;
+ return 0;
}
static const struct ethtool_ops ethtool_ops_203 = {
.get_drvinfo = ewrk3_get_drvinfo,
.get_settings = ewrk3_get_settings,
.set_settings = ewrk3_set_settings,
- .phys_id = ewrk3_phys_id,
+ .set_phys_id = ewrk3_set_phys_id,
};
static const struct ethtool_ops ethtool_ops = {
@@ -1660,7 +1652,7 @@ static const struct ethtool_ops ethtool_ops = {
.get_settings = ewrk3_get_settings,
.set_settings = ewrk3_set_settings,
.get_link = ewrk3_get_link,
- .phys_id = ewrk3_phys_id,
+ .set_phys_id = ewrk3_set_phys_id,
};
/*
diff --git a/drivers/net/fec.h b/drivers/net/fec.h
index ace318df4c8d..8b2c6d797e6d 100644
--- a/drivers/net/fec.h
+++ b/drivers/net/fec.h
@@ -97,11 +97,11 @@ struct bufdesc {
* The following definitions courtesy of commproc.h, which where
* Copyright (c) 1997 Dan Malek (dmalek@jlc.net).
*/
-#define BD_SC_EMPTY ((ushort)0x8000) /* Recieve is empty */
+#define BD_SC_EMPTY ((ushort)0x8000) /* Receive is empty */
#define BD_SC_READY ((ushort)0x8000) /* Transmit is ready */
#define BD_SC_WRAP ((ushort)0x2000) /* Last buffer descriptor */
#define BD_SC_INTRPT ((ushort)0x1000) /* Interrupt on change */
-#define BD_SC_CM ((ushort)0x0200) /* Continous mode */
+#define BD_SC_CM ((ushort)0x0200) /* Continuous mode */
#define BD_SC_ID ((ushort)0x0100) /* Rec'd too many idles */
#define BD_SC_P ((ushort)0x0100) /* xmt preamble */
#define BD_SC_BR ((ushort)0x0020) /* Break received */
diff --git a/drivers/net/forcedeth.c b/drivers/net/forcedeth.c
index 7b92897ca66b..d09e8b0add01 100644
--- a/drivers/net/forcedeth.c
+++ b/drivers/net/forcedeth.c
@@ -440,7 +440,7 @@ union ring_type {
#define NV_RX3_VLAN_TAG_PRESENT (1<<16)
#define NV_RX3_VLAN_TAG_MASK (0x0000FFFF)
-/* Miscelaneous hardware related defines: */
+/* Miscellaneous hardware related defines: */
#define NV_PCI_REGSZ_VER1 0x270
#define NV_PCI_REGSZ_VER2 0x2d4
#define NV_PCI_REGSZ_VER3 0x604
@@ -774,7 +774,6 @@ struct fe_priv {
u32 driver_data;
u32 device_id;
u32 register_size;
- int rx_csum;
u32 mac_in_use;
int mgmt_version;
int mgmt_sema;
@@ -1488,7 +1487,7 @@ static int phy_init(struct net_device *dev)
}
}
- /* some phys clear out pause advertisment on reset, set it back */
+ /* some phys clear out pause advertisement on reset, set it back */
mii_rw(dev, np->phyaddr, MII_ADVERTISE, reg);
/* restart auto negotiation, power down phy */
@@ -2535,7 +2534,7 @@ static void nv_tx_timeout(struct net_device *dev)
else
nv_tx_done_optimized(dev, np->tx_ring_size);
- /* save current HW postion */
+ /* save current HW position */
if (np->tx_change_owner)
put_tx.ex = np->tx_change_owner->first_tx_desc;
else
@@ -3956,6 +3955,7 @@ static int nv_set_wol(struct net_device *dev, struct ethtool_wolinfo *wolinfo)
static int nv_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
{
struct fe_priv *np = netdev_priv(dev);
+ u32 speed;
int adv;
spin_lock_irq(&np->lock);
@@ -3975,23 +3975,26 @@ static int nv_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
if (netif_carrier_ok(dev)) {
switch (np->linkspeed & (NVREG_LINKSPEED_MASK)) {
case NVREG_LINKSPEED_10:
- ecmd->speed = SPEED_10;
+ speed = SPEED_10;
break;
case NVREG_LINKSPEED_100:
- ecmd->speed = SPEED_100;
+ speed = SPEED_100;
break;
case NVREG_LINKSPEED_1000:
- ecmd->speed = SPEED_1000;
+ speed = SPEED_1000;
+ break;
+ default:
+ speed = -1;
break;
}
ecmd->duplex = DUPLEX_HALF;
if (np->duplex)
ecmd->duplex = DUPLEX_FULL;
} else {
- ecmd->speed = -1;
+ speed = -1;
ecmd->duplex = -1;
}
-
+ ethtool_cmd_speed_set(ecmd, speed);
ecmd->autoneg = np->autoneg;
ecmd->advertising = ADVERTISED_MII;
@@ -4030,6 +4033,7 @@ static int nv_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
static int nv_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
{
struct fe_priv *np = netdev_priv(dev);
+ u32 speed = ethtool_cmd_speed(ecmd);
if (ecmd->port != PORT_MII)
return -EINVAL;
@@ -4053,9 +4057,9 @@ static int nv_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
} else if (ecmd->autoneg == AUTONEG_DISABLE) {
/* Note: autonegotiation disable, speed 1000 intentionally
- * forbidden - noone should need that. */
+ * forbidden - no one should need that. */
- if (ecmd->speed != SPEED_10 && ecmd->speed != SPEED_100)
+ if (speed != SPEED_10 && speed != SPEED_100)
return -EINVAL;
if (ecmd->duplex != DUPLEX_HALF && ecmd->duplex != DUPLEX_FULL)
return -EINVAL;
@@ -4103,7 +4107,7 @@ static int nv_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
adv |= ADVERTISE_100HALF;
if (ecmd->advertising & ADVERTISED_100baseT_Full)
adv |= ADVERTISE_100FULL;
- if (np->pause_flags & NV_PAUSEFRAME_RX_REQ) /* for rx we set both advertisments but disable tx pause */
+ if (np->pause_flags & NV_PAUSEFRAME_RX_REQ) /* for rx we set both advertisements but disable tx pause */
adv |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
if (np->pause_flags & NV_PAUSEFRAME_TX_REQ)
adv |= ADVERTISE_PAUSE_ASYM;
@@ -4139,16 +4143,16 @@ static int nv_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ);
adv &= ~(ADVERTISE_ALL | ADVERTISE_100BASE4 | ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
- if (ecmd->speed == SPEED_10 && ecmd->duplex == DUPLEX_HALF)
+ if (speed == SPEED_10 && ecmd->duplex == DUPLEX_HALF)
adv |= ADVERTISE_10HALF;
- if (ecmd->speed == SPEED_10 && ecmd->duplex == DUPLEX_FULL)
+ if (speed == SPEED_10 && ecmd->duplex == DUPLEX_FULL)
adv |= ADVERTISE_10FULL;
- if (ecmd->speed == SPEED_100 && ecmd->duplex == DUPLEX_HALF)
+ if (speed == SPEED_100 && ecmd->duplex == DUPLEX_HALF)
adv |= ADVERTISE_100HALF;
- if (ecmd->speed == SPEED_100 && ecmd->duplex == DUPLEX_FULL)
+ if (speed == SPEED_100 && ecmd->duplex == DUPLEX_FULL)
adv |= ADVERTISE_100FULL;
np->pause_flags &= ~(NV_PAUSEFRAME_AUTONEG|NV_PAUSEFRAME_RX_ENABLE|NV_PAUSEFRAME_TX_ENABLE);
- if (np->pause_flags & NV_PAUSEFRAME_RX_REQ) {/* for rx we set both advertisments but disable tx pause */
+ if (np->pause_flags & NV_PAUSEFRAME_RX_REQ) {/* for rx we set both advertisements but disable tx pause */
adv |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
np->pause_flags |= NV_PAUSEFRAME_RX_ENABLE;
}
@@ -4264,16 +4268,6 @@ static int nv_nway_reset(struct net_device *dev)
return ret;
}
-static int nv_set_tso(struct net_device *dev, u32 value)
-{
- struct fe_priv *np = netdev_priv(dev);
-
- if ((np->driver_data & DEV_HAS_CHECKSUM))
- return ethtool_op_set_tso(dev, value);
- else
- return -EOPNOTSUPP;
-}
-
static void nv_get_ringparam(struct net_device *dev, struct ethtool_ringparam* ring)
{
struct fe_priv *np = netdev_priv(dev);
@@ -4449,7 +4443,7 @@ static int nv_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam*
adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ);
adv &= ~(ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
- if (np->pause_flags & NV_PAUSEFRAME_RX_REQ) /* for rx we set both advertisments but disable tx pause */
+ if (np->pause_flags & NV_PAUSEFRAME_RX_REQ) /* for rx we set both advertisements but disable tx pause */
adv |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
if (np->pause_flags & NV_PAUSEFRAME_TX_REQ)
adv |= ADVERTISE_PAUSE_ASYM;
@@ -4480,58 +4474,36 @@ static int nv_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam*
return 0;
}
-static u32 nv_get_rx_csum(struct net_device *dev)
+static u32 nv_fix_features(struct net_device *dev, u32 features)
{
- struct fe_priv *np = netdev_priv(dev);
- return np->rx_csum != 0;
+ /* vlan is dependent on rx checksum offload */
+ if (features & (NETIF_F_HW_VLAN_TX|NETIF_F_HW_VLAN_RX))
+ features |= NETIF_F_RXCSUM;
+
+ return features;
}
-static int nv_set_rx_csum(struct net_device *dev, u32 data)
+static int nv_set_features(struct net_device *dev, u32 features)
{
struct fe_priv *np = netdev_priv(dev);
u8 __iomem *base = get_hwbase(dev);
- int retcode = 0;
-
- if (np->driver_data & DEV_HAS_CHECKSUM) {
- if (data) {
- np->rx_csum = 1;
- np->txrxctl_bits |= NVREG_TXRXCTL_RXCHECK;
- } else {
- np->rx_csum = 0;
- /* vlan is dependent on rx checksum offload */
- if (!(np->vlanctl_bits & NVREG_VLANCONTROL_ENABLE))
- np->txrxctl_bits &= ~NVREG_TXRXCTL_RXCHECK;
- }
- if (netif_running(dev)) {
- spin_lock_irq(&np->lock);
- writel(np->txrxctl_bits, base + NvRegTxRxControl);
- spin_unlock_irq(&np->lock);
- }
- } else {
- return -EINVAL;
- }
+ u32 changed = dev->features ^ features;
- return retcode;
-}
+ if (changed & NETIF_F_RXCSUM) {
+ spin_lock_irq(&np->lock);
-static int nv_set_tx_csum(struct net_device *dev, u32 data)
-{
- struct fe_priv *np = netdev_priv(dev);
+ if (features & NETIF_F_RXCSUM)
+ np->txrxctl_bits |= NVREG_TXRXCTL_RXCHECK;
+ else
+ np->txrxctl_bits &= ~NVREG_TXRXCTL_RXCHECK;
- if (np->driver_data & DEV_HAS_CHECKSUM)
- return ethtool_op_set_tx_csum(dev, data);
- else
- return -EOPNOTSUPP;
-}
+ if (netif_running(dev))
+ writel(np->txrxctl_bits, base + NvRegTxRxControl);
-static int nv_set_sg(struct net_device *dev, u32 data)
-{
- struct fe_priv *np = netdev_priv(dev);
+ spin_unlock_irq(&np->lock);
+ }
- if (np->driver_data & DEV_HAS_CHECKSUM)
- return ethtool_op_set_sg(dev, data);
- else
- return -EOPNOTSUPP;
+ return 0;
}
static int nv_get_sset_count(struct net_device *dev, int sset)
@@ -4896,15 +4868,10 @@ static const struct ethtool_ops ops = {
.get_regs_len = nv_get_regs_len,
.get_regs = nv_get_regs,
.nway_reset = nv_nway_reset,
- .set_tso = nv_set_tso,
.get_ringparam = nv_get_ringparam,
.set_ringparam = nv_set_ringparam,
.get_pauseparam = nv_get_pauseparam,
.set_pauseparam = nv_set_pauseparam,
- .get_rx_csum = nv_get_rx_csum,
- .set_rx_csum = nv_set_rx_csum,
- .set_tx_csum = nv_set_tx_csum,
- .set_sg = nv_set_sg,
.get_strings = nv_get_strings,
.get_ethtool_stats = nv_get_ethtool_stats,
.get_sset_count = nv_get_sset_count,
@@ -5235,6 +5202,8 @@ static const struct net_device_ops nv_netdev_ops = {
.ndo_start_xmit = nv_start_xmit,
.ndo_tx_timeout = nv_tx_timeout,
.ndo_change_mtu = nv_change_mtu,
+ .ndo_fix_features = nv_fix_features,
+ .ndo_set_features = nv_set_features,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = nv_set_mac_address,
.ndo_set_multicast_list = nv_set_multicast,
@@ -5251,6 +5220,8 @@ static const struct net_device_ops nv_netdev_ops_optimized = {
.ndo_start_xmit = nv_start_xmit_optimized,
.ndo_tx_timeout = nv_tx_timeout,
.ndo_change_mtu = nv_change_mtu,
+ .ndo_fix_features = nv_fix_features,
+ .ndo_set_features = nv_set_features,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = nv_set_mac_address,
.ndo_set_multicast_list = nv_set_multicast,
@@ -5364,11 +5335,10 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
np->pkt_limit = NV_PKTLIMIT_2;
if (id->driver_data & DEV_HAS_CHECKSUM) {
- np->rx_csum = 1;
np->txrxctl_bits |= NVREG_TXRXCTL_RXCHECK;
- dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
- dev->features |= NETIF_F_TSO;
- dev->features |= NETIF_F_GRO;
+ dev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_SG |
+ NETIF_F_TSO | NETIF_F_RXCSUM;
+ dev->features |= dev->hw_features;
}
np->vlanctl_bits = 0;
@@ -5384,7 +5354,6 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
np->pause_flags |= NV_PAUSEFRAME_TX_CAPABLE | NV_PAUSEFRAME_TX_REQ;
}
-
err = -ENOMEM;
np->base = ioremap(addr, np->register_size);
if (!np->base)
diff --git a/drivers/net/fs_enet/fs_enet-main.c b/drivers/net/fs_enet/fs_enet-main.c
index 24cb953900dd..a9388944f1d3 100644
--- a/drivers/net/fs_enet/fs_enet-main.c
+++ b/drivers/net/fs_enet/fs_enet-main.c
@@ -956,8 +956,6 @@ static const struct ethtool_ops fs_ethtool_ops = {
.get_link = ethtool_op_get_link,
.get_msglevel = fs_get_msglevel,
.set_msglevel = fs_set_msglevel,
- .set_tx_csum = ethtool_op_set_tx_csum, /* local! */
- .set_sg = ethtool_op_set_sg,
.get_regs = fs_get_regs,
};
diff --git a/drivers/net/fs_enet/mac-fec.c b/drivers/net/fs_enet/mac-fec.c
index 61035fc5599b..b9fbc83d64a7 100644
--- a/drivers/net/fs_enet/mac-fec.c
+++ b/drivers/net/fs_enet/mac-fec.c
@@ -226,8 +226,8 @@ static void set_multicast_finish(struct net_device *dev)
}
FC(fecp, r_cntrl, FEC_RCNTRL_PROM);
- FW(fecp, hash_table_high, fep->fec.hthi);
- FW(fecp, hash_table_low, fep->fec.htlo);
+ FW(fecp, grp_hash_table_high, fep->fec.hthi);
+ FW(fecp, grp_hash_table_low, fep->fec.htlo);
}
static void set_multicast_list(struct net_device *dev)
@@ -273,8 +273,8 @@ static void restart(struct net_device *dev)
/*
* Reset all multicast.
*/
- FW(fecp, hash_table_high, fep->fec.hthi);
- FW(fecp, hash_table_low, fep->fec.htlo);
+ FW(fecp, grp_hash_table_high, fep->fec.hthi);
+ FW(fecp, grp_hash_table_low, fep->fec.htlo);
/*
* Set maximum receive buffer size.
diff --git a/drivers/net/ftmac100.c b/drivers/net/ftmac100.c
index a31661948c42..9bd7746cbfcf 100644
--- a/drivers/net/ftmac100.c
+++ b/drivers/net/ftmac100.c
@@ -139,11 +139,11 @@ static int ftmac100_reset(struct ftmac100 *priv)
* that hardware reset completed (what the f*ck).
* We still need to wait for a while.
*/
- usleep_range(500, 1000);
+ udelay(500);
return 0;
}
- usleep_range(1000, 10000);
+ udelay(1000);
}
netdev_err(netdev, "software reset failed\n");
@@ -772,7 +772,7 @@ static int ftmac100_mdio_read(struct net_device *netdev, int phy_id, int reg)
if ((phycr & FTMAC100_PHYCR_MIIRD) == 0)
return phycr & FTMAC100_PHYCR_MIIRDATA;
- usleep_range(100, 1000);
+ udelay(100);
}
netdev_err(netdev, "mdio read timed out\n");
@@ -801,7 +801,7 @@ static void ftmac100_mdio_write(struct net_device *netdev, int phy_id, int reg,
if ((phycr & FTMAC100_PHYCR_MIIWR) == 0)
return;
- usleep_range(100, 1000);
+ udelay(100);
}
netdev_err(netdev, "mdio write timed out\n");
diff --git a/drivers/net/gianfar.c b/drivers/net/gianfar.c
index 2a0ad9a501bb..ff60b23a5b74 100644
--- a/drivers/net/gianfar.c
+++ b/drivers/net/gianfar.c
@@ -365,7 +365,7 @@ static void gfar_init_mac(struct net_device *ndev)
gfar_write(&regs->rir0, DEFAULT_RIR0);
}
- if (priv->rx_csum_enable)
+ if (ndev->features & NETIF_F_RXCSUM)
rctrl |= RCTRL_CHECKSUMMING;
if (priv->extended_hash) {
@@ -463,6 +463,7 @@ static const struct net_device_ops gfar_netdev_ops = {
.ndo_start_xmit = gfar_start_xmit,
.ndo_stop = gfar_close,
.ndo_change_mtu = gfar_change_mtu,
+ .ndo_set_features = gfar_set_features,
.ndo_set_multicast_list = gfar_set_multi,
.ndo_tx_timeout = gfar_timeout,
.ndo_do_ioctl = gfar_ioctl,
@@ -513,7 +514,7 @@ void unlock_tx_qs(struct gfar_private *priv)
/* Returns 1 if incoming frames use an FCB */
static inline int gfar_uses_fcb(struct gfar_private *priv)
{
- return priv->vlgrp || priv->rx_csum_enable ||
+ return priv->vlgrp || (priv->ndev->features & NETIF_F_RXCSUM) ||
(priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER);
}
@@ -1030,10 +1031,11 @@ static int gfar_probe(struct platform_device *ofdev)
netif_napi_add(dev, &priv->gfargrp[i].napi, gfar_poll, GFAR_DEV_WEIGHT);
if (priv->device_flags & FSL_GIANFAR_DEV_HAS_CSUM) {
- priv->rx_csum_enable = 1;
- dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG | NETIF_F_HIGHDMA;
- } else
- priv->rx_csum_enable = 0;
+ dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_SG |
+ NETIF_F_RXCSUM;
+ dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG |
+ NETIF_F_RXCSUM | NETIF_F_HIGHDMA;
+ }
priv->vlgrp = NULL;
@@ -2697,7 +2699,7 @@ static int gfar_process_frame(struct net_device *dev, struct sk_buff *skb,
if (priv->padding)
skb_pull(skb, priv->padding);
- if (priv->rx_csum_enable)
+ if (dev->features & NETIF_F_RXCSUM)
gfar_rx_checksum(skb, fcb);
/* Tell the skb what kind of packet this is */
diff --git a/drivers/net/gianfar.h b/drivers/net/gianfar.h
index ec5d595ce2e2..fc86f5195445 100644
--- a/drivers/net/gianfar.h
+++ b/drivers/net/gianfar.h
@@ -382,23 +382,6 @@ extern const char gfar_driver_version[];
#define BD_LFLAG(flags) ((flags) << 16)
#define BD_LENGTH_MASK 0x0000ffff
-#define CLASS_CODE_UNRECOG 0x00
-#define CLASS_CODE_DUMMY1 0x01
-#define CLASS_CODE_ETHERTYPE1 0x02
-#define CLASS_CODE_ETHERTYPE2 0x03
-#define CLASS_CODE_USER_PROG1 0x04
-#define CLASS_CODE_USER_PROG2 0x05
-#define CLASS_CODE_USER_PROG3 0x06
-#define CLASS_CODE_USER_PROG4 0x07
-#define CLASS_CODE_TCP_IPV4 0x08
-#define CLASS_CODE_UDP_IPV4 0x09
-#define CLASS_CODE_AH_ESP_IPV4 0x0a
-#define CLASS_CODE_SCTP_IPV4 0x0b
-#define CLASS_CODE_TCP_IPV6 0x0c
-#define CLASS_CODE_UDP_IPV6 0x0d
-#define CLASS_CODE_AH_ESP_IPV6 0x0e
-#define CLASS_CODE_SCTP_IPV6 0x0f
-
#define FPR_FILER_MASK 0xFFFFFFFF
#define MAX_FILER_IDX 0xFF
@@ -1043,7 +1026,7 @@ enum gfar_errata {
};
/* Struct stolen almost completely (and shamelessly) from the FCC enet source
- * (Ok, that's not so true anymore, but there is a family resemblence)
+ * (Ok, that's not so true anymore, but there is a family resemblance)
* The GFAR buffer descriptors track the ring buffers. The rx_bd_base
* and tx_bd_base always point to the currently available buffer.
* The dirty_tx tracks the current buffer that is being sent by the
@@ -1100,7 +1083,7 @@ struct gfar_private {
struct device_node *phy_node;
struct device_node *tbi_node;
u32 device_flags;
- unsigned char rx_csum_enable:1,
+ unsigned char
extended_hash:1,
bd_stash_en:1,
rx_filer_enable:1,
@@ -1170,6 +1153,7 @@ extern void gfar_phy_test(struct mii_bus *bus, struct phy_device *phydev,
extern void gfar_configure_coalescing(struct gfar_private *priv,
unsigned long tx_mask, unsigned long rx_mask);
void gfar_init_sysfs(struct net_device *dev);
+int gfar_set_features(struct net_device *dev, u32 features);
extern const struct ethtool_ops gfar_ethtool_ops;
diff --git a/drivers/net/gianfar_ethtool.c b/drivers/net/gianfar_ethtool.c
index 3bc8e276ba4d..493d743839d9 100644
--- a/drivers/net/gianfar_ethtool.c
+++ b/drivers/net/gianfar_ethtool.c
@@ -517,15 +517,15 @@ static int gfar_sringparam(struct net_device *dev, struct ethtool_ringparam *rva
return err;
}
-static int gfar_set_rx_csum(struct net_device *dev, uint32_t data)
+int gfar_set_features(struct net_device *dev, u32 features)
{
struct gfar_private *priv = netdev_priv(dev);
unsigned long flags;
int err = 0, i = 0;
+ u32 changed = dev->features ^ features;
- if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_CSUM))
- return -EOPNOTSUPP;
-
+ if (!(changed & NETIF_F_RXCSUM))
+ return 0;
if (dev->flags & IFF_UP) {
/* Halt TX and RX, and process the frames which
@@ -546,58 +546,15 @@ static int gfar_set_rx_csum(struct net_device *dev, uint32_t data)
/* Now we take down the rings to rebuild them */
stop_gfar(dev);
- }
- spin_lock_irqsave(&priv->bflock, flags);
- priv->rx_csum_enable = data;
- spin_unlock_irqrestore(&priv->bflock, flags);
+ dev->features = features;
- if (dev->flags & IFF_UP) {
err = startup_gfar(dev);
netif_tx_wake_all_queues(dev);
}
return err;
}
-static uint32_t gfar_get_rx_csum(struct net_device *dev)
-{
- struct gfar_private *priv = netdev_priv(dev);
-
- if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_CSUM))
- return 0;
-
- return priv->rx_csum_enable;
-}
-
-static int gfar_set_tx_csum(struct net_device *dev, uint32_t data)
-{
- struct gfar_private *priv = netdev_priv(dev);
-
- if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_CSUM))
- return -EOPNOTSUPP;
-
- netif_tx_lock_bh(dev);
-
- if (data)
- dev->features |= NETIF_F_IP_CSUM;
- else
- dev->features &= ~NETIF_F_IP_CSUM;
-
- netif_tx_unlock_bh(dev);
-
- return 0;
-}
-
-static uint32_t gfar_get_tx_csum(struct net_device *dev)
-{
- struct gfar_private *priv = netdev_priv(dev);
-
- if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_CSUM))
- return 0;
-
- return (dev->features & NETIF_F_IP_CSUM) != 0;
-}
-
static uint32_t gfar_get_msglevel(struct net_device *dev)
{
struct gfar_private *priv = netdev_priv(dev);
@@ -645,42 +602,6 @@ static int gfar_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
}
#endif
-static int gfar_ethflow_to_class(int flow_type, u64 *class)
-{
- switch (flow_type) {
- case TCP_V4_FLOW:
- *class = CLASS_CODE_TCP_IPV4;
- break;
- case UDP_V4_FLOW:
- *class = CLASS_CODE_UDP_IPV4;
- break;
- case AH_V4_FLOW:
- case ESP_V4_FLOW:
- *class = CLASS_CODE_AH_ESP_IPV4;
- break;
- case SCTP_V4_FLOW:
- *class = CLASS_CODE_SCTP_IPV4;
- break;
- case TCP_V6_FLOW:
- *class = CLASS_CODE_TCP_IPV6;
- break;
- case UDP_V6_FLOW:
- *class = CLASS_CODE_UDP_IPV6;
- break;
- case AH_V6_FLOW:
- case ESP_V6_FLOW:
- *class = CLASS_CODE_AH_ESP_IPV6;
- break;
- case SCTP_V6_FLOW:
- *class = CLASS_CODE_SCTP_IPV6;
- break;
- default:
- return 0;
- }
-
- return 1;
-}
-
static void ethflow_to_filer_rules (struct gfar_private *priv, u64 ethflow)
{
u32 fcr = 0x0, fpr = FPR_FILER_MASK;
@@ -778,11 +699,6 @@ static int gfar_ethflow_to_filer_table(struct gfar_private *priv, u64 ethflow, u
case UDP_V6_FLOW:
cmp_rqfpr = RQFPR_IPV6 |RQFPR_UDP;
break;
- case IPV4_FLOW:
- cmp_rqfpr = RQFPR_IPV4;
- case IPV6_FLOW:
- cmp_rqfpr = RQFPR_IPV6;
- break;
default:
printk(KERN_ERR "Right now this class is not supported\n");
return 0;
@@ -848,18 +764,9 @@ static int gfar_ethflow_to_filer_table(struct gfar_private *priv, u64 ethflow, u
static int gfar_set_hash_opts(struct gfar_private *priv, struct ethtool_rxnfc *cmd)
{
- u64 class;
-
- if (!gfar_ethflow_to_class(cmd->flow_type, &class))
- return -EINVAL;
-
- if (class < CLASS_CODE_USER_PROG1 ||
- class > CLASS_CODE_SCTP_IPV6)
- return -EINVAL;
-
/* write the filer rules here */
if (!gfar_ethflow_to_filer_table(priv, cmd->data, cmd->flow_type))
- return -1;
+ return -EINVAL;
return 0;
}
@@ -894,11 +801,6 @@ const struct ethtool_ops gfar_ethtool_ops = {
.get_strings = gfar_gstrings,
.get_sset_count = gfar_sset_count,
.get_ethtool_stats = gfar_fill_stats,
- .get_rx_csum = gfar_get_rx_csum,
- .get_tx_csum = gfar_get_tx_csum,
- .set_rx_csum = gfar_set_rx_csum,
- .set_tx_csum = gfar_set_tx_csum,
- .set_sg = ethtool_op_set_sg,
.get_msglevel = gfar_get_msglevel,
.set_msglevel = gfar_set_msglevel,
#ifdef CONFIG_PM
diff --git a/drivers/net/greth.c b/drivers/net/greth.c
index 396ff7d785d1..f181304a7ab6 100644
--- a/drivers/net/greth.c
+++ b/drivers/net/greth.c
@@ -901,7 +901,7 @@ static int greth_rx_gbit(struct net_device *dev, int limit)
skb_put(skb, pkt_len);
- if (greth->flags & GRETH_FLAG_RX_CSUM && hw_checksummed(status))
+ if (dev->features & NETIF_F_RXCSUM && hw_checksummed(status))
skb->ip_summed = CHECKSUM_UNNECESSARY;
else
skb_checksum_none_assert(skb);
@@ -1142,41 +1142,6 @@ static void greth_get_regs(struct net_device *dev, struct ethtool_regs *regs, vo
buff[i] = greth_read_bd(&greth_regs[i]);
}
-static u32 greth_get_rx_csum(struct net_device *dev)
-{
- struct greth_private *greth = netdev_priv(dev);
- return (greth->flags & GRETH_FLAG_RX_CSUM) != 0;
-}
-
-static int greth_set_rx_csum(struct net_device *dev, u32 data)
-{
- struct greth_private *greth = netdev_priv(dev);
-
- spin_lock_bh(&greth->devlock);
-
- if (data)
- greth->flags |= GRETH_FLAG_RX_CSUM;
- else
- greth->flags &= ~GRETH_FLAG_RX_CSUM;
-
- spin_unlock_bh(&greth->devlock);
-
- return 0;
-}
-
-static u32 greth_get_tx_csum(struct net_device *dev)
-{
- return (dev->features & NETIF_F_IP_CSUM) != 0;
-}
-
-static int greth_set_tx_csum(struct net_device *dev, u32 data)
-{
- netif_tx_lock_bh(dev);
- ethtool_op_set_tx_csum(dev, data);
- netif_tx_unlock_bh(dev);
- return 0;
-}
-
static const struct ethtool_ops greth_ethtool_ops = {
.get_msglevel = greth_get_msglevel,
.set_msglevel = greth_set_msglevel,
@@ -1185,10 +1150,6 @@ static const struct ethtool_ops greth_ethtool_ops = {
.get_drvinfo = greth_get_drvinfo,
.get_regs_len = greth_get_regs_len,
.get_regs = greth_get_regs,
- .get_rx_csum = greth_get_rx_csum,
- .set_rx_csum = greth_set_rx_csum,
- .get_tx_csum = greth_get_tx_csum,
- .set_tx_csum = greth_set_tx_csum,
.get_link = ethtool_op_get_link,
};
@@ -1570,9 +1531,10 @@ static int __devinit greth_of_probe(struct platform_device *ofdev)
GRETH_REGSAVE(regs->status, 0xFF);
if (greth->gbit_mac) {
- dev->features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_HIGHDMA;
+ dev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM |
+ NETIF_F_RXCSUM;
+ dev->features = dev->hw_features | NETIF_F_HIGHDMA;
greth_netdev_ops.ndo_start_xmit = greth_start_xmit_gbit;
- greth->flags = GRETH_FLAG_RX_CSUM;
}
if (greth->multicast) {
diff --git a/drivers/net/greth.h b/drivers/net/greth.h
index be0f2062bd14..9a0040dee4da 100644
--- a/drivers/net/greth.h
+++ b/drivers/net/greth.h
@@ -77,9 +77,6 @@
*/
#define MAX_FRAME_SIZE 1520
-/* Flags */
-#define GRETH_FLAG_RX_CSUM 0x1
-
/* GRETH APB registers */
struct greth_regs {
u32 control;
@@ -133,7 +130,6 @@ struct greth_private {
unsigned int duplex;
u32 msg_enable;
- u32 flags;
u8 phyaddr;
u8 multicast;
diff --git a/drivers/net/hamachi.c b/drivers/net/hamachi.c
index 80d25ed53344..a09041aa8509 100644
--- a/drivers/net/hamachi.c
+++ b/drivers/net/hamachi.c
@@ -132,13 +132,8 @@ static int tx_params[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};
/*
* RX_CHECKSUM turns on card-generated receive checksum generation for
* TCP and UDP packets. Otherwise the upper layers do the calculation.
- * TX_CHECKSUM won't do anything too useful, even if it works. There's no
- * easy mechanism by which to tell the TCP/UDP stack that it need not
- * generate checksums for this device. But if somebody can find a way
- * to get that to work, most of the card work is in here already.
* 3/10/1999 Pete Wyckoff <wyckoff@ca.sandia.gov>
*/
-#undef TX_CHECKSUM
#define RX_CHECKSUM
/* Operational parameters that usually are not changed. */
@@ -630,11 +625,6 @@ static int __devinit hamachi_init_one (struct pci_dev *pdev,
SET_NETDEV_DEV(dev, &pdev->dev);
-#ifdef TX_CHECKSUM
- printk("check that skbcopy in ip_queue_xmit isn't happening\n");
- dev->hard_header_len += 8; /* for cksum tag */
-#endif
-
for (i = 0; i < 6; i++)
dev->dev_addr[i] = 1 ? read_eeprom(ioaddr, 4 + i)
: readb(ioaddr + StationAddr + i);
@@ -937,11 +927,7 @@ static int hamachi_open(struct net_device *dev)
/* always 1, takes no more time to do it */
writew(0x0001, ioaddr + RxChecksum);
-#ifdef TX_CHECKSUM
- writew(0x0001, ioaddr + TxChecksum);
-#else
writew(0x0000, ioaddr + TxChecksum);
-#endif
writew(0x8000, ioaddr + MACCnfg); /* Soft reset the MAC */
writew(0x215F, ioaddr + MACCnfg);
writew(0x000C, ioaddr + FrameGap0);
@@ -1226,40 +1212,6 @@ static void hamachi_init_ring(struct net_device *dev)
}
-#ifdef TX_CHECKSUM
-#define csum_add(it, val) \
-do { \
- it += (u16) (val); \
- if (it & 0xffff0000) { \
- it &= 0xffff; \
- ++it; \
- } \
-} while (0)
- /* printk("add %04x --> %04x\n", val, it); \ */
-
-/* uh->len already network format, do not swap */
-#define pseudo_csum_udp(sum,ih,uh) do { \
- sum = 0; \
- csum_add(sum, (ih)->saddr >> 16); \
- csum_add(sum, (ih)->saddr & 0xffff); \
- csum_add(sum, (ih)->daddr >> 16); \
- csum_add(sum, (ih)->daddr & 0xffff); \
- csum_add(sum, cpu_to_be16(IPPROTO_UDP)); \
- csum_add(sum, (uh)->len); \
-} while (0)
-
-/* swap len */
-#define pseudo_csum_tcp(sum,ih,len) do { \
- sum = 0; \
- csum_add(sum, (ih)->saddr >> 16); \
- csum_add(sum, (ih)->saddr & 0xffff); \
- csum_add(sum, (ih)->daddr >> 16); \
- csum_add(sum, (ih)->daddr & 0xffff); \
- csum_add(sum, cpu_to_be16(IPPROTO_TCP)); \
- csum_add(sum, htons(len)); \
-} while (0)
-#endif
-
static netdev_tx_t hamachi_start_xmit(struct sk_buff *skb,
struct net_device *dev)
{
@@ -1292,36 +1244,6 @@ static netdev_tx_t hamachi_start_xmit(struct sk_buff *skb,
hmp->tx_skbuff[entry] = skb;
-#ifdef TX_CHECKSUM
- {
- /* tack on checksum tag */
- u32 tagval = 0;
- struct ethhdr *eh = (struct ethhdr *)skb->data;
- if (eh->h_proto == cpu_to_be16(ETH_P_IP)) {
- struct iphdr *ih = (struct iphdr *)((char *)eh + ETH_HLEN);
- if (ih->protocol == IPPROTO_UDP) {
- struct udphdr *uh
- = (struct udphdr *)((char *)ih + ih->ihl*4);
- u32 offset = ((unsigned char *)uh + 6) - skb->data;
- u32 pseudo;
- pseudo_csum_udp(pseudo, ih, uh);
- pseudo = htons(pseudo);
- printk("udp cksum was %04x, sending pseudo %04x\n",
- uh->check, pseudo);
- uh->check = 0; /* zero out uh->check before card calc */
- /*
- * start at 14 (skip ethhdr), store at offset (uh->check),
- * use pseudo value given.
- */
- tagval = (14 << 24) | (offset << 16) | pseudo;
- } else if (ih->protocol == IPPROTO_TCP) {
- printk("tcp, no auto cksum\n");
- }
- }
- *(u32 *)skb_push(skb, 8) = tagval;
- }
-#endif
-
hmp->tx_ring[entry].addr = cpu_to_leXX(pci_map_single(hmp->pci_dev,
skb->data, skb->len, PCI_DMA_TODEVICE));
diff --git a/drivers/net/hamradio/Makefile b/drivers/net/hamradio/Makefile
index 9def86704a91..104096070026 100644
--- a/drivers/net/hamradio/Makefile
+++ b/drivers/net/hamradio/Makefile
@@ -3,7 +3,7 @@
#
#
# 19971130 Moved the amateur radio related network drivers from
-# drivers/net/ to drivers/hamradio for easier maintainance.
+# drivers/net/ to drivers/hamradio for easier maintenance.
# Joerg Reuter DL1BKE <jreuter@yaina.de>
#
# 20000806 Rewritten to use lists instead of if-statements.
diff --git a/drivers/net/hamradio/bpqether.c b/drivers/net/hamradio/bpqether.c
index 8931168d3e74..18d8affecd1b 100644
--- a/drivers/net/hamradio/bpqether.c
+++ b/drivers/net/hamradio/bpqether.c
@@ -516,10 +516,6 @@ static int bpq_new_device(struct net_device *edev)
memcpy(bpq->dest_addr, bcast_addr, sizeof(bpq_eth_addr));
memcpy(bpq->acpt_addr, bcast_addr, sizeof(bpq_eth_addr));
- err = dev_alloc_name(ndev, ndev->name);
- if (err < 0)
- goto error;
-
err = register_netdevice(ndev);
if (err)
goto error;
diff --git a/drivers/net/hamradio/yam.c b/drivers/net/hamradio/yam.c
index 7d9ced0738c5..96a98d2ff151 100644
--- a/drivers/net/hamradio/yam.c
+++ b/drivers/net/hamradio/yam.c
@@ -30,7 +30,7 @@
* 0.1 F1OAT 07.06.98 Add timer polling routine for channel arbitration
* 0.2 F6FBB 08.06.98 Added delay after FPGA programming
* 0.3 F6FBB 29.07.98 Delayed PTT implementation for dupmode=2
- * 0.4 F6FBB 30.07.98 Added TxTail, Slottime and Persistance
+ * 0.4 F6FBB 30.07.98 Added TxTail, Slottime and Persistence
* 0.5 F6FBB 01.08.98 Shared IRQs, /proc/net and network statistics
* 0.6 F6FBB 25.08.98 Added 1200Bds format
* 0.7 F6FBB 12.09.98 Added to the kernel configuration
diff --git a/drivers/net/hp100.c b/drivers/net/hp100.c
index 8e2c4601b5f5..c52a1df5d922 100644
--- a/drivers/net/hp100.c
+++ b/drivers/net/hp100.c
@@ -180,22 +180,22 @@ struct hp100_private {
u_int *page_vaddr_algn; /* Aligned virtual address of allocated page */
u_long whatever_offset; /* Offset to bus/phys/dma address */
- int rxrcommit; /* # Rx PDLs commited to adapter */
- int txrcommit; /* # Tx PDLs commited to adapter */
+ int rxrcommit; /* # Rx PDLs committed to adapter */
+ int txrcommit; /* # Tx PDLs committed to adapter */
};
/*
* variables
*/
#ifdef CONFIG_ISA
-static const char *hp100_isa_tbl[] = {
+static const char *const hp100_isa_tbl[] __devinitconst = {
"HWPF150", /* HP J2573 rev A */
"HWP1950", /* HP J2573 */
};
#endif
#ifdef CONFIG_EISA
-static struct eisa_device_id hp100_eisa_tbl[] = {
+static const struct eisa_device_id hp100_eisa_tbl[] __devinitconst = {
{ "HWPF180" }, /* HP J2577 rev A */
{ "HWP1920" }, /* HP 27248B */
{ "HWP1940" }, /* HP J2577 */
@@ -336,7 +336,7 @@ static __devinit const char *hp100_read_id(int ioaddr)
}
#ifdef CONFIG_ISA
-static __init int hp100_isa_probe1(struct net_device *dev, int ioaddr)
+static __devinit int hp100_isa_probe1(struct net_device *dev, int ioaddr)
{
const char *sig;
int i;
@@ -372,7 +372,7 @@ static __init int hp100_isa_probe1(struct net_device *dev, int ioaddr)
* EISA and PCI are handled by device infrastructure.
*/
-static int __init hp100_isa_probe(struct net_device *dev, int addr)
+static int __devinit hp100_isa_probe(struct net_device *dev, int addr)
{
int err = -ENODEV;
@@ -396,7 +396,7 @@ static int __init hp100_isa_probe(struct net_device *dev, int addr)
#endif /* CONFIG_ISA */
#if !defined(MODULE) && defined(CONFIG_ISA)
-struct net_device * __init hp100_probe(int unit)
+struct net_device * __devinit hp100_probe(int unit)
{
struct net_device *dev = alloc_etherdev(sizeof(struct hp100_private));
int err;
@@ -716,7 +716,7 @@ static int __devinit hp100_probe1(struct net_device *dev, int ioaddr,
* implemented/tested only with the lassen chip anyway... */
if (lp->mode == 1) { /* busmaster */
dma_addr_t page_baddr;
- /* Get physically continous memory for TX & RX PDLs */
+ /* Get physically continuous memory for TX & RX PDLs */
/* Conversion to new PCI API :
* Pages are always aligned and zeroed, no need to it ourself.
* Doc says should be OK for EISA bus as well - Jean II */
@@ -1596,7 +1596,7 @@ drop:
/* clean_txring checks if packets have been sent by the card by reading
* the TX_PDL register from the performance page and comparing it to the
- * number of commited packets. It then frees the skb's of the packets that
+ * number of committed packets. It then frees the skb's of the packets that
* obviously have been sent to the network.
*
* Needs the PERFORMANCE page selected.
@@ -1617,7 +1617,7 @@ static void hp100_clean_txring(struct net_device *dev)
#ifdef HP100_DEBUG
if (donecount > MAX_TX_PDL)
- printk("hp100: %s: Warning: More PDLs transmitted than commited to card???\n", dev->name);
+ printk("hp100: %s: Warning: More PDLs transmitted than committed to card???\n", dev->name);
#endif
for (; 0 != donecount; donecount--) {
@@ -1765,7 +1765,7 @@ drop:
* Receive Function (Non-Busmaster mode)
* Called when an "Receive Packet" interrupt occurs, i.e. the receive
* packet counter is non-zero.
- * For non-busmaster, this function does the whole work of transfering
+ * For non-busmaster, this function does the whole work of transferring
* the packet to the host memory and then up to higher layers via skb
* and netif_rx.
*/
@@ -1892,7 +1892,7 @@ static void hp100_rx_bm(struct net_device *dev)
/* RX_PKT_CNT states how many PDLs are currently formatted and available to
* the cards BM engine */
if ((hp100_inw(RX_PKT_CNT) & 0x00ff) >= lp->rxrcommit) {
- printk("hp100: %s: More packets received than commited? RX_PKT_CNT=0x%x, commit=0x%x\n",
+ printk("hp100: %s: More packets received than committed? RX_PKT_CNT=0x%x, commit=0x%x\n",
dev->name, hp100_inw(RX_PKT_CNT) & 0x00ff,
lp->rxrcommit);
return;
@@ -2256,7 +2256,7 @@ static irqreturn_t hp100_interrupt(int irq, void *dev_id)
if (lp->mode != 1) /* non busmaster */
hp100_rx(dev);
else if (!(val & HP100_RX_PDL_FILL_COMPL)) {
- /* Shouldnt happen - maybe we missed a RX_PDL_FILL Interrupt? */
+ /* Shouldn't happen - maybe we missed a RX_PDL_FILL Interrupt? */
hp100_rx_bm(dev);
}
}
@@ -2843,7 +2843,7 @@ static void cleanup_dev(struct net_device *d)
}
#ifdef CONFIG_EISA
-static int __init hp100_eisa_probe (struct device *gendev)
+static int __devinit hp100_eisa_probe (struct device *gendev)
{
struct net_device *dev = alloc_etherdev(sizeof(struct hp100_private));
struct eisa_device *edev = to_eisa_device(gendev);
diff --git a/drivers/net/hp100.h b/drivers/net/hp100.h
index e6ca128a5564..b60e96fe38b4 100644
--- a/drivers/net/hp100.h
+++ b/drivers/net/hp100.h
@@ -109,7 +109,7 @@
#define HP100_REG_MAC_CFG_2 0x0d /* RW: (8) Misc MAC functions */
#define HP100_REG_MAC_CFG_3 0x0e /* RW: (8) Misc MAC functions */
#define HP100_REG_MAC_CFG_4 0x0f /* R: (8) Misc MAC states */
-#define HP100_REG_DROPPED 0x10 /* R: (16),11:0 Pkts cant fit in mem */
+#define HP100_REG_DROPPED 0x10 /* R: (16),11:0 Pkts can't fit in mem */
#define HP100_REG_CRC 0x12 /* R: (8) Pkts with CRC */
#define HP100_REG_ABORT 0x13 /* R: (8) Aborted Tx pkts */
#define HP100_REG_TRAIN_REQUEST 0x14 /* RW: (16) Endnode MAC register. */
diff --git a/drivers/net/ibm_newemac/core.c b/drivers/net/ibm_newemac/core.c
index 3bb990b6651a..079450fe5e96 100644
--- a/drivers/net/ibm_newemac/core.c
+++ b/drivers/net/ibm_newemac/core.c
@@ -2053,13 +2053,6 @@ static void emac_ethtool_get_pauseparam(struct net_device *ndev,
mutex_unlock(&dev->link_lock);
}
-static u32 emac_ethtool_get_rx_csum(struct net_device *ndev)
-{
- struct emac_instance *dev = netdev_priv(ndev);
-
- return dev->tah_dev != NULL;
-}
-
static int emac_get_regs_len(struct emac_instance *dev)
{
if (emac_has_feature(dev, EMAC_FTR_EMAC4))
@@ -2203,15 +2196,11 @@ static const struct ethtool_ops emac_ethtool_ops = {
.get_ringparam = emac_ethtool_get_ringparam,
.get_pauseparam = emac_ethtool_get_pauseparam,
- .get_rx_csum = emac_ethtool_get_rx_csum,
-
.get_strings = emac_ethtool_get_strings,
.get_sset_count = emac_ethtool_get_sset_count,
.get_ethtool_stats = emac_ethtool_get_ethtool_stats,
.get_link = ethtool_op_get_link,
- .get_tx_csum = ethtool_op_get_tx_csum,
- .get_sg = ethtool_op_get_sg,
};
static int emac_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd)
@@ -2859,8 +2848,10 @@ static int __devinit emac_probe(struct platform_device *ofdev)
if (err != 0)
goto err_detach_tah;
- if (dev->tah_dev)
- ndev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
+ if (dev->tah_dev) {
+ ndev->hw_features = NETIF_F_IP_CSUM | NETIF_F_SG;
+ ndev->features |= ndev->hw_features | NETIF_F_RXCSUM;
+ }
ndev->watchdog_timeo = 5 * HZ;
if (emac_phy_supports_gige(dev->phy_mode)) {
ndev->netdev_ops = &emac_gige_netdev_ops;
diff --git a/drivers/net/ibm_newemac/tah.c b/drivers/net/ibm_newemac/tah.c
index 8ead6a96abaa..5f51bf7c9dc5 100644
--- a/drivers/net/ibm_newemac/tah.c
+++ b/drivers/net/ibm_newemac/tah.c
@@ -60,7 +60,7 @@ void tah_reset(struct platform_device *ofdev)
printk(KERN_ERR "%s: reset timeout\n",
ofdev->dev.of_node->full_name);
- /* 10KB TAH TX FIFO accomodates the max MTU of 9000 */
+ /* 10KB TAH TX FIFO accommodates the max MTU of 9000 */
out_be32(&p->mr,
TAH_MR_CVR | TAH_MR_ST_768 | TAH_MR_TFS_10KB | TAH_MR_DTFP |
TAH_MR_DIG);
diff --git a/drivers/net/ibmlana.c b/drivers/net/ibmlana.c
index 94d9969ec0bb..136d7544cc33 100644
--- a/drivers/net/ibmlana.c
+++ b/drivers/net/ibmlana.c
@@ -53,7 +53,7 @@ History:
still work with 2.0.x....
Jan 28th, 2000
in Linux 2.2.13, the version.h file mysteriously didn't get
- included. Added a workaround for this. Futhermore, it now
+ included. Added a workaround for this. Furthermore, it now
not only compiles as a modules ;-)
Jan 30th, 2000
newer kernels automatically probe more than one board, so the
@@ -481,7 +481,7 @@ static void InitBoard(struct net_device *dev)
if ((dev->flags & IFF_ALLMULTI) || netdev_mc_count(dev) > camcnt)
rcrval |= RCREG_AMC;
- /* promiscous mode ? */
+ /* promiscuous mode ? */
if (dev->flags & IFF_PROMISC)
rcrval |= RCREG_PRO;
@@ -782,7 +782,8 @@ static int ibmlana_open(struct net_device *dev)
/* register resources - only necessary for IRQ */
- result = request_irq(priv->realirq, irq_handler, IRQF_SHARED | IRQF_SAMPLE_RANDOM, dev->name, dev);
+ result = request_irq(priv->realirq, irq_handler, IRQF_SHARED,
+ dev->name, dev);
if (result != 0) {
printk(KERN_ERR "%s: failed to register irq %d\n", dev->name, dev->irq);
return result;
@@ -894,12 +895,12 @@ static int ibmlana_irq;
static int ibmlana_io;
static int startslot; /* counts through slots when probing multiple devices */
-static short ibmlana_adapter_ids[] __initdata = {
+static const short ibmlana_adapter_ids[] __devinitconst = {
IBM_LANA_ID,
0x0000
};
-static char *ibmlana_adapter_names[] __devinitdata = {
+static const char *const ibmlana_adapter_names[] __devinitconst = {
"IBM LAN Adapter/A",
NULL
};
diff --git a/drivers/net/ibmlana.h b/drivers/net/ibmlana.h
index aa3ddbdee4bb..accd5efc9c8a 100644
--- a/drivers/net/ibmlana.h
+++ b/drivers/net/ibmlana.h
@@ -90,7 +90,7 @@ typedef struct {
#define RCREG_ERR 0x8000 /* accept damaged and collided pkts */
#define RCREG_RNT 0x4000 /* accept packets that are < 64 */
#define RCREG_BRD 0x2000 /* accept broadcasts */
-#define RCREG_PRO 0x1000 /* promiscous mode */
+#define RCREG_PRO 0x1000 /* promiscuous mode */
#define RCREG_AMC 0x0800 /* accept all multicasts */
#define RCREG_LB_NONE 0x0000 /* no loopback */
#define RCREG_LB_MAC 0x0200 /* MAC loopback */
diff --git a/drivers/net/ibmveth.c b/drivers/net/ibmveth.c
index 5522d459654c..b388d782c7c4 100644
--- a/drivers/net/ibmveth.c
+++ b/drivers/net/ibmveth.c
@@ -710,7 +710,7 @@ static int netdev_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
SUPPORTED_FIBRE);
cmd->advertising = (ADVERTISED_1000baseT_Full | ADVERTISED_Autoneg |
ADVERTISED_FIBRE);
- cmd->speed = SPEED_1000;
+ ethtool_cmd_speed_set(cmd, SPEED_1000);
cmd->duplex = DUPLEX_FULL;
cmd->port = PORT_FIBRE;
cmd->phy_address = 0;
@@ -729,45 +729,24 @@ static void netdev_get_drvinfo(struct net_device *dev,
sizeof(info->version) - 1);
}
-static void ibmveth_set_rx_csum_flags(struct net_device *dev, u32 data)
+static u32 ibmveth_fix_features(struct net_device *dev, u32 features)
{
- struct ibmveth_adapter *adapter = netdev_priv(dev);
-
- if (data) {
- adapter->rx_csum = 1;
- } else {
- /*
- * Since the ibmveth firmware interface does not have the
- * concept of separate tx/rx checksum offload enable, if rx
- * checksum is disabled we also have to disable tx checksum
- * offload. Once we disable rx checksum offload, we are no
- * longer allowed to send tx buffers that are not properly
- * checksummed.
- */
- adapter->rx_csum = 0;
- dev->features &= ~NETIF_F_IP_CSUM;
- dev->features &= ~NETIF_F_IPV6_CSUM;
- }
-}
+ /*
+ * Since the ibmveth firmware interface does not have the
+ * concept of separate tx/rx checksum offload enable, if rx
+ * checksum is disabled we also have to disable tx checksum
+ * offload. Once we disable rx checksum offload, we are no
+ * longer allowed to send tx buffers that are not properly
+ * checksummed.
+ */
-static void ibmveth_set_tx_csum_flags(struct net_device *dev, u32 data)
-{
- struct ibmveth_adapter *adapter = netdev_priv(dev);
+ if (!(features & NETIF_F_RXCSUM))
+ features &= ~NETIF_F_ALL_CSUM;
- if (data) {
- if (adapter->fw_ipv4_csum_support)
- dev->features |= NETIF_F_IP_CSUM;
- if (adapter->fw_ipv6_csum_support)
- dev->features |= NETIF_F_IPV6_CSUM;
- adapter->rx_csum = 1;
- } else {
- dev->features &= ~NETIF_F_IP_CSUM;
- dev->features &= ~NETIF_F_IPV6_CSUM;
- }
+ return features;
}
-static int ibmveth_set_csum_offload(struct net_device *dev, u32 data,
- void (*done) (struct net_device *, u32))
+static int ibmveth_set_csum_offload(struct net_device *dev, u32 data)
{
struct ibmveth_adapter *adapter = netdev_priv(dev);
unsigned long set_attr, clr_attr, ret_attr;
@@ -827,8 +806,8 @@ static int ibmveth_set_csum_offload(struct net_device *dev, u32 data,
} else
adapter->fw_ipv6_csum_support = data;
- if (ret == H_SUCCESS || ret6 == H_SUCCESS)
- done(dev, data);
+ if (ret != H_SUCCESS || ret6 != H_SUCCESS)
+ adapter->rx_csum = data;
else
rc1 = -EIO;
} else {
@@ -844,41 +823,22 @@ static int ibmveth_set_csum_offload(struct net_device *dev, u32 data,
return rc1 ? rc1 : rc2;
}
-static int ibmveth_set_rx_csum(struct net_device *dev, u32 data)
+static int ibmveth_set_features(struct net_device *dev, u32 features)
{
struct ibmveth_adapter *adapter = netdev_priv(dev);
+ int rx_csum = !!(features & NETIF_F_RXCSUM);
+ int rc;
- if ((data && adapter->rx_csum) || (!data && !adapter->rx_csum))
- return 0;
-
- return ibmveth_set_csum_offload(dev, data, ibmveth_set_rx_csum_flags);
-}
-
-static int ibmveth_set_tx_csum(struct net_device *dev, u32 data)
-{
- struct ibmveth_adapter *adapter = netdev_priv(dev);
- int rc = 0;
-
- if (data && (dev->features & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)))
- return 0;
- if (!data && !(dev->features & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)))
+ if (rx_csum == adapter->rx_csum)
return 0;
- if (data && !adapter->rx_csum)
- rc = ibmveth_set_csum_offload(dev, data,
- ibmveth_set_tx_csum_flags);
- else
- ibmveth_set_tx_csum_flags(dev, data);
+ rc = ibmveth_set_csum_offload(dev, rx_csum);
+ if (rc && !adapter->rx_csum)
+ dev->features = features & ~(NETIF_F_ALL_CSUM | NETIF_F_RXCSUM);
return rc;
}
-static u32 ibmveth_get_rx_csum(struct net_device *dev)
-{
- struct ibmveth_adapter *adapter = netdev_priv(dev);
- return adapter->rx_csum;
-}
-
static void ibmveth_get_strings(struct net_device *dev, u32 stringset, u8 *data)
{
int i;
@@ -914,13 +874,9 @@ static const struct ethtool_ops netdev_ethtool_ops = {
.get_drvinfo = netdev_get_drvinfo,
.get_settings = netdev_get_settings,
.get_link = ethtool_op_get_link,
- .set_tx_csum = ibmveth_set_tx_csum,
- .get_rx_csum = ibmveth_get_rx_csum,
- .set_rx_csum = ibmveth_set_rx_csum,
.get_strings = ibmveth_get_strings,
.get_sset_count = ibmveth_get_sset_count,
.get_ethtool_stats = ibmveth_get_ethtool_stats,
- .set_sg = ethtool_op_set_sg,
};
static int ibmveth_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
@@ -1345,6 +1301,8 @@ static const struct net_device_ops ibmveth_netdev_ops = {
.ndo_set_multicast_list = ibmveth_set_multicast_list,
.ndo_do_ioctl = ibmveth_ioctl,
.ndo_change_mtu = ibmveth_change_mtu,
+ .ndo_fix_features = ibmveth_fix_features,
+ .ndo_set_features = ibmveth_set_features,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = eth_mac_addr,
#ifdef CONFIG_NET_POLL_CONTROLLER
@@ -1412,7 +1370,9 @@ static int __devinit ibmveth_probe(struct vio_dev *dev,
netdev->netdev_ops = &ibmveth_netdev_ops;
netdev->ethtool_ops = &netdev_ethtool_ops;
SET_NETDEV_DEV(netdev, &dev->dev);
- netdev->features |= NETIF_F_SG;
+ netdev->hw_features = NETIF_F_SG | NETIF_F_RXCSUM |
+ NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
+ netdev->features |= netdev->hw_features;
memcpy(netdev->dev_addr, &adapter->mac_addr, netdev->addr_len);
@@ -1437,7 +1397,7 @@ static int __devinit ibmveth_probe(struct vio_dev *dev,
netdev_dbg(netdev, "registering netdev...\n");
- ibmveth_set_csum_offload(netdev, 1, ibmveth_set_tx_csum_flags);
+ ibmveth_set_features(netdev, netdev->features);
rc = register_netdev(netdev);
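
As a rough sketch (not taken from this patch), the hw_features conversion used in the ibmveth hunks above generally looks like the following: the driver stops implementing per-offload ethtool callbacks, publishes its capabilities in netdev->hw_features, and lets the core negotiate them through ndo_fix_features/ndo_set_features. The foo_* names and the rx_csum field are hypothetical, not taken from ibmveth.

static u32 foo_fix_features(struct net_device *dev, u32 features)
{
	/* The hardware cannot checksum on transmit once receive
	 * checksumming is turned off, so mask the TX bits out too. */
	if (!(features & NETIF_F_RXCSUM))
		features &= ~NETIF_F_ALL_CSUM;

	return features;
}

static int foo_set_features(struct net_device *dev, u32 features)
{
	struct foo_priv *priv = netdev_priv(dev);

	/* Only act when the RXCSUM bit actually changed. */
	if ((dev->features ^ features) & NETIF_F_RXCSUM)
		priv->rx_csum = !!(features & NETIF_F_RXCSUM);

	return 0;
}

/* Typical wiring in the probe path:
 *	netdev->hw_features = NETIF_F_SG | NETIF_F_RXCSUM | NETIF_F_IP_CSUM;
 *	netdev->features  |= netdev->hw_features;
 * plus .ndo_fix_features / .ndo_set_features entries in net_device_ops.
 */
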
diff --git a/drivers/net/ifb.c b/drivers/net/ifb.c
index e07d487f015a..4fecaed67fc4 100644
--- a/drivers/net/ifb.c
+++ b/drivers/net/ifb.c
@@ -233,10 +233,6 @@ static int __init ifb_init_one(int index)
if (!dev_ifb)
return -ENOMEM;
- err = dev_alloc_name(dev_ifb, dev_ifb->name);
- if (err < 0)
- goto err;
-
dev_ifb->rtnl_link_ops = &ifb_link_ops;
err = register_netdevice(dev_ifb);
if (err < 0)
diff --git a/drivers/net/igb/e1000_82575.c b/drivers/net/igb/e1000_82575.c
index 6b256c275e10..0f563c8c5ffc 100644
--- a/drivers/net/igb/e1000_82575.c
+++ b/drivers/net/igb/e1000_82575.c
@@ -244,6 +244,14 @@ static s32 igb_get_invariants_82575(struct e1000_hw *hw)
*/
size += NVM_WORD_SIZE_BASE_SHIFT;
+ /*
+ * Check for invalid size
+ */
+ if ((hw->mac.type == e1000_82576) && (size > 15)) {
+ printk("igb: The NVM size is not valid, "
+ "defaulting to 32K.\n");
+ size = 15;
+ }
nvm->word_size = 1 << size;
if (nvm->word_size == (1 << 15))
nvm->page_size = 128;
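
The clamp added above relies on the word size being a power of two: nvm->word_size = 1 << size, so a size of 15 corresponds to 1 << 15 = 32768 sixteen-bit words, which is the "32K" the new message refers to. A throwaway user-space check of that arithmetic (illustrative only, not driver code):

#include <stdio.h>

int main(void)
{
	unsigned int size = 15;		/* value the driver clamps to */

	/* 1 << 15 = 32768 words, i.e. the "32K" NVM default */
	printf("word_size = %u words\n", 1u << size);
	return 0;
}
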
@@ -1877,7 +1885,7 @@ static s32 igb_validate_nvm_checksum_82580(struct e1000_hw *hw)
}
if (nvm_data & NVM_COMPATIBILITY_BIT_MASK) {
- /* if chekcsums compatibility bit is set validate checksums
+ /* if checksums compatibility bit is set validate checksums
* for all 4 ports. */
eeprom_regions_count = 4;
}
@@ -1988,6 +1996,7 @@ static s32 igb_update_nvm_checksum_i350(struct e1000_hw *hw)
out:
return ret_val;
}
+
/**
* igb_set_eee_i350 - Enable/disable EEE support
* @hw: pointer to the HW structure
diff --git a/drivers/net/igb/e1000_mac.c b/drivers/net/igb/e1000_mac.c
index 90c5e01e9235..ce8255fc3c52 100644
--- a/drivers/net/igb/e1000_mac.c
+++ b/drivers/net/igb/e1000_mac.c
@@ -181,7 +181,7 @@ s32 igb_vfta_set(struct e1000_hw *hw, u32 vid, bool add)
* address and must override the actual permanent MAC address. If an
* alternate MAC address is fopund it is saved in the hw struct and
* prgrammed into RAR0 and the cuntion returns success, otherwise the
- * fucntion returns an error.
+ * function returns an error.
**/
s32 igb_check_alt_mac_addr(struct e1000_hw *hw)
{
@@ -982,7 +982,7 @@ out:
}
/**
- * igb_get_speed_and_duplex_copper - Retreive current speed/duplex
+ * igb_get_speed_and_duplex_copper - Retrieve current speed/duplex
* @hw: pointer to the HW structure
* @speed: stores the current speed
* @duplex: stores the current duplex
diff --git a/drivers/net/igb/e1000_phy.c b/drivers/net/igb/e1000_phy.c
index 6694bf3e5ad9..d639706eb3f6 100644
--- a/drivers/net/igb/e1000_phy.c
+++ b/drivers/net/igb/e1000_phy.c
@@ -1421,7 +1421,7 @@ out:
}
/**
- * igb_check_downshift - Checks whether a downshift in speed occured
+ * igb_check_downshift - Checks whether a downshift in speed occurred
* @hw: pointer to the HW structure
*
* Success returns 0, Failure returns 1
diff --git a/drivers/net/igb/igb.h b/drivers/net/igb/igb.h
index 1c687e298d5e..f4fa4b1751cf 100644
--- a/drivers/net/igb/igb.h
+++ b/drivers/net/igb/igb.h
@@ -360,7 +360,7 @@ extern int igb_up(struct igb_adapter *);
extern void igb_down(struct igb_adapter *);
extern void igb_reinit_locked(struct igb_adapter *);
extern void igb_reset(struct igb_adapter *);
-extern int igb_set_spd_dplx(struct igb_adapter *, u16);
+extern int igb_set_spd_dplx(struct igb_adapter *, u32, u8);
extern int igb_setup_tx_resources(struct igb_ring *);
extern int igb_setup_rx_resources(struct igb_ring *);
extern void igb_free_tx_resources(struct igb_ring *);
diff --git a/drivers/net/igb/igb_ethtool.c b/drivers/net/igb/igb_ethtool.c
index d976733bbcc2..fdc895e5a3f8 100644
--- a/drivers/net/igb/igb_ethtool.c
+++ b/drivers/net/igb/igb_ethtool.c
@@ -178,11 +178,11 @@ static int igb_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
if ((status & E1000_STATUS_SPEED_1000) ||
hw->phy.media_type != e1000_media_type_copper)
- ecmd->speed = SPEED_1000;
+ ethtool_cmd_speed_set(ecmd, SPEED_1000);
else if (status & E1000_STATUS_SPEED_100)
- ecmd->speed = SPEED_100;
+ ethtool_cmd_speed_set(ecmd, SPEED_100);
else
- ecmd->speed = SPEED_10;
+ ethtool_cmd_speed_set(ecmd, SPEED_10);
if ((status & E1000_STATUS_FD) ||
hw->phy.media_type != e1000_media_type_copper)
@@ -190,7 +190,7 @@ static int igb_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
else
ecmd->duplex = DUPLEX_HALF;
} else {
- ecmd->speed = -1;
+ ethtool_cmd_speed_set(ecmd, -1);
ecmd->duplex = -1;
}
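
The switch from assigning ecmd->speed directly to ethtool_cmd_speed_set()/ethtool_cmd_speed(), seen here and in the ibmveth, igbvf and ixgb hunks, exists because struct ethtool_cmd keeps the speed in a legacy 16-bit field plus a separate speed_hi field. Roughly, the helpers behave like the simplified sketch below (example_* names are illustrative; consult include/linux/ethtool.h for the authoritative definitions):

static inline void example_cmd_speed_set(struct ethtool_cmd *ep, __u32 speed)
{
	ep->speed = (__u16)speed;		/* low 16 bits */
	ep->speed_hi = (__u16)(speed >> 16);	/* high 16 bits */
}

static inline __u32 example_cmd_speed(const struct ethtool_cmd *ep)
{
	return (ep->speed_hi << 16) | ep->speed;
}

Passing -1 in the link-down branches simply fills both halves with 0xffff, which user space treats as "speed unknown".
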
@@ -223,7 +223,8 @@ static int igb_set_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
if (adapter->fc_autoneg)
hw->fc.requested_mode = e1000_fc_default;
} else {
- if (igb_set_spd_dplx(adapter, ecmd->speed + ecmd->duplex)) {
+ u32 speed = ethtool_cmd_speed(ecmd);
+ if (igb_set_spd_dplx(adapter, speed, ecmd->duplex)) {
clear_bit(__IGB_RESETTING, &adapter->state);
return -EINVAL;
}
@@ -1963,27 +1964,28 @@ static int igb_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
/* bit defines for adapter->led_status */
#define IGB_LED_ON 0
-static int igb_phys_id(struct net_device *netdev, u32 data)
+static int igb_set_phys_id(struct net_device *netdev,
+ enum ethtool_phys_id_state state)
{
struct igb_adapter *adapter = netdev_priv(netdev);
struct e1000_hw *hw = &adapter->hw;
- unsigned long timeout;
- timeout = data * 1000;
-
- /*
- * msleep_interruptable only accepts unsigned int so we are limited
- * in how long a duration we can wait
- */
- if (!timeout || timeout > UINT_MAX)
- timeout = UINT_MAX;
-
- igb_blink_led(hw);
- msleep_interruptible(timeout);
-
- igb_led_off(hw);
- clear_bit(IGB_LED_ON, &adapter->led_status);
- igb_cleanup_led(hw);
+ switch (state) {
+ case ETHTOOL_ID_ACTIVE:
+ igb_blink_led(hw);
+ return 2;
+ case ETHTOOL_ID_ON:
+ igb_blink_led(hw);
+ break;
+ case ETHTOOL_ID_OFF:
+ igb_led_off(hw);
+ break;
+ case ETHTOOL_ID_INACTIVE:
+ igb_led_off(hw);
+ clear_bit(IGB_LED_ON, &adapter->led_status);
+ igb_cleanup_led(hw);
+ break;
+ }
return 0;
}
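
For context, a hedged reading of the set_phys_id contract this hunk adopts: the core first calls the hook with ETHTOOL_ID_ACTIVE; returning a positive number N asks the core to drive the blinking itself, calling back with ETHTOOL_ID_ON/ETHTOOL_ID_OFF at N blink cycles per second, and ETHTOOL_ID_INACTIVE asks the driver to restore normal LED behaviour. A minimal sketch for a hypothetical driver (foo_led_on/foo_led_off and struct foo_adapter are made-up names, not igb's):

static int foo_set_phys_id(struct net_device *netdev,
			   enum ethtool_phys_id_state state)
{
	struct foo_adapter *adapter = netdev_priv(netdev);

	switch (state) {
	case ETHTOOL_ID_ACTIVE:
		return 2;	/* let the core toggle the LED, 2 cycles/sec */
	case ETHTOOL_ID_ON:
		foo_led_on(adapter);
		break;
	case ETHTOOL_ID_OFF:
		foo_led_off(adapter);
		break;
	case ETHTOOL_ID_INACTIVE:
		foo_led_off(adapter);	/* restore normal LED behaviour */
		break;
	}

	return 0;
}
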
@@ -2215,7 +2217,7 @@ static const struct ethtool_ops igb_ethtool_ops = {
.set_tso = igb_set_tso,
.self_test = igb_diag_test,
.get_strings = igb_get_strings,
- .phys_id = igb_phys_id,
+ .set_phys_id = igb_set_phys_id,
.get_sset_count = igb_get_sset_count,
.get_ethtool_stats = igb_get_ethtool_stats,
.get_coalesce = igb_get_coalesce,
diff --git a/drivers/net/igb/igb_main.c b/drivers/net/igb/igb_main.c
index 3d850af0cdda..ce7838e55827 100644
--- a/drivers/net/igb/igb_main.c
+++ b/drivers/net/igb/igb_main.c
@@ -200,7 +200,7 @@ static struct pci_driver igb_driver = {
.probe = igb_probe,
.remove = __devexit_p(igb_remove),
#ifdef CONFIG_PM
- /* Power Managment Hooks */
+ /* Power Management Hooks */
.suspend = igb_suspend,
.resume = igb_resume,
#endif
@@ -2292,7 +2292,7 @@ static void igb_init_hw_timer(struct igb_adapter *adapter)
/**
* Scale the NIC clock cycle by a large factor so that
* relatively small clock corrections can be added or
- * substracted at each clock tick. The drawbacks of a large
+ * subtracted at each clock tick. The drawbacks of a large
* factor are a) that the clock register overflows more quickly
* (not such a big deal) and b) that the increment per tick has
* to fit into 24 bits. As a result we need to use a shift of
@@ -3409,7 +3409,7 @@ static void igb_set_rx_mode(struct net_device *netdev)
} else {
/*
* Write addresses to the MTA, if the attempt fails
- * then we should just turn on promiscous mode so
+ * then we should just turn on promiscuous mode so
* that we can at least receive multicast traffic
*/
count = igb_write_mc_addr_list(netdev);
@@ -3423,7 +3423,7 @@ static void igb_set_rx_mode(struct net_device *netdev)
/*
* Write addresses to available RAR registers, if there is not
* sufficient space to store all the addresses then enable
- * unicast promiscous mode
+ * unicast promiscuous mode
*/
count = igb_write_uc_addr_list(netdev);
if (count < 0) {
@@ -3532,6 +3532,25 @@ bool igb_has_link(struct igb_adapter *adapter)
return link_active;
}
+static bool igb_thermal_sensor_event(struct e1000_hw *hw, u32 event)
+{
+ bool ret = false;
+ u32 ctrl_ext, thstat;
+
+ /* check for thermal sensor event on i350, copper only */
+ if (hw->mac.type == e1000_i350) {
+ thstat = rd32(E1000_THSTAT);
+ ctrl_ext = rd32(E1000_CTRL_EXT);
+
+ if ((hw->phy.media_type == e1000_media_type_copper) &&
+ !(ctrl_ext & E1000_CTRL_EXT_LINK_MODE_SGMII)) {
+ ret = !!(thstat & event);
+ }
+ }
+
+ return ret;
+}
+
/**
* igb_watchdog - Timer Call-back
* @data: pointer to adapter cast into an unsigned long
@@ -3550,7 +3569,7 @@ static void igb_watchdog_task(struct work_struct *work)
watchdog_task);
struct e1000_hw *hw = &adapter->hw;
struct net_device *netdev = adapter->netdev;
- u32 link, ctrl_ext, thstat;
+ u32 link;
int i;
link = igb_has_link(adapter);
@@ -3574,25 +3593,14 @@ static void igb_watchdog_task(struct work_struct *work)
((ctrl & E1000_CTRL_RFCE) ? "RX" :
((ctrl & E1000_CTRL_TFCE) ? "TX" : "None")));
- /* check for thermal sensor event on i350,
- * copper only */
- if (hw->mac.type == e1000_i350) {
- thstat = rd32(E1000_THSTAT);
- ctrl_ext = rd32(E1000_CTRL_EXT);
- if ((hw->phy.media_type ==
- e1000_media_type_copper) && !(ctrl_ext &
- E1000_CTRL_EXT_LINK_MODE_SGMII)) {
- if (thstat &
- E1000_THSTAT_LINK_THROTTLE) {
- printk(KERN_INFO "igb: %s The "
- "network adapter link "
- "speed was downshifted "
- "because it "
- "overheated.\n",
- netdev->name);
- }
- }
+ /* check for thermal sensor event */
+ if (igb_thermal_sensor_event(hw, E1000_THSTAT_LINK_THROTTLE)) {
+ printk(KERN_INFO "igb: %s The network adapter "
+ "link speed was downshifted "
+ "because it overheated.\n",
+ netdev->name);
}
+
/* adjust timeout factor according to speed/duplex */
adapter->tx_timeout_factor = 1;
switch (adapter->link_speed) {
@@ -3618,22 +3626,15 @@ static void igb_watchdog_task(struct work_struct *work)
if (netif_carrier_ok(netdev)) {
adapter->link_speed = 0;
adapter->link_duplex = 0;
- /* check for thermal sensor event on i350
- * copper only*/
- if (hw->mac.type == e1000_i350) {
- thstat = rd32(E1000_THSTAT);
- ctrl_ext = rd32(E1000_CTRL_EXT);
- if ((hw->phy.media_type ==
- e1000_media_type_copper) && !(ctrl_ext &
- E1000_CTRL_EXT_LINK_MODE_SGMII)) {
- if (thstat & E1000_THSTAT_PWR_DOWN) {
- printk(KERN_ERR "igb: %s The "
- "network adapter was stopped "
- "because it overheated.\n",
+
+ /* check for thermal sensor event */
+ if (igb_thermal_sensor_event(hw, E1000_THSTAT_PWR_DOWN)) {
+ printk(KERN_ERR "igb: %s The network adapter "
+ "was stopped because it "
+ "overheated.\n",
netdev->name);
- }
- }
}
+
/* Links status message must follow this format */
printk(KERN_INFO "igb: %s NIC Link is Down\n",
netdev->name);
@@ -4317,7 +4318,7 @@ netdev_tx_t igb_xmit_frame_ring_adv(struct sk_buff *skb,
/*
* count reflects descriptors mapped, if 0 or less then mapping error
- * has occured and we need to rewind the descriptor queue
+ * has occurred and we need to rewind the descriptor queue
*/
count = igb_tx_map_adv(tx_ring, skb, first);
if (!count) {
@@ -5352,8 +5353,8 @@ static void igb_msg_task(struct igb_adapter *adapter)
* The unicast table address is a register array of 32-bit registers.
* The table is meant to be used in a way similar to how the MTA is used
* however due to certain limitations in the hardware it is necessary to
- * set all the hash bits to 1 and use the VMOLR ROPE bit as a promiscous
- * enable bit to allow vlan tag stripping when promiscous mode is enabled
+ * set all the hash bits to 1 and use the VMOLR ROPE bit as a promiscuous
+ * enable bit to allow vlan tag stripping when promiscuous mode is enabled
**/
static void igb_set_uta(struct igb_adapter *adapter)
{
@@ -6348,21 +6349,25 @@ static void igb_restore_vlan(struct igb_adapter *adapter)
}
}
-int igb_set_spd_dplx(struct igb_adapter *adapter, u16 spddplx)
+int igb_set_spd_dplx(struct igb_adapter *adapter, u32 spd, u8 dplx)
{
struct pci_dev *pdev = adapter->pdev;
struct e1000_mac_info *mac = &adapter->hw.mac;
mac->autoneg = 0;
+ /* Make sure dplx is at most 1 bit and lsb of speed is not set
+ * for the switch() below to work */
+ if ((spd & 1) || (dplx & ~1))
+ goto err_inval;
+
/* Fiber NIC's only allow 1000 Gbps Full duplex */
if ((adapter->hw.phy.media_type == e1000_media_type_internal_serdes) &&
- spddplx != (SPEED_1000 + DUPLEX_FULL)) {
- dev_err(&pdev->dev, "Unsupported Speed/Duplex configuration\n");
- return -EINVAL;
- }
+ spd != SPEED_1000 &&
+ dplx != DUPLEX_FULL)
+ goto err_inval;
- switch (spddplx) {
+ switch (spd + dplx) {
case SPEED_10 + DUPLEX_HALF:
mac->forced_speed_duplex = ADVERTISE_10_HALF;
break;
@@ -6381,10 +6386,13 @@ int igb_set_spd_dplx(struct igb_adapter *adapter, u16 spddplx)
break;
case SPEED_1000 + DUPLEX_HALF: /* not supported */
default:
- dev_err(&pdev->dev, "Unsupported Speed/Duplex configuration\n");
- return -EINVAL;
+ goto err_inval;
}
return 0;
+
+err_inval:
+ dev_err(&pdev->dev, "Unsupported Speed/Duplex configuration\n");
+ return -EINVAL;
}
static int __igb_shutdown(struct pci_dev *pdev, bool *enable_wake)
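
The new sanity check in igb_set_spd_dplx leans on a small invariant: SPEED_10/100/1000 are all even while DUPLEX_HALF/DUPLEX_FULL are 0/1, so once (spd & 1) and (dplx & ~1) are rejected, the low bit of spd + dplx carries the duplex and the remaining bits the speed, making the sums in the switch unambiguous. A throwaway user-space check of that property (illustrative, not driver code; the literal values mirror the ethtool.h constants):

#include <assert.h>
#include <stdio.h>

int main(void)
{
	const unsigned int speeds[] = { 10, 100, 1000 };	/* SPEED_10/100/1000 */
	const unsigned int duplexes[] = { 0, 1 };		/* DUPLEX_HALF/FULL */
	unsigned int seen[6];
	unsigned int n = 0, i, j, k;

	for (i = 0; i < 3; i++) {
		for (j = 0; j < 2; j++) {
			unsigned int key = speeds[i] + duplexes[j];

			for (k = 0; k < n; k++)
				assert(seen[k] != key);	/* every sum is distinct */
			seen[n++] = key;
			printf("%4u + %u -> %u\n", speeds[i], duplexes[j], key);
		}
	}
	return 0;
}
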
diff --git a/drivers/net/igbvf/ethtool.c b/drivers/net/igbvf/ethtool.c
index 1d943aa7c7a6..b0b14d63dfbf 100644
--- a/drivers/net/igbvf/ethtool.c
+++ b/drivers/net/igbvf/ethtool.c
@@ -90,18 +90,18 @@ static int igbvf_get_settings(struct net_device *netdev,
status = er32(STATUS);
if (status & E1000_STATUS_LU) {
if (status & E1000_STATUS_SPEED_1000)
- ecmd->speed = 1000;
+ ethtool_cmd_speed_set(ecmd, SPEED_1000);
else if (status & E1000_STATUS_SPEED_100)
- ecmd->speed = 100;
+ ethtool_cmd_speed_set(ecmd, SPEED_100);
else
- ecmd->speed = 10;
+ ethtool_cmd_speed_set(ecmd, SPEED_10);
if (status & E1000_STATUS_FD)
ecmd->duplex = DUPLEX_FULL;
else
ecmd->duplex = DUPLEX_HALF;
} else {
- ecmd->speed = -1;
+ ethtool_cmd_speed_set(ecmd, -1);
ecmd->duplex = -1;
}
@@ -391,11 +391,6 @@ static int igbvf_set_wol(struct net_device *netdev,
return -EOPNOTSUPP;
}
-static int igbvf_phys_id(struct net_device *netdev, u32 data)
-{
- return 0;
-}
-
static int igbvf_get_coalesce(struct net_device *netdev,
struct ethtool_coalesce *ec)
{
@@ -527,7 +522,6 @@ static const struct ethtool_ops igbvf_ethtool_ops = {
.self_test = igbvf_diag_test,
.get_sset_count = igbvf_get_sset_count,
.get_strings = igbvf_get_strings,
- .phys_id = igbvf_phys_id,
.get_ethtool_stats = igbvf_get_ethtool_stats,
.get_coalesce = igbvf_get_coalesce,
.set_coalesce = igbvf_set_coalesce,
diff --git a/drivers/net/igbvf/netdev.c b/drivers/net/igbvf/netdev.c
index 6ccc32fd7338..1d04ca6fdaea 100644
--- a/drivers/net/igbvf/netdev.c
+++ b/drivers/net/igbvf/netdev.c
@@ -2227,7 +2227,7 @@ static netdev_tx_t igbvf_xmit_frame_ring_adv(struct sk_buff *skb,
/*
* count reflects descriptors mapped, if 0 then mapping error
- * has occured and we need to rewind the descriptor queue
+ * has occurred and we need to rewind the descriptor queue
*/
count = igbvf_tx_map_adv(adapter, tx_ring, skb, first);
diff --git a/drivers/net/ioc3-eth.c b/drivers/net/ioc3-eth.c
index c8ee8d28767b..96c95617195f 100644
--- a/drivers/net/ioc3-eth.c
+++ b/drivers/net/ioc3-eth.c
@@ -90,8 +90,6 @@ struct ioc3_private {
u32 emcr, ehar_h, ehar_l;
spinlock_t ioc3_lock;
struct mii_if_info mii;
- unsigned long flags;
-#define IOC3_FLAG_RX_CHECKSUMS 1
struct pci_dev *pdev;
@@ -609,7 +607,7 @@ static inline void ioc3_rx(struct net_device *dev)
goto next;
}
- if (likely(ip->flags & IOC3_FLAG_RX_CHECKSUMS))
+ if (likely(dev->features & NETIF_F_RXCSUM))
ioc3_tcpudp_checksum(skb,
w0 & ERXBUF_IPCKSUM_MASK, len);
@@ -1328,6 +1326,7 @@ static int __devinit ioc3_probe(struct pci_dev *pdev,
dev->watchdog_timeo = 5 * HZ;
dev->netdev_ops = &ioc3_netdev_ops;
dev->ethtool_ops = &ioc3_ethtool_ops;
+ dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_RXCSUM;
dev->features = NETIF_F_IP_CSUM;
sw_physid1 = ioc3_mdio_read(dev, ip->mii.phy_id, MII_PHYSID1);
@@ -1618,37 +1617,12 @@ static u32 ioc3_get_link(struct net_device *dev)
return rc;
}
-static u32 ioc3_get_rx_csum(struct net_device *dev)
-{
- struct ioc3_private *ip = netdev_priv(dev);
-
- return ip->flags & IOC3_FLAG_RX_CHECKSUMS;
-}
-
-static int ioc3_set_rx_csum(struct net_device *dev, u32 data)
-{
- struct ioc3_private *ip = netdev_priv(dev);
-
- spin_lock_bh(&ip->ioc3_lock);
- if (data)
- ip->flags |= IOC3_FLAG_RX_CHECKSUMS;
- else
- ip->flags &= ~IOC3_FLAG_RX_CHECKSUMS;
- spin_unlock_bh(&ip->ioc3_lock);
-
- return 0;
-}
-
static const struct ethtool_ops ioc3_ethtool_ops = {
.get_drvinfo = ioc3_get_drvinfo,
.get_settings = ioc3_get_settings,
.set_settings = ioc3_set_settings,
.nway_reset = ioc3_nway_reset,
.get_link = ioc3_get_link,
- .get_rx_csum = ioc3_get_rx_csum,
- .set_rx_csum = ioc3_set_rx_csum,
- .get_tx_csum = ethtool_op_get_tx_csum,
- .set_tx_csum = ethtool_op_set_tx_csum
};
static int ioc3_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
diff --git a/drivers/net/ipg.c b/drivers/net/ipg.c
index a5b0f0e194bb..58cd3202b48c 100644
--- a/drivers/net/ipg.c
+++ b/drivers/net/ipg.c
@@ -486,14 +486,14 @@ static int ipg_config_autoneg(struct net_device *dev)
phyctrl = ipg_r8(PHY_CTRL);
mac_ctrl_val = ipg_r32(MAC_CTRL);
- /* Set flags for use in resolving auto-negotation, assuming
+ /* Set flags for use in resolving auto-negotiation, assuming
* non-1000Mbps, half duplex, no flow control.
*/
fullduplex = 0;
txflowcontrol = 0;
rxflowcontrol = 0;
- /* To accomodate a problem in 10Mbps operation,
+ /* To accommodate a problem in 10Mbps operation,
* set a global flag if PHY running in 10Mbps mode.
*/
sp->tenmbpsmode = 0;
@@ -846,7 +846,7 @@ static void init_tfdlist(struct net_device *dev)
}
/*
- * Free all transmit buffers which have already been transfered
+ * Free all transmit buffers which have already been transferred
* via DMA to the IPG.
*/
static void ipg_nic_txfree(struct net_device *dev)
@@ -920,7 +920,7 @@ static void ipg_tx_timeout(struct net_device *dev)
/*
* For TxComplete interrupts, free all transmit
- * buffers which have already been transfered via DMA
+ * buffers which have already been transferred via DMA
* to the IPG.
*/
static void ipg_nic_txcleanup(struct net_device *dev)
@@ -1141,13 +1141,13 @@ static int ipg_nic_rx_check_error(struct net_device *dev)
/* Increment detailed receive error statistics. */
if (le64_to_cpu(rxfd->rfs) & IPG_RFS_RXFIFOOVERRUN) {
- IPG_DEBUG_MSG("RX FIFO overrun occured.\n");
+ IPG_DEBUG_MSG("RX FIFO overrun occurred.\n");
sp->stats.rx_fifo_errors++;
}
if (le64_to_cpu(rxfd->rfs) & IPG_RFS_RXRUNTFRAME) {
- IPG_DEBUG_MSG("RX runt occured.\n");
+ IPG_DEBUG_MSG("RX runt occurred.\n");
sp->stats.rx_length_errors++;
}
@@ -1156,7 +1156,7 @@ static int ipg_nic_rx_check_error(struct net_device *dev)
*/
if (le64_to_cpu(rxfd->rfs) & IPG_RFS_RXALIGNMENTERROR) {
- IPG_DEBUG_MSG("RX alignment error occured.\n");
+ IPG_DEBUG_MSG("RX alignment error occurred.\n");
sp->stats.rx_frame_errors++;
}
@@ -1421,12 +1421,12 @@ static int ipg_nic_rx(struct net_device *dev)
/* Increment detailed receive error statistics. */
if (le64_to_cpu(rxfd->rfs) & IPG_RFS_RXFIFOOVERRUN) {
- IPG_DEBUG_MSG("RX FIFO overrun occured.\n");
+ IPG_DEBUG_MSG("RX FIFO overrun occurred.\n");
sp->stats.rx_fifo_errors++;
}
if (le64_to_cpu(rxfd->rfs) & IPG_RFS_RXRUNTFRAME) {
- IPG_DEBUG_MSG("RX runt occured.\n");
+ IPG_DEBUG_MSG("RX runt occurred.\n");
sp->stats.rx_length_errors++;
}
@@ -1436,7 +1436,7 @@ static int ipg_nic_rx(struct net_device *dev)
*/
if (le64_to_cpu(rxfd->rfs) & IPG_RFS_RXALIGNMENTERROR) {
- IPG_DEBUG_MSG("RX alignment error occured.\n");
+ IPG_DEBUG_MSG("RX alignment error occurred.\n");
sp->stats.rx_frame_errors++;
}
@@ -1460,7 +1460,7 @@ static int ipg_nic_rx(struct net_device *dev)
}
} else {
- /* Adjust the new buffer length to accomodate the size
+ /* Adjust the new buffer length to accommodate the size
* of the received frame.
*/
skb_put(skb, framelen);
@@ -1488,7 +1488,7 @@ static int ipg_nic_rx(struct net_device *dev)
}
/*
- * If there are more RFDs to proces and the allocated amount of RFD
+ * If there are more RFDs to process and the allocated amount of RFD
* processing time has expired, assert Interrupt Requested to make
* sure we come back to process the remaining RFDs.
*/
@@ -1886,7 +1886,7 @@ static netdev_tx_t ipg_nic_hard_start_xmit(struct sk_buff *skb,
/* Request TxComplete interrupts at an interval defined
* by the constant IPG_FRAMESBETWEENTXCOMPLETES.
* Request TxComplete interrupt for every frame
- * if in 10Mbps mode to accomodate problem with 10Mbps
+ * if in 10Mbps mode to accommodate problem with 10Mbps
* processing.
*/
if (sp->tenmbpsmode)
@@ -2098,7 +2098,7 @@ static int ipg_nic_change_mtu(struct net_device *dev, int new_mtu)
struct ipg_nic_private *sp = netdev_priv(dev);
int err;
- /* Function to accomodate changes to Maximum Transfer Unit
+ /* Function to accommodate changes to Maximum Transfer Unit
* (or MTU) of IPG NIC. Cannot use default function since
* the default will not allow for MTU > 1500 bytes.
*/
diff --git a/drivers/net/irda/ali-ircc.c b/drivers/net/irda/ali-ircc.c
index 92631eb6f6a3..872183f29ec4 100644
--- a/drivers/net/irda/ali-ircc.c
+++ b/drivers/net/irda/ali-ircc.c
@@ -76,7 +76,7 @@ static int ali_ircc_probe_53(ali_chip_t *chip, chipio_t *info);
static int ali_ircc_init_43(ali_chip_t *chip, chipio_t *info);
static int ali_ircc_init_53(ali_chip_t *chip, chipio_t *info);
-/* These are the currently known ALi sourth-bridge chipsets, the only one difference
+/* These are the currently known ALi south-bridge chipsets, the only one difference
* is that M1543C doesn't support HP HDSL-3600
*/
static ali_chip_t chips[] =
@@ -1108,7 +1108,7 @@ static void ali_ircc_sir_change_speed(struct ali_ircc_cb *priv, __u32 speed)
outb(lcr, iobase+UART_LCR); /* Set 8N1 */
outb(fcr, iobase+UART_FCR); /* Enable FIFO's */
- /* without this, the conection will be broken after come back from FIR speed,
+ /* without this, the connection will be broken after come back from FIR speed,
but with this, the SIR connection is harder to established */
outb((UART_MCR_DTR | UART_MCR_RTS | UART_MCR_OUT2), iobase+UART_MCR);
diff --git a/drivers/net/irda/donauboe.c b/drivers/net/irda/donauboe.c
index f81d944fc360..174cafad2c1a 100644
--- a/drivers/net/irda/donauboe.c
+++ b/drivers/net/irda/donauboe.c
@@ -56,7 +56,7 @@
/* do_probe module parameter Enable this code */
/* Probe code is very useful for understanding how the hardware works */
/* Use it with various combinations of TT_LEN, RX_LEN */
-/* Strongly recomended, disable if the probe fails on your machine */
+/* Strongly recommended, disable if the probe fails on your machine */
/* and send me <james@fishsoup.dhs.org> the output of dmesg */
#define USE_PROBE 1
#undef USE_PROBE
diff --git a/drivers/net/irda/donauboe.h b/drivers/net/irda/donauboe.h
index 77fcf4459161..d92d54e839b9 100644
--- a/drivers/net/irda/donauboe.h
+++ b/drivers/net/irda/donauboe.h
@@ -51,7 +51,7 @@
/* The documentation for this chip is allegedly released */
/* However I have not seen it, not have I managed to contact */
-/* anyone who has. HOWEVER the chip bears a striking resemblence */
+/* anyone who has. HOWEVER the chip bears a striking resemblance */
/* to the IrDA controller in the Toshiba RISC TMPR3922 chip */
/* the documentation for this is freely available at */
/* http://www.madingley.org/james/resources/toshoboe/TMPR3922.pdf */
diff --git a/drivers/net/irda/girbil-sir.c b/drivers/net/irda/girbil-sir.c
index a31b8fa8aaa9..96cdecff349d 100644
--- a/drivers/net/irda/girbil-sir.c
+++ b/drivers/net/irda/girbil-sir.c
@@ -38,7 +38,7 @@ static int girbil_change_speed(struct sir_dev *dev, unsigned speed);
/* Control register 1 */
#define GIRBIL_TXEN 0x01 /* Enable transmitter */
#define GIRBIL_RXEN 0x02 /* Enable receiver */
-#define GIRBIL_ECAN 0x04 /* Cancel self emmited data */
+#define GIRBIL_ECAN 0x04 /* Cancel self emitted data */
#define GIRBIL_ECHO 0x08 /* Echo control characters */
/* LED Current Register (0x2) */
diff --git a/drivers/net/irda/irda-usb.c b/drivers/net/irda/irda-usb.c
index e4ea61944c22..d9267cb98a23 100644
--- a/drivers/net/irda/irda-usb.c
+++ b/drivers/net/irda/irda-usb.c
@@ -370,7 +370,7 @@ static void speed_bulk_callback(struct urb *urb)
/* urb is now available */
//urb->status = 0; -> tested above
- /* New speed and xbof is now commited in hardware */
+ /* New speed and xbof is now committed in hardware */
self->new_speed = -1;
self->new_xbofs = -1;
@@ -602,7 +602,7 @@ static void write_bulk_callback(struct urb *urb)
IRDA_DEBUG(1, "%s(), Changing speed now...\n", __func__);
irda_usb_change_speed_xbofs(self);
} else {
- /* New speed and xbof is now commited in hardware */
+ /* New speed and xbof is now committed in hardware */
self->new_speed = -1;
self->new_xbofs = -1;
/* Done, waiting for next packet */
diff --git a/drivers/net/irda/mcs7780.c b/drivers/net/irda/mcs7780.c
index cc821de2c966..be52bfed66a9 100644
--- a/drivers/net/irda/mcs7780.c
+++ b/drivers/net/irda/mcs7780.c
@@ -588,7 +588,7 @@ static int mcs_speed_change(struct mcs_cb *mcs)
mcs_get_reg(mcs, MCS_MODE_REG, &rval);
- /* MINRXPW values recomended by MosChip */
+ /* MINRXPW values recommended by MosChip */
if (mcs->new_speed <= 115200) {
rval &= ~MCS_FIR;
@@ -799,7 +799,7 @@ static void mcs_receive_irq(struct urb *urb)
ret = usb_submit_urb(urb, GFP_ATOMIC);
}
-/* Transmit callback funtion. */
+/* Transmit callback function. */
static void mcs_send_irq(struct urb *urb)
{
struct mcs_cb *mcs = urb->context;
@@ -811,7 +811,7 @@ static void mcs_send_irq(struct urb *urb)
netif_wake_queue(ndev);
}
-/* Transmit callback funtion. */
+/* Transmit callback function. */
static netdev_tx_t mcs_hard_xmit(struct sk_buff *skb,
struct net_device *ndev)
{
diff --git a/drivers/net/irda/nsc-ircc.c b/drivers/net/irda/nsc-ircc.c
index 559fe854d76d..7a963d4e6d06 100644
--- a/drivers/net/irda/nsc-ircc.c
+++ b/drivers/net/irda/nsc-ircc.c
@@ -716,7 +716,7 @@ static int nsc_ircc_probe_338(nsc_chip_t *chip, chipio_t *info)
int reg, com = 0;
int pnp;
- /* Read funtion enable register (FER) */
+ /* Read function enable register (FER) */
outb(CFG_338_FER, cfg_base);
reg = inb(cfg_base+1);
diff --git a/drivers/net/irda/nsc-ircc.h b/drivers/net/irda/nsc-ircc.h
index 7ba7738759b9..32fa58211fad 100644
--- a/drivers/net/irda/nsc-ircc.h
+++ b/drivers/net/irda/nsc-ircc.h
@@ -135,7 +135,7 @@
#define LSR_TXRDY 0x20 /* Transmitter ready */
#define LSR_TXEMP 0x40 /* Transmitter empty */
-#define ASCR 0x07 /* Auxillary Status and Control Register */
+#define ASCR 0x07 /* Auxiliary Status and Control Register */
#define ASCR_RXF_TOUT 0x01 /* Rx FIFO timeout */
#define ASCR_FEND_INF 0x02 /* Frame end bytes in rx FIFO */
#define ASCR_S_EOT 0x04 /* Set end of transmission */
diff --git a/drivers/net/irda/pxaficp_ir.c b/drivers/net/irda/pxaficp_ir.c
index c192c31e4c5c..001ed0a255f6 100644
--- a/drivers/net/irda/pxaficp_ir.c
+++ b/drivers/net/irda/pxaficp_ir.c
@@ -40,7 +40,7 @@
#define ICCR0_AME (1 << 7) /* Address match enable */
#define ICCR0_TIE (1 << 6) /* Transmit FIFO interrupt enable */
-#define ICCR0_RIE (1 << 5) /* Recieve FIFO interrupt enable */
+#define ICCR0_RIE (1 << 5) /* Receive FIFO interrupt enable */
#define ICCR0_RXE (1 << 4) /* Receive enable */
#define ICCR0_TXE (1 << 3) /* Transmit enable */
#define ICCR0_TUS (1 << 2) /* Transmit FIFO underrun select */
@@ -483,7 +483,7 @@ static irqreturn_t pxa_irda_fir_irq(int irq, void *dev_id)
}
if (icsr0 & ICSR0_EIF) {
- /* An error in FIFO occured, or there is a end of frame */
+ /* An error in FIFO occurred, or there is a end of frame */
pxa_irda_fir_irq_eif(si, dev, icsr0);
}
diff --git a/drivers/net/irda/smsc-ircc2.c b/drivers/net/irda/smsc-ircc2.c
index 1c1677cfea29..69b5707db369 100644
--- a/drivers/net/irda/smsc-ircc2.c
+++ b/drivers/net/irda/smsc-ircc2.c
@@ -222,19 +222,19 @@ static void smsc_ircc_set_transceiver_for_speed(struct smsc_ircc_cb *self, u32 s
static void smsc_ircc_sir_wait_hw_transmitter_finish(struct smsc_ircc_cb *self);
/* Probing */
-static int __init smsc_ircc_look_for_chips(void);
-static const struct smsc_chip * __init smsc_ircc_probe(unsigned short cfg_base, u8 reg, const struct smsc_chip *chip, char *type);
-static int __init smsc_superio_flat(const struct smsc_chip *chips, unsigned short cfg_base, char *type);
-static int __init smsc_superio_paged(const struct smsc_chip *chips, unsigned short cfg_base, char *type);
-static int __init smsc_superio_fdc(unsigned short cfg_base);
-static int __init smsc_superio_lpc(unsigned short cfg_base);
+static int smsc_ircc_look_for_chips(void);
+static const struct smsc_chip * smsc_ircc_probe(unsigned short cfg_base, u8 reg, const struct smsc_chip *chip, char *type);
+static int smsc_superio_flat(const struct smsc_chip *chips, unsigned short cfg_base, char *type);
+static int smsc_superio_paged(const struct smsc_chip *chips, unsigned short cfg_base, char *type);
+static int smsc_superio_fdc(unsigned short cfg_base);
+static int smsc_superio_lpc(unsigned short cfg_base);
#ifdef CONFIG_PCI
-static int __init preconfigure_smsc_chip(struct smsc_ircc_subsystem_configuration *conf);
-static int __init preconfigure_through_82801(struct pci_dev *dev, struct smsc_ircc_subsystem_configuration *conf);
-static void __init preconfigure_ali_port(struct pci_dev *dev,
+static int preconfigure_smsc_chip(struct smsc_ircc_subsystem_configuration *conf);
+static int preconfigure_through_82801(struct pci_dev *dev, struct smsc_ircc_subsystem_configuration *conf);
+static void preconfigure_ali_port(struct pci_dev *dev,
unsigned short port);
-static int __init preconfigure_through_ali(struct pci_dev *dev, struct smsc_ircc_subsystem_configuration *conf);
-static int __init smsc_ircc_preconfigure_subsystems(unsigned short ircc_cfg,
+static int preconfigure_through_ali(struct pci_dev *dev, struct smsc_ircc_subsystem_configuration *conf);
+static int smsc_ircc_preconfigure_subsystems(unsigned short ircc_cfg,
unsigned short ircc_fir,
unsigned short ircc_sir,
unsigned char ircc_dma,
@@ -366,7 +366,7 @@ static inline void register_bank(int iobase, int bank)
}
/* PNP hotplug support */
-static const struct pnp_device_id smsc_ircc_pnp_table[] = {
+static const struct pnp_device_id smsc_ircc_pnp_table[] __devinitconst = {
{ .id = "SMCf010", .driver_data = 0 },
/* and presumably others */
{ }
@@ -515,7 +515,7 @@ static const struct net_device_ops smsc_ircc_netdev_ops = {
* Try to open driver instance
*
*/
-static int __init smsc_ircc_open(unsigned int fir_base, unsigned int sir_base, u8 dma, u8 irq)
+static int __devinit smsc_ircc_open(unsigned int fir_base, unsigned int sir_base, u8 dma, u8 irq)
{
struct smsc_ircc_cb *self;
struct net_device *dev;
@@ -1582,7 +1582,7 @@ static irqreturn_t smsc_ircc_interrupt_sir(struct net_device *dev)
int iobase;
int iir, lsr;
- /* Already locked comming here in smsc_ircc_interrupt() */
+ /* Already locked coming here in smsc_ircc_interrupt() */
/*spin_lock(&self->lock);*/
iobase = self->io.sir_base;
@@ -2273,7 +2273,7 @@ static int __init smsc_superio_paged(const struct smsc_chip *chips, unsigned sho
}
-static int __init smsc_access(unsigned short cfg_base, unsigned char reg)
+static int __devinit smsc_access(unsigned short cfg_base, unsigned char reg)
{
IRDA_DEBUG(1, "%s\n", __func__);
@@ -2281,7 +2281,7 @@ static int __init smsc_access(unsigned short cfg_base, unsigned char reg)
return inb(cfg_base) != reg ? -1 : 0;
}
-static const struct smsc_chip * __init smsc_ircc_probe(unsigned short cfg_base, u8 reg, const struct smsc_chip *chip, char *type)
+static const struct smsc_chip * __devinit smsc_ircc_probe(unsigned short cfg_base, u8 reg, const struct smsc_chip *chip, char *type)
{
u8 devid, xdevid, rev;
@@ -2406,7 +2406,7 @@ static int __init smsc_superio_lpc(unsigned short cfg_base)
#ifdef CONFIG_PCI
#define PCIID_VENDOR_INTEL 0x8086
#define PCIID_VENDOR_ALI 0x10b9
-static struct smsc_ircc_subsystem_configuration subsystem_configurations[] __initdata = {
+static const struct smsc_ircc_subsystem_configuration subsystem_configurations[] __devinitconst = {
/*
* Subsystems needing entries:
* 0x10b9:0x1533 0x103c:0x0850 HP nx9010 family
@@ -2532,7 +2532,7 @@ static struct smsc_ircc_subsystem_configuration subsystem_configurations[] __ini
* (FIR port, SIR port, FIR DMA, FIR IRQ)
* through the chip configuration port.
*/
-static int __init preconfigure_smsc_chip(struct
+static int __devinit preconfigure_smsc_chip(struct
smsc_ircc_subsystem_configuration
*conf)
{
@@ -2633,7 +2633,7 @@ static int __init preconfigure_smsc_chip(struct
* or Intel 82801DB/DBL (ICH4/ICH4-L) LPC Interface Bridge.
* They all work the same way!
*/
-static int __init preconfigure_through_82801(struct pci_dev *dev,
+static int __devinit preconfigure_through_82801(struct pci_dev *dev,
struct
smsc_ircc_subsystem_configuration
*conf)
@@ -2786,7 +2786,7 @@ static int __init preconfigure_through_82801(struct pci_dev *dev,
* This is based on reverse-engineering since ALi does not
* provide any data sheet for the 1533 chip.
*/
-static void __init preconfigure_ali_port(struct pci_dev *dev,
+static void __devinit preconfigure_ali_port(struct pci_dev *dev,
unsigned short port)
{
unsigned char reg;
@@ -2824,7 +2824,7 @@ static void __init preconfigure_ali_port(struct pci_dev *dev,
IRDA_MESSAGE("Activated ALi 1533 ISA bridge port 0x%04x.\n", port);
}
-static int __init preconfigure_through_ali(struct pci_dev *dev,
+static int __devinit preconfigure_through_ali(struct pci_dev *dev,
struct
smsc_ircc_subsystem_configuration
*conf)
@@ -2837,7 +2837,7 @@ static int __init preconfigure_through_ali(struct pci_dev *dev,
return preconfigure_smsc_chip(conf);
}
-static int __init smsc_ircc_preconfigure_subsystems(unsigned short ircc_cfg,
+static int __devinit smsc_ircc_preconfigure_subsystems(unsigned short ircc_cfg,
unsigned short ircc_fir,
unsigned short ircc_sir,
unsigned char ircc_dma,
@@ -2849,7 +2849,7 @@ static int __init smsc_ircc_preconfigure_subsystems(unsigned short ircc_cfg,
int ret = 0;
for_each_pci_dev(dev) {
- struct smsc_ircc_subsystem_configuration *conf;
+ const struct smsc_ircc_subsystem_configuration *conf;
/*
* Cache the subsystem vendor/device:
diff --git a/drivers/net/irda/via-ircc.c b/drivers/net/irda/via-ircc.c
index 186cd28a61cc..f504b262ba36 100644
--- a/drivers/net/irda/via-ircc.c
+++ b/drivers/net/irda/via-ircc.c
@@ -29,7 +29,7 @@ F02 Oct/28/02: Add SB device ID for 3147 and 3177.
2004-02-16: <sda@bdit.de>
- Removed unneeded 'legacy' pci stuff.
-- Make sure SIR mode is set (hw_init()) before calling mode-dependant stuff.
+- Make sure SIR mode is set (hw_init()) before calling mode-dependent stuff.
- On speed change from core, don't send SIR frame with new speed.
Use current speed and change speeds later.
- Make module-param dongle_id actually work.
@@ -334,7 +334,7 @@ static __devinit int via_ircc_open(struct pci_dev *pdev, chipio_t * info,
self->io.dongle_id = dongle_id;
/* The only value we must override it the baudrate */
- /* Maximum speeds and capabilities are dongle-dependant. */
+ /* Maximum speeds and capabilities are dongle-dependent. */
switch( self->io.dongle_id ){
case 0x0d:
self->qos.baud_rate.bits =
diff --git a/drivers/net/irda/vlsi_ir.h b/drivers/net/irda/vlsi_ir.h
index d66fab854bf1..a076eb125349 100644
--- a/drivers/net/irda/vlsi_ir.h
+++ b/drivers/net/irda/vlsi_ir.h
@@ -209,7 +209,7 @@ enum vlsi_pio_irintr {
IRINTR_ACTEN = 0x80, /* activity interrupt enable */
IRINTR_ACTIVITY = 0x40, /* activity monitor (traffic detected) */
IRINTR_RPKTEN = 0x20, /* receive packet interrupt enable*/
- IRINTR_RPKTINT = 0x10, /* rx-packet transfered from fifo to memory finished */
+ IRINTR_RPKTINT = 0x10, /* rx-packet transferred from fifo to memory finished */
IRINTR_TPKTEN = 0x08, /* transmit packet interrupt enable */
IRINTR_TPKTINT = 0x04, /* last bit of tx-packet+crc shifted to ir-pulser */
IRINTR_OE_EN = 0x02, /* UART rx fifo overrun error interrupt enable */
@@ -739,7 +739,7 @@ typedef struct vlsi_irda_dev {
/* the remapped error flags we use for returning from frame
* post-processing in vlsi_process_tx/rx() after it was completed
* by the hardware. These functions either return the >=0 number
- * of transfered bytes in case of success or the negative (-)
+ * of transferred bytes in case of success or the negative (-)
* of the or'ed error flags.
*/
diff --git a/drivers/net/ixgb/ixgb.h b/drivers/net/ixgb/ixgb.h
index 8f3df044e81e..49e8408f05fc 100644
--- a/drivers/net/ixgb/ixgb.h
+++ b/drivers/net/ixgb/ixgb.h
@@ -157,9 +157,6 @@ struct ixgb_adapter {
u16 link_duplex;
struct work_struct tx_timeout_task;
- struct timer_list blink_timer;
- unsigned long led_status;
-
/* TX */
struct ixgb_desc_ring tx_ring ____cacheline_aligned_in_smp;
unsigned int restart_queue;
diff --git a/drivers/net/ixgb/ixgb_ethtool.c b/drivers/net/ixgb/ixgb_ethtool.c
index cc53aa1541ba..6da890b9534c 100644
--- a/drivers/net/ixgb/ixgb_ethtool.c
+++ b/drivers/net/ixgb/ixgb_ethtool.c
@@ -104,10 +104,10 @@ ixgb_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
ecmd->transceiver = XCVR_EXTERNAL;
if (netif_carrier_ok(adapter->netdev)) {
- ecmd->speed = SPEED_10000;
+ ethtool_cmd_speed_set(ecmd, SPEED_10000);
ecmd->duplex = DUPLEX_FULL;
} else {
- ecmd->speed = -1;
+ ethtool_cmd_speed_set(ecmd, -1);
ecmd->duplex = -1;
}
@@ -129,9 +129,10 @@ static int
ixgb_set_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
{
struct ixgb_adapter *adapter = netdev_priv(netdev);
+ u32 speed = ethtool_cmd_speed(ecmd);
if (ecmd->autoneg == AUTONEG_ENABLE ||
- ecmd->speed + ecmd->duplex != SPEED_10000 + DUPLEX_FULL)
+ (speed + ecmd->duplex != SPEED_10000 + DUPLEX_FULL))
return -EINVAL;
if (netif_running(adapter->netdev)) {
@@ -610,45 +611,23 @@ err_setup_rx:
return err;
}
-/* toggle LED 4 times per second = 2 "blinks" per second */
-#define IXGB_ID_INTERVAL (HZ/4)
-
-/* bit defines for adapter->led_status */
-#define IXGB_LED_ON 0
-
-static void
-ixgb_led_blink_callback(unsigned long data)
-{
- struct ixgb_adapter *adapter = (struct ixgb_adapter *)data;
-
- if (test_and_change_bit(IXGB_LED_ON, &adapter->led_status))
- ixgb_led_off(&adapter->hw);
- else
- ixgb_led_on(&adapter->hw);
-
- mod_timer(&adapter->blink_timer, jiffies + IXGB_ID_INTERVAL);
-}
-
static int
-ixgb_phys_id(struct net_device *netdev, u32 data)
+ixgb_set_phys_id(struct net_device *netdev, enum ethtool_phys_id_state state)
{
struct ixgb_adapter *adapter = netdev_priv(netdev);
- if (!data)
- data = INT_MAX;
-
- if (!adapter->blink_timer.function) {
- init_timer(&adapter->blink_timer);
- adapter->blink_timer.function = ixgb_led_blink_callback;
- adapter->blink_timer.data = (unsigned long)adapter;
- }
+ switch (state) {
+ case ETHTOOL_ID_ACTIVE:
+ return 2;
- mod_timer(&adapter->blink_timer, jiffies);
+ case ETHTOOL_ID_ON:
+ ixgb_led_on(&adapter->hw);
+ break;
- msleep_interruptible(data * 1000);
- del_timer_sync(&adapter->blink_timer);
- ixgb_led_off(&adapter->hw);
- clear_bit(IXGB_LED_ON, &adapter->led_status);
+ case ETHTOOL_ID_OFF:
+ case ETHTOOL_ID_INACTIVE:
+ ixgb_led_off(&adapter->hw);
+ }
return 0;
}
@@ -766,7 +745,7 @@ static const struct ethtool_ops ixgb_ethtool_ops = {
.set_msglevel = ixgb_set_msglevel,
.set_tso = ixgb_set_tso,
.get_strings = ixgb_get_strings,
- .phys_id = ixgb_phys_id,
+ .set_phys_id = ixgb_set_phys_id,
.get_sset_count = ixgb_get_sset_count,
.get_ethtool_stats = ixgb_get_ethtool_stats,
.get_flags = ethtool_op_get_flags,
diff --git a/drivers/net/ixgbe/ixgbe.h b/drivers/net/ixgbe/ixgbe.h
index 8d468028bb55..e467b20ed1f0 100644
--- a/drivers/net/ixgbe/ixgbe.h
+++ b/drivers/net/ixgbe/ixgbe.h
@@ -106,6 +106,7 @@
#define IXGBE_MAX_VF_FUNCTIONS 64
#define IXGBE_MAX_VFTA_ENTRIES 128
#define MAX_EMULATION_MAC_ADDRS 16
+#define IXGBE_MAX_PF_MACVLANS 15
#define VMDQ_P(p) ((p) + adapter->num_vfs)
struct vf_data_storage {
@@ -121,6 +122,15 @@ struct vf_data_storage {
u16 tx_rate;
};
+struct vf_macvlans {
+ struct list_head l;
+ int vf;
+ int rar_entry;
+ bool free;
+ bool is_macvlan;
+ u8 vf_macvlan[ETH_ALEN];
+};
+
/* wrapper around a pointer to a socket buffer,
* so a DMA handle can be stored along with the buffer */
struct ixgbe_tx_buffer {
@@ -331,10 +341,52 @@ struct ixgbe_q_vector {
/* board specific private data structure */
struct ixgbe_adapter {
- struct timer_list watchdog_timer;
+ unsigned long state;
+
+ /* Some features need tri-state capability,
+ * thus the additional *_CAPABLE flags.
+ */
+ u32 flags;
+#define IXGBE_FLAG_RX_CSUM_ENABLED (u32)(1)
+#define IXGBE_FLAG_MSI_CAPABLE (u32)(1 << 1)
+#define IXGBE_FLAG_MSI_ENABLED (u32)(1 << 2)
+#define IXGBE_FLAG_MSIX_CAPABLE (u32)(1 << 3)
+#define IXGBE_FLAG_MSIX_ENABLED (u32)(1 << 4)
+#define IXGBE_FLAG_RX_1BUF_CAPABLE (u32)(1 << 6)
+#define IXGBE_FLAG_RX_PS_CAPABLE (u32)(1 << 7)
+#define IXGBE_FLAG_RX_PS_ENABLED (u32)(1 << 8)
+#define IXGBE_FLAG_IN_NETPOLL (u32)(1 << 9)
+#define IXGBE_FLAG_DCA_ENABLED (u32)(1 << 10)
+#define IXGBE_FLAG_DCA_CAPABLE (u32)(1 << 11)
+#define IXGBE_FLAG_IMIR_ENABLED (u32)(1 << 12)
+#define IXGBE_FLAG_MQ_CAPABLE (u32)(1 << 13)
+#define IXGBE_FLAG_DCB_ENABLED (u32)(1 << 14)
+#define IXGBE_FLAG_RSS_ENABLED (u32)(1 << 16)
+#define IXGBE_FLAG_RSS_CAPABLE (u32)(1 << 17)
+#define IXGBE_FLAG_VMDQ_CAPABLE (u32)(1 << 18)
+#define IXGBE_FLAG_VMDQ_ENABLED (u32)(1 << 19)
+#define IXGBE_FLAG_FAN_FAIL_CAPABLE (u32)(1 << 20)
+#define IXGBE_FLAG_NEED_LINK_UPDATE (u32)(1 << 22)
+#define IXGBE_FLAG_NEED_LINK_CONFIG (u32)(1 << 23)
+#define IXGBE_FLAG_FDIR_HASH_CAPABLE (u32)(1 << 24)
+#define IXGBE_FLAG_FDIR_PERFECT_CAPABLE (u32)(1 << 25)
+#define IXGBE_FLAG_FCOE_CAPABLE (u32)(1 << 26)
+#define IXGBE_FLAG_FCOE_ENABLED (u32)(1 << 27)
+#define IXGBE_FLAG_SRIOV_CAPABLE (u32)(1 << 28)
+#define IXGBE_FLAG_SRIOV_ENABLED (u32)(1 << 29)
+
+ u32 flags2;
+#define IXGBE_FLAG2_RSC_CAPABLE (u32)(1)
+#define IXGBE_FLAG2_RSC_ENABLED (u32)(1 << 1)
+#define IXGBE_FLAG2_TEMP_SENSOR_CAPABLE (u32)(1 << 2)
+#define IXGBE_FLAG2_TEMP_SENSOR_EVENT (u32)(1 << 3)
+#define IXGBE_FLAG2_SEARCH_FOR_SFP (u32)(1 << 4)
+#define IXGBE_FLAG2_SFP_NEEDS_RESET (u32)(1 << 5)
+#define IXGBE_FLAG2_RESET_REQUESTED (u32)(1 << 6)
+#define IXGBE_FLAG2_FDIR_REQUIRES_REINIT (u32)(1 << 7)
+
unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
u16 bd_number;
- struct work_struct reset_task;
struct ixgbe_q_vector *q_vector[MAX_MSIX_Q_VECTORS];
/* DCB parameters */
@@ -377,43 +429,6 @@ struct ixgbe_adapter {
u32 alloc_rx_page_failed;
u32 alloc_rx_buff_failed;
- /* Some features need tri-state capability,
- * thus the additional *_CAPABLE flags.
- */
- u32 flags;
-#define IXGBE_FLAG_RX_CSUM_ENABLED (u32)(1)
-#define IXGBE_FLAG_MSI_CAPABLE (u32)(1 << 1)
-#define IXGBE_FLAG_MSI_ENABLED (u32)(1 << 2)
-#define IXGBE_FLAG_MSIX_CAPABLE (u32)(1 << 3)
-#define IXGBE_FLAG_MSIX_ENABLED (u32)(1 << 4)
-#define IXGBE_FLAG_RX_1BUF_CAPABLE (u32)(1 << 6)
-#define IXGBE_FLAG_RX_PS_CAPABLE (u32)(1 << 7)
-#define IXGBE_FLAG_RX_PS_ENABLED (u32)(1 << 8)
-#define IXGBE_FLAG_IN_NETPOLL (u32)(1 << 9)
-#define IXGBE_FLAG_DCA_ENABLED (u32)(1 << 10)
-#define IXGBE_FLAG_DCA_CAPABLE (u32)(1 << 11)
-#define IXGBE_FLAG_IMIR_ENABLED (u32)(1 << 12)
-#define IXGBE_FLAG_MQ_CAPABLE (u32)(1 << 13)
-#define IXGBE_FLAG_DCB_ENABLED (u32)(1 << 14)
-#define IXGBE_FLAG_RSS_ENABLED (u32)(1 << 16)
-#define IXGBE_FLAG_RSS_CAPABLE (u32)(1 << 17)
-#define IXGBE_FLAG_VMDQ_CAPABLE (u32)(1 << 18)
-#define IXGBE_FLAG_VMDQ_ENABLED (u32)(1 << 19)
-#define IXGBE_FLAG_FAN_FAIL_CAPABLE (u32)(1 << 20)
-#define IXGBE_FLAG_NEED_LINK_UPDATE (u32)(1 << 22)
-#define IXGBE_FLAG_IN_SFP_LINK_TASK (u32)(1 << 23)
-#define IXGBE_FLAG_IN_SFP_MOD_TASK (u32)(1 << 24)
-#define IXGBE_FLAG_FDIR_HASH_CAPABLE (u32)(1 << 25)
-#define IXGBE_FLAG_FDIR_PERFECT_CAPABLE (u32)(1 << 26)
-#define IXGBE_FLAG_FCOE_CAPABLE (u32)(1 << 27)
-#define IXGBE_FLAG_FCOE_ENABLED (u32)(1 << 28)
-#define IXGBE_FLAG_SRIOV_CAPABLE (u32)(1 << 29)
-#define IXGBE_FLAG_SRIOV_ENABLED (u32)(1 << 30)
-
- u32 flags2;
-#define IXGBE_FLAG2_RSC_CAPABLE (u32)(1)
-#define IXGBE_FLAG2_RSC_ENABLED (u32)(1 << 1)
-#define IXGBE_FLAG2_TEMP_SENSOR_CAPABLE (u32)(1 << 2)
/* default to trying for four seconds */
#define IXGBE_TRY_LINK_TIMEOUT (4 * HZ)
@@ -434,7 +449,6 @@ struct ixgbe_adapter {
u32 rx_eitr_param;
u32 tx_eitr_param;
- unsigned long state;
u64 tx_busy;
unsigned int tx_ring_count;
unsigned int rx_ring_count;
@@ -443,15 +457,12 @@ struct ixgbe_adapter {
bool link_up;
unsigned long link_check_timeout;
- struct work_struct watchdog_task;
- struct work_struct sfp_task;
- struct timer_list sfp_timer;
- struct work_struct multispeed_fiber_task;
- struct work_struct sfp_config_module_task;
+ struct work_struct service_task;
+ struct timer_list service_timer;
u32 fdir_pballoc;
u32 atr_sample_rate;
+ unsigned long fdir_overflow; /* number of times ATR was backed off */
spinlock_t fdir_perfect_lock;
- struct work_struct fdir_reinit_task;
#ifdef IXGBE_FCOE
struct ixgbe_fcoe fcoe;
#endif /* IXGBE_FCOE */
@@ -461,7 +472,7 @@ struct ixgbe_adapter {
u16 eeprom_version;
int node;
- struct work_struct check_overtemp_task;
+ u32 led_reg;
u32 interrupt_event;
char lsc_int_name[IFNAMSIZ + 9];
@@ -470,13 +481,17 @@ struct ixgbe_adapter {
unsigned int num_vfs;
struct vf_data_storage *vfinfo;
int vf_rate_link_speed;
+ struct vf_macvlans vf_mvs;
+ struct vf_macvlans *mv_list;
+ bool antispoofing_enabled;
};
enum ixbge_state_t {
__IXGBE_TESTING,
__IXGBE_RESETTING,
__IXGBE_DOWN,
- __IXGBE_SFP_MODULE_NOT_FOUND
+ __IXGBE_SERVICE_SCHED,
+ __IXGBE_IN_SFP_INIT,
};
struct ixgbe_rsc_cb {
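
Several of the ixgbe.h removals above (watchdog_timer, reset_task, sfp_task, multispeed_fiber_task, fdir_reinit_task and friends) are replaced by the single service_timer/service_task pair guarded by the new __IXGBE_SERVICE_SCHED state bit. The usual shape of that consolidation pattern, sketched with hypothetical foo_ names rather than the driver's own functions:

static void foo_service_event_schedule(struct foo_adapter *adapter)
{
	/* Queue the one service work item, but never twice at a time
	 * and never while the interface is going down. */
	if (!test_bit(__FOO_DOWN, &adapter->state) &&
	    !test_and_set_bit(__FOO_SERVICE_SCHED, &adapter->state))
		schedule_work(&adapter->service_task);
}

static void foo_service_timer(unsigned long data)
{
	struct foo_adapter *adapter = (struct foo_adapter *)data;

	/* Re-arm the periodic timer, then defer the real work. */
	mod_timer(&adapter->service_timer, jiffies + 2 * HZ);
	foo_service_event_schedule(adapter);
}

The service task itself then clears the bit when it finishes, so watchdog, SFP, reset and FDIR maintenance all funnel through one context instead of five separate timers and work structs.
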
diff --git a/drivers/net/ixgbe/ixgbe_82598.c b/drivers/net/ixgbe/ixgbe_82598.c
index 845c679c8b87..8179e5060a18 100644
--- a/drivers/net/ixgbe/ixgbe_82598.c
+++ b/drivers/net/ixgbe/ixgbe_82598.c
@@ -37,6 +37,7 @@
#define IXGBE_82598_RAR_ENTRIES 16
#define IXGBE_82598_MC_TBL_SIZE 128
#define IXGBE_82598_VFT_TBL_SIZE 128
+#define IXGBE_82598_RX_PB_SIZE 512
static s32 ixgbe_setup_copper_link_82598(struct ixgbe_hw *hw,
ixgbe_link_speed speed,
@@ -197,14 +198,35 @@ out:
* @hw: pointer to hardware structure
*
* Starts the hardware using the generic start_hw function.
- * Then set pcie completion timeout
+ * Disables relaxed ordering Then set pcie completion timeout
+ *
**/
static s32 ixgbe_start_hw_82598(struct ixgbe_hw *hw)
{
+ u32 regval;
+ u32 i;
s32 ret_val = 0;
ret_val = ixgbe_start_hw_generic(hw);
+ /* Disable relaxed ordering */
+ for (i = 0; ((i < hw->mac.max_tx_queues) &&
+ (i < IXGBE_DCA_MAX_QUEUES_82598)); i++) {
+ regval = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(i));
+ regval &= ~IXGBE_DCA_TXCTRL_TX_WB_RO_EN;
+ IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(i), regval);
+ }
+
+ for (i = 0; ((i < hw->mac.max_rx_queues) &&
+ (i < IXGBE_DCA_MAX_QUEUES_82598)); i++) {
+ regval = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i));
+ regval &= ~(IXGBE_DCA_RXCTRL_DESC_WRO_EN |
+ IXGBE_DCA_RXCTRL_DESC_HSRO_EN);
+ IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(i), regval);
+ }
+
+ hw->mac.rx_pb_size = IXGBE_82598_RX_PB_SIZE;
+
/* set the completion timeout for interface */
if (ret_val == 0)
ixgbe_set_pcie_completion_timeout(hw);
@@ -1064,7 +1086,7 @@ static s32 ixgbe_read_i2c_eeprom_82598(struct ixgbe_hw *hw, u8 byte_offset,
sfp_stat = sfp_stat & IXGBE_I2C_EEPROM_STATUS_MASK;
if (sfp_stat != IXGBE_I2C_EEPROM_STATUS_IN_PROGRESS)
break;
- msleep(10);
+ usleep_range(10000, 20000);
}
if (sfp_stat != IXGBE_I2C_EEPROM_STATUS_PASS) {
@@ -1188,6 +1210,38 @@ out:
return physical_layer;
}
+/**
+ * ixgbe_set_lan_id_multi_port_pcie_82598 - Set LAN id for PCIe multiple
+ * port devices.
+ * @hw: pointer to the HW structure
+ *
+ * Calls common function and corrects issue with some single port devices
+ * that enable LAN1 but not LAN0.
+ **/
+static void ixgbe_set_lan_id_multi_port_pcie_82598(struct ixgbe_hw *hw)
+{
+ struct ixgbe_bus_info *bus = &hw->bus;
+ u16 pci_gen = 0;
+ u16 pci_ctrl2 = 0;
+
+ ixgbe_set_lan_id_multi_port_pcie(hw);
+
+ /* check if LAN0 is disabled */
+ hw->eeprom.ops.read(hw, IXGBE_PCIE_GENERAL_PTR, &pci_gen);
+ if ((pci_gen != 0) && (pci_gen != 0xFFFF)) {
+
+ hw->eeprom.ops.read(hw, pci_gen + IXGBE_PCIE_CTRL2, &pci_ctrl2);
+
+ /* if LAN0 is completely disabled force function to 0 */
+ if ((pci_ctrl2 & IXGBE_PCIE_CTRL2_LAN_DISABLE) &&
+ !(pci_ctrl2 & IXGBE_PCIE_CTRL2_DISABLE_SELECT) &&
+ !(pci_ctrl2 & IXGBE_PCIE_CTRL2_DUMMY_ENABLE)) {
+
+ bus->func = 0;
+ }
+ }
+}
+
static struct ixgbe_mac_operations mac_ops_82598 = {
.init_hw = &ixgbe_init_hw_generic,
.reset_hw = &ixgbe_reset_hw_82598,
@@ -1199,7 +1253,7 @@ static struct ixgbe_mac_operations mac_ops_82598 = {
.get_mac_addr = &ixgbe_get_mac_addr_generic,
.stop_adapter = &ixgbe_stop_adapter_generic,
.get_bus_info = &ixgbe_get_bus_info_generic,
- .set_lan_id = &ixgbe_set_lan_id_multi_port_pcie,
+ .set_lan_id = &ixgbe_set_lan_id_multi_port_pcie_82598,
.read_analog_reg8 = &ixgbe_read_analog_reg8_82598,
.write_analog_reg8 = &ixgbe_write_analog_reg8_82598,
.setup_link = &ixgbe_setup_mac_link_82598,
@@ -1227,6 +1281,7 @@ static struct ixgbe_mac_operations mac_ops_82598 = {
static struct ixgbe_eeprom_operations eeprom_ops_82598 = {
.init_params = &ixgbe_init_eeprom_params_generic,
.read = &ixgbe_read_eerd_generic,
+ .read_buffer = &ixgbe_read_eerd_buffer_generic,
.calc_checksum = &ixgbe_calc_eeprom_checksum_generic,
.validate_checksum = &ixgbe_validate_eeprom_checksum_generic,
.update_checksum = &ixgbe_update_eeprom_checksum_generic,
diff --git a/drivers/net/ixgbe/ixgbe_82599.c b/drivers/net/ixgbe/ixgbe_82599.c
index 00aeba385a2f..8ee661245af3 100644
--- a/drivers/net/ixgbe/ixgbe_82599.c
+++ b/drivers/net/ixgbe/ixgbe_82599.c
@@ -38,6 +38,7 @@
#define IXGBE_82599_RAR_ENTRIES 128
#define IXGBE_82599_MC_TBL_SIZE 128
#define IXGBE_82599_VFT_TBL_SIZE 128
+#define IXGBE_82599_RX_PB_SIZE 512
static void ixgbe_disable_tx_laser_multispeed_fiber(struct ixgbe_hw *hw);
static void ixgbe_enable_tx_laser_multispeed_fiber(struct ixgbe_hw *hw);
@@ -61,6 +62,7 @@ static s32 ixgbe_setup_copper_link_82599(struct ixgbe_hw *hw,
bool autoneg,
bool autoneg_wait_to_complete);
static s32 ixgbe_verify_fw_version_82599(struct ixgbe_hw *hw);
+static bool ixgbe_verify_lesm_fw_enabled_82599(struct ixgbe_hw *hw);
static void ixgbe_init_mac_link_ops_82599(struct ixgbe_hw *hw)
{
@@ -86,7 +88,8 @@ static void ixgbe_init_mac_link_ops_82599(struct ixgbe_hw *hw)
if ((mac->ops.get_media_type(hw) ==
ixgbe_media_type_backplane) &&
(hw->phy.smart_speed == ixgbe_smart_speed_auto ||
- hw->phy.smart_speed == ixgbe_smart_speed_on))
+ hw->phy.smart_speed == ixgbe_smart_speed_on) &&
+ !ixgbe_verify_lesm_fw_enabled_82599(hw))
mac->ops.setup_link = &ixgbe_setup_mac_link_smartspeed;
else
mac->ops.setup_link = &ixgbe_setup_mac_link_82599;
@@ -107,7 +110,6 @@ static s32 ixgbe_setup_sfp_modules_82599(struct ixgbe_hw *hw)
ret_val = ixgbe_get_sfp_init_sequence_offsets(hw, &list_offset,
&data_offset);
-
if (ret_val != 0)
goto setup_sfp_out;
@@ -127,9 +129,13 @@ static s32 ixgbe_setup_sfp_modules_82599(struct ixgbe_hw *hw)
}
/* Release the semaphore */
- ixgbe_release_swfw_sync(hw, IXGBE_GSSR_MAC_CSR_SM);
- /* Delay obtaining semaphore again to allow FW access */
- msleep(hw->eeprom.semaphore_delay);
+ hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_MAC_CSR_SM);
+ /*
+ * Delay obtaining semaphore again to allow FW access,
+ * semaphore_delay is in ms usleep_range needs us.
+ */
+ usleep_range(hw->eeprom.semaphore_delay * 1000,
+ hw->eeprom.semaphore_delay * 2000);
/* Now restart DSP by setting Restart_AN and clearing LMS */
IXGBE_WRITE_REG(hw, IXGBE_AUTOC, ((IXGBE_READ_REG(hw,
@@ -138,7 +144,7 @@ static s32 ixgbe_setup_sfp_modules_82599(struct ixgbe_hw *hw)
/* Wait for AN to leave state 0 */
for (i = 0; i < 10; i++) {
- msleep(4);
+ usleep_range(4000, 8000);
reg_anlp1 = IXGBE_READ_REG(hw, IXGBE_ANLP1);
if (reg_anlp1 & IXGBE_ANLP1_AN_STATE_MASK)
break;
@@ -353,6 +359,7 @@ static enum ixgbe_media_type ixgbe_get_media_type_82599(struct ixgbe_hw *hw)
case IXGBE_DEV_ID_82599_SFP:
case IXGBE_DEV_ID_82599_SFP_FCOE:
case IXGBE_DEV_ID_82599_SFP_EM:
+ case IXGBE_DEV_ID_82599_SFP_SF2:
media_type = ixgbe_media_type_fiber;
break;
case IXGBE_DEV_ID_82599_CX4:
@@ -361,6 +368,9 @@ static enum ixgbe_media_type ixgbe_get_media_type_82599(struct ixgbe_hw *hw)
case IXGBE_DEV_ID_82599_T3_LOM:
media_type = ixgbe_media_type_copper;
break;
+ case IXGBE_DEV_ID_82599_LS:
+ media_type = ixgbe_media_type_fiber_lco;
+ break;
default:
media_type = ixgbe_media_type_unknown;
break;
@@ -486,7 +496,7 @@ static void ixgbe_flap_tx_laser_multispeed_fiber(struct ixgbe_hw *hw)
*
* Set the link speed in the AUTOC register and restarts link.
**/
-s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw,
+static s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw,
ixgbe_link_speed speed,
bool autoneg,
bool autoneg_wait_to_complete)
@@ -1176,7 +1186,7 @@ s32 ixgbe_init_fdir_signature_82599(struct ixgbe_hw *hw, u32 pballoc)
if (IXGBE_READ_REG(hw, IXGBE_FDIRCTRL) &
IXGBE_FDIRCTRL_INIT_DONE)
break;
- msleep(1);
+ usleep_range(1000, 2000);
}
if (i >= IXGBE_FDIR_INIT_DONE_POLL)
hw_dbg(hw, "Flow Director Signature poll time exceeded!\n");
@@ -1271,7 +1281,7 @@ s32 ixgbe_init_fdir_perfect_82599(struct ixgbe_hw *hw, u32 pballoc)
if (IXGBE_READ_REG(hw, IXGBE_FDIRCTRL) &
IXGBE_FDIRCTRL_INIT_DONE)
break;
- msleep(1);
+ usleep_range(1000, 2000);
}
if (i >= IXGBE_FDIR_INIT_DONE_POLL)
hw_dbg(hw, "Flow Director Perfect poll time exceeded!\n");
@@ -1740,30 +1750,29 @@ static s32 ixgbe_write_analog_reg8_82599(struct ixgbe_hw *hw, u32 reg, u8 val)
* ixgbe_start_hw_82599 - Prepare hardware for Tx/Rx
* @hw: pointer to hardware structure
*
- * Starts the hardware using the generic start_hw function.
- * Then performs device-specific:
- * Clears the rate limiter registers.
+ * Starts the hardware using the generic start_hw function
+ * and the generation-specific start_hw function.
+ * Then performs revision-specific operations, if any.
**/
static s32 ixgbe_start_hw_82599(struct ixgbe_hw *hw)
{
- u32 q_num;
- s32 ret_val;
+ s32 ret_val = 0;
ret_val = ixgbe_start_hw_generic(hw);
+ if (ret_val != 0)
+ goto out;
- /* Clear the rate limiters */
- for (q_num = 0; q_num < hw->mac.max_tx_queues; q_num++) {
- IXGBE_WRITE_REG(hw, IXGBE_RTTDQSEL, q_num);
- IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRC, 0);
- }
- IXGBE_WRITE_FLUSH(hw);
+ ret_val = ixgbe_start_hw_gen2(hw);
+ if (ret_val != 0)
+ goto out;
/* We need to run link autotry after the driver loads */
hw->mac.autotry_restart = true;
+ hw->mac.rx_pb_size = IXGBE_82599_RX_PB_SIZE;
if (ret_val == 0)
ret_val = ixgbe_verify_fw_version_82599(hw);
-
+out:
return ret_val;
}
@@ -1775,7 +1784,7 @@ static s32 ixgbe_start_hw_82599(struct ixgbe_hw *hw)
* If PHY already detected, maintains current PHY type in hw struct,
* otherwise executes the PHY detection routine.
**/
-s32 ixgbe_identify_phy_82599(struct ixgbe_hw *hw)
+static s32 ixgbe_identify_phy_82599(struct ixgbe_hw *hw)
{
s32 status = IXGBE_ERR_PHY_ADDR_INVALID;
@@ -1968,21 +1977,6 @@ static s32 ixgbe_enable_rx_dma_82599(struct ixgbe_hw *hw, u32 regval)
}
/**
- * ixgbe_get_device_caps_82599 - Get additional device capabilities
- * @hw: pointer to hardware structure
- * @device_caps: the EEPROM word with the extra device capabilities
- *
- * This function will read the EEPROM location for the device capabilities,
- * and return the word through device_caps.
- **/
-static s32 ixgbe_get_device_caps_82599(struct ixgbe_hw *hw, u16 *device_caps)
-{
- hw->eeprom.ops.read(hw, IXGBE_DEVICE_CAPS, device_caps);
-
- return 0;
-}
-
-/**
* ixgbe_verify_fw_version_82599 - verify fw version for 82599
* @hw: pointer to hardware structure
*
@@ -2030,6 +2024,110 @@ fw_version_out:
return status;
}
+/**
+ * ixgbe_verify_lesm_fw_enabled_82599 - Checks LESM FW module state.
+ * @hw: pointer to hardware structure
+ *
+ * Returns true if the LESM FW module is present and enabled. Otherwise
+ * returns false. Smart Speed must be disabled if LESM FW module is enabled.
+ **/
+static bool ixgbe_verify_lesm_fw_enabled_82599(struct ixgbe_hw *hw)
+{
+ bool lesm_enabled = false;
+ u16 fw_offset, fw_lesm_param_offset, fw_lesm_state;
+ s32 status;
+
+ /* get the offset to the Firmware Module block */
+ status = hw->eeprom.ops.read(hw, IXGBE_FW_PTR, &fw_offset);
+
+ if ((status != 0) ||
+ (fw_offset == 0) || (fw_offset == 0xFFFF))
+ goto out;
+
+ /* get the offset to the LESM Parameters block */
+ status = hw->eeprom.ops.read(hw, (fw_offset +
+ IXGBE_FW_LESM_PARAMETERS_PTR),
+ &fw_lesm_param_offset);
+
+ if ((status != 0) ||
+ (fw_lesm_param_offset == 0) || (fw_lesm_param_offset == 0xFFFF))
+ goto out;
+
+ /* get the lesm state word */
+ status = hw->eeprom.ops.read(hw, (fw_lesm_param_offset +
+ IXGBE_FW_LESM_STATE_1),
+ &fw_lesm_state);
+
+ if ((status == 0) &&
+ (fw_lesm_state & IXGBE_FW_LESM_STATE_ENABLED))
+ lesm_enabled = true;
+
+out:
+ return lesm_enabled;
+}
+
+/**
+ * ixgbe_read_eeprom_buffer_82599 - Read EEPROM word(s) using
+ * fastest available method
+ *
+ * @hw: pointer to hardware structure
+ * @offset: offset of word in EEPROM to read
+ * @words: number of words
+ * @data: word(s) read from the EEPROM
+ *
+ * Retrieves 16 bit word(s) read from EEPROM
+ **/
+static s32 ixgbe_read_eeprom_buffer_82599(struct ixgbe_hw *hw, u16 offset,
+ u16 words, u16 *data)
+{
+ struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
+ s32 ret_val = IXGBE_ERR_CONFIG;
+
+ /*
+ * If EEPROM is detected and can be addressed using 14 bits,
+ * use EERD, otherwise use bit-bang
+ */
+ if ((eeprom->type == ixgbe_eeprom_spi) &&
+ (offset + (words - 1) <= IXGBE_EERD_MAX_ADDR))
+ ret_val = ixgbe_read_eerd_buffer_generic(hw, offset, words,
+ data);
+ else
+ ret_val = ixgbe_read_eeprom_buffer_bit_bang_generic(hw, offset,
+ words,
+ data);
+
+ return ret_val;
+}
+
+/**
+ * ixgbe_read_eeprom_82599 - Read EEPROM word using
+ * fastest available method
+ *
+ * @hw: pointer to hardware structure
+ * @offset: offset of word in the EEPROM to read
+ * @data: word read from the EEPROM
+ *
+ * Reads a 16 bit word from the EEPROM
+ **/
+static s32 ixgbe_read_eeprom_82599(struct ixgbe_hw *hw,
+ u16 offset, u16 *data)
+{
+ struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
+ s32 ret_val = IXGBE_ERR_CONFIG;
+
+ /*
+ * If EEPROM is detected and can be addressed using 14 bits,
+ * use EERD, otherwise use bit-bang
+ */
+ if ((eeprom->type == ixgbe_eeprom_spi) &&
+ (offset <= IXGBE_EERD_MAX_ADDR))
+ ret_val = ixgbe_read_eerd_generic(hw, offset, data);
+ else
+ ret_val = ixgbe_read_eeprom_bit_bang_generic(hw, offset, data);
+
+ return ret_val;
+}
+
static struct ixgbe_mac_operations mac_ops_82599 = {
.init_hw = &ixgbe_init_hw_generic,
.reset_hw = &ixgbe_reset_hw_82599,
@@ -2040,7 +2138,7 @@ static struct ixgbe_mac_operations mac_ops_82599 = {
.enable_rx_dma = &ixgbe_enable_rx_dma_82599,
.get_mac_addr = &ixgbe_get_mac_addr_generic,
.get_san_mac_addr = &ixgbe_get_san_mac_addr_generic,
- .get_device_caps = &ixgbe_get_device_caps_82599,
+ .get_device_caps = &ixgbe_get_device_caps_generic,
.get_wwn_prefix = &ixgbe_get_wwn_prefix_generic,
.stop_adapter = &ixgbe_stop_adapter_generic,
.get_bus_info = &ixgbe_get_bus_info_generic,
@@ -2076,8 +2174,10 @@ static struct ixgbe_mac_operations mac_ops_82599 = {
static struct ixgbe_eeprom_operations eeprom_ops_82599 = {
.init_params = &ixgbe_init_eeprom_params_generic,
- .read = &ixgbe_read_eerd_generic,
+ .read = &ixgbe_read_eeprom_82599,
+ .read_buffer = &ixgbe_read_eeprom_buffer_82599,
.write = &ixgbe_write_eeprom_generic,
+ .write_buffer = &ixgbe_write_eeprom_buffer_bit_bang_generic,
.calc_checksum = &ixgbe_calc_eeprom_checksum_generic,
.validate_checksum = &ixgbe_validate_eeprom_checksum_generic,
.update_checksum = &ixgbe_update_eeprom_checksum_generic,
diff --git a/drivers/net/ixgbe/ixgbe_common.c b/drivers/net/ixgbe/ixgbe_common.c
index bcd952916eb2..b894b42a741c 100644
--- a/drivers/net/ixgbe/ixgbe_common.c
+++ b/drivers/net/ixgbe/ixgbe_common.c
@@ -54,6 +54,13 @@ static s32 ixgbe_device_supports_autoneg_fc(struct ixgbe_hw *hw);
static s32 ixgbe_negotiate_fc(struct ixgbe_hw *hw, u32 adv_reg, u32 lp_reg,
u32 adv_sym, u32 adv_asm, u32 lp_sym, u32 lp_asm);
static s32 ixgbe_setup_fc(struct ixgbe_hw *hw, s32 packetbuf_num);
+static s32 ixgbe_poll_eerd_eewr_done(struct ixgbe_hw *hw, u32 ee_reg);
+static s32 ixgbe_read_eeprom_buffer_bit_bang(struct ixgbe_hw *hw, u16 offset,
+ u16 words, u16 *data);
+static s32 ixgbe_write_eeprom_buffer_bit_bang(struct ixgbe_hw *hw, u16 offset,
+ u16 words, u16 *data);
+static s32 ixgbe_detect_eeprom_page_size_generic(struct ixgbe_hw *hw,
+ u16 offset);
/**
* ixgbe_start_hw_generic - Prepare hardware for Tx/Rx
@@ -96,6 +103,45 @@ s32 ixgbe_start_hw_generic(struct ixgbe_hw *hw)
}
/**
+ * ixgbe_start_hw_gen2 - Init sequence for common device family
+ * @hw: pointer to hw structure
+ *
+ * Performs the init sequence common to the second generation
+ * of 10 GbE devices.
+ * Devices in the second generation:
+ * 82599
+ * X540
+ **/
+s32 ixgbe_start_hw_gen2(struct ixgbe_hw *hw)
+{
+ u32 i;
+ u32 regval;
+
+ /* Clear the rate limiters */
+ for (i = 0; i < hw->mac.max_tx_queues; i++) {
+ IXGBE_WRITE_REG(hw, IXGBE_RTTDQSEL, i);
+ IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRC, 0);
+ }
+ IXGBE_WRITE_FLUSH(hw);
+
+ /* Disable relaxed ordering */
+ for (i = 0; i < hw->mac.max_tx_queues; i++) {
+ regval = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(i));
+ regval &= ~IXGBE_DCA_TXCTRL_TX_WB_RO_EN;
+ IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(i), regval);
+ }
+
+ for (i = 0; i < hw->mac.max_rx_queues; i++) {
+ regval = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i));
+ regval &= ~(IXGBE_DCA_RXCTRL_DESC_WRO_EN |
+ IXGBE_DCA_RXCTRL_DESC_HSRO_EN);
+ IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(i), regval);
+ }
+
+ return 0;
+}
+
+/**
* ixgbe_init_hw_generic - Generic hardware initialization
* @hw: pointer to hardware structure
*
@@ -464,7 +510,7 @@ s32 ixgbe_stop_adapter_generic(struct ixgbe_hw *hw)
reg_val &= ~(IXGBE_RXCTRL_RXEN);
IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, reg_val);
IXGBE_WRITE_FLUSH(hw);
- msleep(2);
+ usleep_range(2000, 4000);
/* Clear interrupt mask to stop from interrupts being generated */
IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_IRQ_CLEAR_MASK);
@@ -545,6 +591,8 @@ s32 ixgbe_init_eeprom_params_generic(struct ixgbe_hw *hw)
/* Set default semaphore delay to 10ms which is a well
* tested value */
eeprom->semaphore_delay = 10;
+ /* Clear EEPROM page size, it will be initialized as needed */
+ eeprom->word_page_size = 0;
/*
* Check for EEPROM present first.
@@ -577,26 +625,78 @@ s32 ixgbe_init_eeprom_params_generic(struct ixgbe_hw *hw)
}
/**
- * ixgbe_write_eeprom_generic - Writes 16 bit value to EEPROM
+ * ixgbe_write_eeprom_buffer_bit_bang_generic - Write EEPROM using bit-bang
* @hw: pointer to hardware structure
- * @offset: offset within the EEPROM to be written to
- * @data: 16 bit word to be written to the EEPROM
+ * @offset: offset within the EEPROM to write
+ * @words: number of words
+ * @data: 16 bit word(s) to write to EEPROM
*
- * If ixgbe_eeprom_update_checksum is not called after this function, the
- * EEPROM will most likely contain an invalid checksum.
+ * Writes 16 bit word(s) to EEPROM through bit-bang method
**/
-s32 ixgbe_write_eeprom_generic(struct ixgbe_hw *hw, u16 offset, u16 data)
+s32 ixgbe_write_eeprom_buffer_bit_bang_generic(struct ixgbe_hw *hw, u16 offset,
+ u16 words, u16 *data)
{
- s32 status;
- u8 write_opcode = IXGBE_EEPROM_WRITE_OPCODE_SPI;
+ s32 status = 0;
+ u16 i, count;
hw->eeprom.ops.init_params(hw);
- if (offset >= hw->eeprom.word_size) {
+ if (words == 0) {
+ status = IXGBE_ERR_INVALID_ARGUMENT;
+ goto out;
+ }
+
+ if (offset + words > hw->eeprom.word_size) {
status = IXGBE_ERR_EEPROM;
goto out;
}
+ /*
+ * The EEPROM page size cannot be queried from the chip. We do lazy
+ * initialization. It is only worth doing when we write a large buffer.
+ */
+ if ((hw->eeprom.word_page_size == 0) &&
+ (words > IXGBE_EEPROM_PAGE_SIZE_MAX))
+ ixgbe_detect_eeprom_page_size_generic(hw, offset);
+
+ /*
+ * We cannot hold the synchronization semaphores for too long,
+ * to avoid starving other entities. However, it is more efficient
+ * to write in bursts than to synchronize access for each word.
+ */
+ for (i = 0; i < words; i += IXGBE_EEPROM_RD_BUFFER_MAX_COUNT) {
+ count = (words - i) / IXGBE_EEPROM_RD_BUFFER_MAX_COUNT > 0 ?
+ IXGBE_EEPROM_RD_BUFFER_MAX_COUNT : (words - i);
+ status = ixgbe_write_eeprom_buffer_bit_bang(hw, offset + i,
+ count, &data[i]);
+
+ if (status != 0)
+ break;
+ }
+
+out:
+ return status;
+}
+
+/**
+ * ixgbe_write_eeprom_buffer_bit_bang - Writes 16 bit word(s) to EEPROM
+ * @hw: pointer to hardware structure
+ * @offset: offset within the EEPROM to be written to
+ * @words: number of word(s)
+ * @data: 16 bit word(s) to be written to the EEPROM
+ *
+ * If ixgbe_eeprom_update_checksum is not called after this function, the
+ * EEPROM will most likely contain an invalid checksum.
+ **/
+static s32 ixgbe_write_eeprom_buffer_bit_bang(struct ixgbe_hw *hw, u16 offset,
+ u16 words, u16 *data)
+{
+ s32 status;
+ u16 word;
+ u16 page_size;
+ u16 i;
+ u8 write_opcode = IXGBE_EEPROM_WRITE_OPCODE_SPI;
+
/* Prepare the EEPROM for writing */
status = ixgbe_acquire_eeprom(hw);
@@ -608,62 +708,147 @@ s32 ixgbe_write_eeprom_generic(struct ixgbe_hw *hw, u16 offset, u16 data)
}
if (status == 0) {
- ixgbe_standby_eeprom(hw);
+ for (i = 0; i < words; i++) {
+ ixgbe_standby_eeprom(hw);
- /* Send the WRITE ENABLE command (8 bit opcode ) */
- ixgbe_shift_out_eeprom_bits(hw, IXGBE_EEPROM_WREN_OPCODE_SPI,
- IXGBE_EEPROM_OPCODE_BITS);
+ /* Send the WRITE ENABLE command (8 bit opcode ) */
+ ixgbe_shift_out_eeprom_bits(hw,
+ IXGBE_EEPROM_WREN_OPCODE_SPI,
+ IXGBE_EEPROM_OPCODE_BITS);
- ixgbe_standby_eeprom(hw);
+ ixgbe_standby_eeprom(hw);
- /*
- * Some SPI eeproms use the 8th address bit embedded in the
- * opcode
- */
- if ((hw->eeprom.address_bits == 8) && (offset >= 128))
- write_opcode |= IXGBE_EEPROM_A8_OPCODE_SPI;
+ /*
+ * Some SPI eeproms use the 8th address bit embedded
+ * in the opcode
+ */
+ if ((hw->eeprom.address_bits == 8) &&
+ ((offset + i) >= 128))
+ write_opcode |= IXGBE_EEPROM_A8_OPCODE_SPI;
+
+ /* Send the Write command (8-bit opcode + addr) */
+ ixgbe_shift_out_eeprom_bits(hw, write_opcode,
+ IXGBE_EEPROM_OPCODE_BITS);
+ ixgbe_shift_out_eeprom_bits(hw, (u16)((offset + i) * 2),
+ hw->eeprom.address_bits);
+
+ page_size = hw->eeprom.word_page_size;
+
+ /* Send the data in bursts via SPI */
+ do {
+ word = data[i];
+ word = (word >> 8) | (word << 8);
+ ixgbe_shift_out_eeprom_bits(hw, word, 16);
+
+ if (page_size == 0)
+ break;
+
+ /* do not wrap around page */
+ if (((offset + i) & (page_size - 1)) ==
+ (page_size - 1))
+ break;
+ } while (++i < words);
+
+ ixgbe_standby_eeprom(hw);
+ usleep_range(10000, 20000);
+ }
+ /* Done with writing - release the EEPROM */
+ ixgbe_release_eeprom(hw);
+ }
- /* Send the Write command (8-bit opcode + addr) */
- ixgbe_shift_out_eeprom_bits(hw, write_opcode,
- IXGBE_EEPROM_OPCODE_BITS);
- ixgbe_shift_out_eeprom_bits(hw, (u16)(offset*2),
- hw->eeprom.address_bits);
+ return status;
+}
- /* Send the data */
- data = (data >> 8) | (data << 8);
- ixgbe_shift_out_eeprom_bits(hw, data, 16);
- ixgbe_standby_eeprom(hw);
+/**
+ * ixgbe_write_eeprom_generic - Writes 16 bit value to EEPROM
+ * @hw: pointer to hardware structure
+ * @offset: offset within the EEPROM to be written to
+ * @data: 16 bit word to be written to the EEPROM
+ *
+ * If ixgbe_eeprom_update_checksum is not called after this function, the
+ * EEPROM will most likely contain an invalid checksum.
+ **/
+s32 ixgbe_write_eeprom_generic(struct ixgbe_hw *hw, u16 offset, u16 data)
+{
+ s32 status;
- /* Done with writing - release the EEPROM */
- ixgbe_release_eeprom(hw);
+ hw->eeprom.ops.init_params(hw);
+
+ if (offset >= hw->eeprom.word_size) {
+ status = IXGBE_ERR_EEPROM;
+ goto out;
}
+ status = ixgbe_write_eeprom_buffer_bit_bang(hw, offset, 1, &data);
+
out:
return status;
}
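The bit-bang write path above swaps each word before clocking it out ("word = (word >> 8) | (word << 8)"), consistent with the note elsewhere in the driver that the EEPROM is little-endian and word addressable while the SPI shift routine clocks bits out most-significant first. A minimal user-space sketch of that swap, purely illustrative and not part of the patch:

    #include <stdint.h>
    #include <stdio.h>

    /* Swap the two bytes of a 16-bit word, as the bit-bang write path
     * does before shifting the word out over SPI. */
    static uint16_t swap16(uint16_t word)
    {
            return (uint16_t)((word >> 8) | (word << 8));
    }

    int main(void)
    {
            printf("0x%04X -> 0x%04X\n", 0x1234, swap16(0x1234));  /* 0x3412 */
            return 0;
    }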
/**
- * ixgbe_read_eeprom_bit_bang_generic - Read EEPROM word using bit-bang
+ * ixgbe_read_eeprom_buffer_bit_bang_generic - Read EEPROM using bit-bang
* @hw: pointer to hardware structure
* @offset: offset within the EEPROM to be read
- * @data: read 16 bit value from EEPROM
+ * @words: number of word(s)
+ * @data: read 16 bit word(s) from EEPROM
*
- * Reads 16 bit value from EEPROM through bit-bang method
+ * Reads 16 bit word(s) from EEPROM through bit-bang method
**/
-s32 ixgbe_read_eeprom_bit_bang_generic(struct ixgbe_hw *hw, u16 offset,
- u16 *data)
+s32 ixgbe_read_eeprom_buffer_bit_bang_generic(struct ixgbe_hw *hw, u16 offset,
+ u16 words, u16 *data)
{
- s32 status;
- u16 word_in;
- u8 read_opcode = IXGBE_EEPROM_READ_OPCODE_SPI;
+ s32 status = 0;
+ u16 i, count;
hw->eeprom.ops.init_params(hw);
- if (offset >= hw->eeprom.word_size) {
+ if (words == 0) {
+ status = IXGBE_ERR_INVALID_ARGUMENT;
+ goto out;
+ }
+
+ if (offset + words > hw->eeprom.word_size) {
status = IXGBE_ERR_EEPROM;
goto out;
}
+ /*
+ * We cannot hold the synchronization semaphores for too long,
+ * to avoid starving other entities. However, it is more efficient
+ * to read in bursts than to synchronize access for each word.
+ */
+ for (i = 0; i < words; i += IXGBE_EEPROM_RD_BUFFER_MAX_COUNT) {
+ count = (words - i) / IXGBE_EEPROM_RD_BUFFER_MAX_COUNT > 0 ?
+ IXGBE_EEPROM_RD_BUFFER_MAX_COUNT : (words - i);
+
+ status = ixgbe_read_eeprom_buffer_bit_bang(hw, offset + i,
+ count, &data[i]);
+
+ if (status != 0)
+ break;
+ }
+
+out:
+ return status;
+}
+
+/**
+ * ixgbe_read_eeprom_buffer_bit_bang - Read EEPROM using bit-bang
+ * @hw: pointer to hardware structure
+ * @offset: offset within the EEPROM to be read
+ * @words: number of word(s)
+ * @data: read 16 bit word(s) from EEPROM
+ *
+ * Reads 16 bit word(s) from EEPROM through bit-bang method
+ **/
+static s32 ixgbe_read_eeprom_buffer_bit_bang(struct ixgbe_hw *hw, u16 offset,
+ u16 words, u16 *data)
+{
+ s32 status;
+ u16 word_in;
+ u8 read_opcode = IXGBE_EEPROM_READ_OPCODE_SPI;
+ u16 i;
+
/* Prepare the EEPROM for reading */
status = ixgbe_acquire_eeprom(hw);
@@ -675,29 +860,145 @@ s32 ixgbe_read_eeprom_bit_bang_generic(struct ixgbe_hw *hw, u16 offset,
}
if (status == 0) {
- ixgbe_standby_eeprom(hw);
+ for (i = 0; i < words; i++) {
+ ixgbe_standby_eeprom(hw);
+ /*
+ * Some SPI eeproms use the 8th address bit embedded
+ * in the opcode
+ */
+ if ((hw->eeprom.address_bits == 8) &&
+ ((offset + i) >= 128))
+ read_opcode |= IXGBE_EEPROM_A8_OPCODE_SPI;
+
+ /* Send the READ command (opcode + addr) */
+ ixgbe_shift_out_eeprom_bits(hw, read_opcode,
+ IXGBE_EEPROM_OPCODE_BITS);
+ ixgbe_shift_out_eeprom_bits(hw, (u16)((offset + i) * 2),
+ hw->eeprom.address_bits);
+
+ /* Read the data. */
+ word_in = ixgbe_shift_in_eeprom_bits(hw, 16);
+ data[i] = (word_in >> 8) | (word_in << 8);
+ }
- /*
- * Some SPI eeproms use the 8th address bit embedded in the
- * opcode
- */
- if ((hw->eeprom.address_bits == 8) && (offset >= 128))
- read_opcode |= IXGBE_EEPROM_A8_OPCODE_SPI;
+ /* End this read operation */
+ ixgbe_release_eeprom(hw);
+ }
- /* Send the READ command (opcode + addr) */
- ixgbe_shift_out_eeprom_bits(hw, read_opcode,
- IXGBE_EEPROM_OPCODE_BITS);
- ixgbe_shift_out_eeprom_bits(hw, (u16)(offset*2),
- hw->eeprom.address_bits);
+ return status;
+}
+
+/**
+ * ixgbe_read_eeprom_bit_bang_generic - Read EEPROM word using bit-bang
+ * @hw: pointer to hardware structure
+ * @offset: offset within the EEPROM to be read
+ * @data: read 16 bit value from EEPROM
+ *
+ * Reads 16 bit value from EEPROM through bit-bang method
+ **/
+s32 ixgbe_read_eeprom_bit_bang_generic(struct ixgbe_hw *hw, u16 offset,
+ u16 *data)
+{
+ s32 status;
- /* Read the data. */
- word_in = ixgbe_shift_in_eeprom_bits(hw, 16);
- *data = (word_in >> 8) | (word_in << 8);
+ hw->eeprom.ops.init_params(hw);
- /* End this read operation */
- ixgbe_release_eeprom(hw);
+ if (offset >= hw->eeprom.word_size) {
+ status = IXGBE_ERR_EEPROM;
+ goto out;
+ }
+
+ status = ixgbe_read_eeprom_buffer_bit_bang(hw, offset, 1, data);
+
+out:
+ return status;
+}
+
+/**
+ * ixgbe_read_eerd_buffer_generic - Read EEPROM word(s) using EERD
+ * @hw: pointer to hardware structure
+ * @offset: offset of word in the EEPROM to read
+ * @words: number of word(s)
+ * @data: 16 bit word(s) from the EEPROM
+ *
+ * Reads 16 bit word(s) from the EEPROM using the EERD register.
+ **/
+s32 ixgbe_read_eerd_buffer_generic(struct ixgbe_hw *hw, u16 offset,
+ u16 words, u16 *data)
+{
+ u32 eerd;
+ s32 status = 0;
+ u32 i;
+
+ hw->eeprom.ops.init_params(hw);
+
+ if (words == 0) {
+ status = IXGBE_ERR_INVALID_ARGUMENT;
+ goto out;
+ }
+
+ if (offset >= hw->eeprom.word_size) {
+ status = IXGBE_ERR_EEPROM;
+ goto out;
}
+ for (i = 0; i < words; i++) {
+ eerd = ((offset + i) << IXGBE_EEPROM_RW_ADDR_SHIFT) +
+ IXGBE_EEPROM_RW_REG_START;
+
+ IXGBE_WRITE_REG(hw, IXGBE_EERD, eerd);
+ status = ixgbe_poll_eerd_eewr_done(hw, IXGBE_NVM_POLL_READ);
+
+ if (status == 0) {
+ data[i] = (IXGBE_READ_REG(hw, IXGBE_EERD) >>
+ IXGBE_EEPROM_RW_REG_DATA);
+ } else {
+ hw_dbg(hw, "Eeprom read timed out\n");
+ goto out;
+ }
+ }
+out:
+ return status;
+}
+
+/**
+ * ixgbe_detect_eeprom_page_size_generic - Detect EEPROM page size
+ * @hw: pointer to hardware structure
+ * @offset: offset within the EEPROM to be used as a scratch pad
+ *
+ * Discovers the EEPROM page size by writing marching data at the given
+ * offset. This function is only called when we are writing a new large
+ * buffer at that offset, so the data would be overwritten anyway.
+ **/
+static s32 ixgbe_detect_eeprom_page_size_generic(struct ixgbe_hw *hw,
+ u16 offset)
+{
+ u16 data[IXGBE_EEPROM_PAGE_SIZE_MAX];
+ s32 status = 0;
+ u16 i;
+
+ for (i = 0; i < IXGBE_EEPROM_PAGE_SIZE_MAX; i++)
+ data[i] = i;
+
+ hw->eeprom.word_page_size = IXGBE_EEPROM_PAGE_SIZE_MAX;
+ status = ixgbe_write_eeprom_buffer_bit_bang(hw, offset,
+ IXGBE_EEPROM_PAGE_SIZE_MAX, data);
+ hw->eeprom.word_page_size = 0;
+ if (status != 0)
+ goto out;
+
+ status = ixgbe_read_eeprom_buffer_bit_bang(hw, offset, 1, data);
+ if (status != 0)
+ goto out;
+
+ /*
+ * When a burst write is longer than the actual page size, the
+ * EEPROM address wraps around to the start of the current page.
+ */
+ hw->eeprom.word_page_size = IXGBE_EEPROM_PAGE_SIZE_MAX - data[0];
+
+ hw_dbg(hw, "Detected EEPROM page size = %d words.",
+ hw->eeprom.word_page_size);
out:
return status;
}
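As a worked example of the wrap-around detection above (illustrative only; it assumes IXGBE_EEPROM_PAGE_SIZE_MAX is 128 words): if the part's real page size is 16 words, the 128-word burst of marching values 0..127 wraps every 16 words, so the word at the probe offset is last overwritten with the value 112, and the read-back gives word_page_size = 128 - 112 = 16. The same arithmetic as a stand-alone sketch:

    #include <stdio.h>

    #define PROBE_WORDS 128   /* assumed probe buffer, one word per value */

    int main(void)
    {
            unsigned page = 16;                     /* hypothetical real page size */
            unsigned short eeprom[PROBE_WORDS] = { 0 };
            unsigned i;

            /* A burst longer than the page wraps around inside the page. */
            for (i = 0; i < PROBE_WORDS; i++)
                    eeprom[i % page] = (unsigned short)i;

            printf("detected page size = %u words\n", PROBE_WORDS - eeprom[0]);
            return 0;
    }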
@@ -712,33 +1013,75 @@ out:
**/
s32 ixgbe_read_eerd_generic(struct ixgbe_hw *hw, u16 offset, u16 *data)
{
- u32 eerd;
- s32 status;
+ return ixgbe_read_eerd_buffer_generic(hw, offset, 1, data);
+}
+
+/**
+ * ixgbe_write_eewr_buffer_generic - Write EEPROM word(s) using EEWR
+ * @hw: pointer to hardware structure
+ * @offset: offset of word in the EEPROM to write
+ * @words: number of words
+ * @data: word(s) to write to the EEPROM
+ *
+ * Writes 16 bit word(s) to the EEPROM using the EEWR register.
+ **/
+s32 ixgbe_write_eewr_buffer_generic(struct ixgbe_hw *hw, u16 offset,
+ u16 words, u16 *data)
+{
+ u32 eewr;
+ s32 status = 0;
+ u16 i;
hw->eeprom.ops.init_params(hw);
+ if (words == 0) {
+ status = IXGBE_ERR_INVALID_ARGUMENT;
+ goto out;
+ }
+
if (offset >= hw->eeprom.word_size) {
status = IXGBE_ERR_EEPROM;
goto out;
}
- eerd = (offset << IXGBE_EEPROM_RW_ADDR_SHIFT) +
- IXGBE_EEPROM_RW_REG_START;
+ for (i = 0; i < words; i++) {
+ eewr = ((offset + i) << IXGBE_EEPROM_RW_ADDR_SHIFT) |
+ (data[i] << IXGBE_EEPROM_RW_REG_DATA) |
+ IXGBE_EEPROM_RW_REG_START;
- IXGBE_WRITE_REG(hw, IXGBE_EERD, eerd);
- status = ixgbe_poll_eerd_eewr_done(hw, IXGBE_NVM_POLL_READ);
+ status = ixgbe_poll_eerd_eewr_done(hw, IXGBE_NVM_POLL_WRITE);
+ if (status != 0) {
+ hw_dbg(hw, "Eeprom write EEWR timed out\n");
+ goto out;
+ }
- if (status == 0)
- *data = (IXGBE_READ_REG(hw, IXGBE_EERD) >>
- IXGBE_EEPROM_RW_REG_DATA);
- else
- hw_dbg(hw, "Eeprom read timed out\n");
+ IXGBE_WRITE_REG(hw, IXGBE_EEWR, eewr);
+
+ status = ixgbe_poll_eerd_eewr_done(hw, IXGBE_NVM_POLL_WRITE);
+ if (status != 0) {
+ hw_dbg(hw, "Eeprom write EEWR timed out\n");
+ goto out;
+ }
+ }
out:
return status;
}
/**
+ * ixgbe_write_eewr_generic - Write EEPROM word using EEWR
+ * @hw: pointer to hardware structure
+ * @offset: offset of word in the EEPROM to write
+ * @data: word to write to the EEPROM
+ *
+ * Write a 16 bit word to the EEPROM using the EEWR register.
+ **/
+s32 ixgbe_write_eewr_generic(struct ixgbe_hw *hw, u16 offset, u16 data)
+{
+ return ixgbe_write_eewr_buffer_generic(hw, offset, 1, &data);
+}
+
+/**
* ixgbe_poll_eerd_eewr_done - Poll EERD read or EEWR write status
* @hw: pointer to hardware structure
* @ee_reg: EEPROM flag for polling
@@ -746,7 +1089,7 @@ out:
* Polls the status bit (bit 1) of the EERD or EEWR to determine when the
* read or write is done respectively.
**/
-s32 ixgbe_poll_eerd_eewr_done(struct ixgbe_hw *hw, u32 ee_reg)
+static s32 ixgbe_poll_eerd_eewr_done(struct ixgbe_hw *hw, u32 ee_reg)
{
u32 i;
u32 reg;
@@ -846,6 +1189,28 @@ static s32 ixgbe_get_eeprom_semaphore(struct ixgbe_hw *hw)
udelay(50);
}
+ if (i == timeout) {
+ hw_dbg(hw, "Driver can't access the Eeprom - SMBI Semaphore "
+ "not granted.\n");
+ /*
+ * this release is particularly important because our attempts
+ * above to get the semaphore may have succeeded, and if there
+ * was a timeout, we should unconditionally clear the semaphore
+ * bits to free the driver to make progress
+ */
+ ixgbe_release_eeprom_semaphore(hw);
+
+ udelay(50);
+ /*
+ * one last try
+ * If the SMBI bit is 0 when we read it, then the bit will be
+ * set and we have the semaphore
+ */
+ swsm = IXGBE_READ_REG(hw, IXGBE_SWSM);
+ if (!(swsm & IXGBE_SWSM_SMBI))
+ status = 0;
+ }
+
/* Now get the semaphore between SW/FW through the SWESMBI bit */
if (status == 0) {
for (i = 0; i < timeout; i++) {
@@ -1112,8 +1477,12 @@ static void ixgbe_release_eeprom(struct ixgbe_hw *hw)
hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
- /* Delay before attempt to obtain semaphore again to allow FW access */
- msleep(hw->eeprom.semaphore_delay);
+ /*
+ * Delay before attempting to obtain the semaphore again to allow FW
+ * access. semaphore_delay is in ms; usleep_range needs us.
+ */
+ usleep_range(hw->eeprom.semaphore_delay * 1000,
+ hw->eeprom.semaphore_delay * 2000);
}
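The msleep() to usleep_range() conversions throughout this patch follow the kernel's timers-howto guidance: for delays of a few milliseconds msleep() can oversleep considerably because of timer granularity, so a min/max window in microseconds is preferred. Since semaphore_delay is kept in milliseconds, the conversion multiplies by 1000 and 2000. A hedged before/after sketch for the 10 ms default set in ixgbe_init_eeprom_params_generic():

    /* before: a hard sleep of semaphore_delay ms (default 10 ms) */
    msleep(hw->eeprom.semaphore_delay);

    /* after: a 10-20 ms window the scheduler is free to coalesce */
    usleep_range(hw->eeprom.semaphore_delay * 1000,    /* 10000 us */
                 hw->eeprom.semaphore_delay * 2000);   /* 20000 us */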
/**
@@ -2189,7 +2558,7 @@ s32 ixgbe_acquire_swfw_sync(struct ixgbe_hw *hw, u16 mask)
* thread currently using resource (swmask)
*/
ixgbe_release_eeprom_semaphore(hw);
- msleep(5);
+ usleep_range(5000, 10000);
timeout--;
}
@@ -2263,7 +2632,7 @@ s32 ixgbe_blink_led_start_generic(struct ixgbe_hw *hw, u32 index)
autoc_reg |= IXGBE_AUTOC_AN_RESTART;
autoc_reg |= IXGBE_AUTOC_FLU;
IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc_reg);
- msleep(10);
+ usleep_range(10000, 20000);
}
led_reg &= ~IXGBE_LED_MODE_MASK(index);
@@ -2883,3 +3252,18 @@ void ixgbe_set_vlan_anti_spoofing(struct ixgbe_hw *hw, bool enable, int vf)
pfvfspoof &= ~(1 << vf_target_shift);
IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg), pfvfspoof);
}
+
+/**
+ * ixgbe_get_device_caps_generic - Get additional device capabilities
+ * @hw: pointer to hardware structure
+ * @device_caps: the EEPROM word with the extra device capabilities
+ *
+ * This function will read the EEPROM location for the device capabilities,
+ * and return the word through device_caps.
+ **/
+s32 ixgbe_get_device_caps_generic(struct ixgbe_hw *hw, u16 *device_caps)
+{
+ hw->eeprom.ops.read(hw, IXGBE_DEVICE_CAPS, device_caps);
+
+ return 0;
+}
diff --git a/drivers/net/ixgbe/ixgbe_common.h b/drivers/net/ixgbe/ixgbe_common.h
index 508f635fc2ca..46be83cfb500 100644
--- a/drivers/net/ixgbe/ixgbe_common.h
+++ b/drivers/net/ixgbe/ixgbe_common.h
@@ -35,6 +35,7 @@ u32 ixgbe_get_pcie_msix_count_generic(struct ixgbe_hw *hw);
s32 ixgbe_init_ops_generic(struct ixgbe_hw *hw);
s32 ixgbe_init_hw_generic(struct ixgbe_hw *hw);
s32 ixgbe_start_hw_generic(struct ixgbe_hw *hw);
+s32 ixgbe_start_hw_gen2(struct ixgbe_hw *hw);
s32 ixgbe_clear_hw_cntrs_generic(struct ixgbe_hw *hw);
s32 ixgbe_read_pba_string_generic(struct ixgbe_hw *hw, u8 *pba_num,
u32 pba_num_size);
@@ -48,14 +49,22 @@ s32 ixgbe_led_off_generic(struct ixgbe_hw *hw, u32 index);
s32 ixgbe_init_eeprom_params_generic(struct ixgbe_hw *hw);
s32 ixgbe_write_eeprom_generic(struct ixgbe_hw *hw, u16 offset, u16 data);
+s32 ixgbe_write_eeprom_buffer_bit_bang_generic(struct ixgbe_hw *hw, u16 offset,
+ u16 words, u16 *data);
s32 ixgbe_read_eerd_generic(struct ixgbe_hw *hw, u16 offset, u16 *data);
+s32 ixgbe_read_eerd_buffer_generic(struct ixgbe_hw *hw, u16 offset,
+ u16 words, u16 *data);
+s32 ixgbe_write_eewr_generic(struct ixgbe_hw *hw, u16 offset, u16 data);
+s32 ixgbe_write_eewr_buffer_generic(struct ixgbe_hw *hw, u16 offset,
+ u16 words, u16 *data);
s32 ixgbe_read_eeprom_bit_bang_generic(struct ixgbe_hw *hw, u16 offset,
u16 *data);
+s32 ixgbe_read_eeprom_buffer_bit_bang_generic(struct ixgbe_hw *hw, u16 offset,
+ u16 words, u16 *data);
u16 ixgbe_calc_eeprom_checksum_generic(struct ixgbe_hw *hw);
s32 ixgbe_validate_eeprom_checksum_generic(struct ixgbe_hw *hw,
u16 *checksum_val);
s32 ixgbe_update_eeprom_checksum_generic(struct ixgbe_hw *hw);
-s32 ixgbe_poll_eerd_eewr_done(struct ixgbe_hw *hw, u32 ee_reg);
s32 ixgbe_set_rar_generic(struct ixgbe_hw *hw, u32 index, u8 *addr, u32 vmdq,
u32 enable_addr);
@@ -89,6 +98,7 @@ s32 ixgbe_blink_led_start_generic(struct ixgbe_hw *hw, u32 index);
s32 ixgbe_blink_led_stop_generic(struct ixgbe_hw *hw, u32 index);
void ixgbe_set_mac_anti_spoofing(struct ixgbe_hw *hw, bool enable, int pf);
void ixgbe_set_vlan_anti_spoofing(struct ixgbe_hw *hw, bool enable, int vf);
+s32 ixgbe_get_device_caps_generic(struct ixgbe_hw *hw, u16 *device_caps);
#define IXGBE_WRITE_REG(a, reg, value) writel((value), ((a)->hw_addr + (reg)))
diff --git a/drivers/net/ixgbe/ixgbe_dcb.c b/drivers/net/ixgbe/ixgbe_dcb.c
index 41c529fac0ab..686a17aadef3 100644
--- a/drivers/net/ixgbe/ixgbe_dcb.c
+++ b/drivers/net/ixgbe/ixgbe_dcb.c
@@ -36,7 +36,7 @@
/**
* ixgbe_ieee_credits - This calculates the ieee traffic class
* credits from the configured bandwidth percentages. Credits
- * are the smallest unit programable into the underlying
+ * are the smallest unit programmable into the underlying
 * hardware. The IEEE 802.1Qaz specification does not use bandwidth
* groups so this is much simplified from the CEE case.
*/
diff --git a/drivers/net/ixgbe/ixgbe_dcb_82598.c b/drivers/net/ixgbe/ixgbe_dcb_82598.c
index 1bc57e52cee3..771d01a60d06 100644
--- a/drivers/net/ixgbe/ixgbe_dcb_82598.c
+++ b/drivers/net/ixgbe/ixgbe_dcb_82598.c
@@ -289,7 +289,7 @@ s32 ixgbe_dcb_config_pfc_82598(struct ixgbe_hw *hw, u8 pfc_en)
 * Configure queue statistics registers, all queues belonging to the same
 * traffic class use a single set of queue statistics counters.
*/
-s32 ixgbe_dcb_config_tc_stats_82598(struct ixgbe_hw *hw)
+static s32 ixgbe_dcb_config_tc_stats_82598(struct ixgbe_hw *hw)
{
u32 reg = 0;
u8 i = 0;
diff --git a/drivers/net/ixgbe/ixgbe_dcb_82599.c b/drivers/net/ixgbe/ixgbe_dcb_82599.c
index 025af8c53ddb..d50cf78c234d 100644
--- a/drivers/net/ixgbe/ixgbe_dcb_82599.c
+++ b/drivers/net/ixgbe/ixgbe_dcb_82599.c
@@ -39,36 +39,52 @@
*/
static s32 ixgbe_dcb_config_packet_buffers_82599(struct ixgbe_hw *hw, u8 rx_pba)
{
- s32 ret_val = 0;
- u32 value = IXGBE_RXPBSIZE_64KB;
+ int num_tcs = IXGBE_MAX_PACKET_BUFFERS;
+ u32 rx_pb_size = hw->mac.rx_pb_size << IXGBE_RXPBSIZE_SHIFT;
+ u32 rxpktsize;
+ u32 txpktsize;
+ u32 txpbthresh;
u8 i = 0;
- /* Setup Rx packet buffer sizes */
- switch (rx_pba) {
- case pba_80_48:
- /* Setup the first four at 80KB */
- value = IXGBE_RXPBSIZE_80KB;
- for (; i < 4; i++)
- IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), value);
- /* Setup the last four at 48KB...don't re-init i */
- value = IXGBE_RXPBSIZE_48KB;
- /* Fall Through */
- case pba_equal:
- default:
- for (; i < IXGBE_MAX_PACKET_BUFFERS; i++)
- IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), value);
-
- /* Setup Tx packet buffer sizes */
- for (i = 0; i < IXGBE_MAX_PACKET_BUFFERS; i++) {
- IXGBE_WRITE_REG(hw, IXGBE_TXPBSIZE(i),
- IXGBE_TXPBSIZE_20KB);
- IXGBE_WRITE_REG(hw, IXGBE_TXPBTHRESH(i),
- IXGBE_TXPBTHRESH_DCB);
- }
- break;
+ /*
+ * This really means configure the first half of the TCs
+ * (Traffic Classes) to use 5/8 of the Rx packet buffer
+ * space. To determine the size of the buffer for each TC,
+ * we are multiplying the average size by 5/4 and applying
+ * it to half of the traffic classes.
+ */
+ if (rx_pba == pba_80_48) {
+ rxpktsize = (rx_pb_size * 5) / (num_tcs * 4);
+ rx_pb_size -= rxpktsize * (num_tcs / 2);
+ for (; i < (num_tcs / 2); i++)
+ IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpktsize);
+ }
+
+ /* Divide the remaining Rx packet buffer evenly among the TCs */
+ rxpktsize = rx_pb_size / (num_tcs - i);
+ for (; i < num_tcs; i++)
+ IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpktsize);
+
+ /*
+ * Setup the Tx packet buffer and threshold equally for all TCs.
+ * The TXPBTHRESH register is in KB, so divide by 1024 and subtract
+ * 10 since the largest packet we support is just over 9KB.
+ */
+ txpktsize = IXGBE_TXPBSIZE_MAX / num_tcs;
+ txpbthresh = (txpktsize / 1024) - IXGBE_TXPKT_SIZE_MAX;
+ for (i = 0; i < num_tcs; i++) {
+ IXGBE_WRITE_REG(hw, IXGBE_TXPBSIZE(i), txpktsize);
+ IXGBE_WRITE_REG(hw, IXGBE_TXPBTHRESH(i), txpbthresh);
+ }
+
+ /* Clear unused TCs, if any, to zero buffer size */
+ for (; i < MAX_TRAFFIC_CLASS; i++) {
+ IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), 0);
+ IXGBE_WRITE_REG(hw, IXGBE_TXPBSIZE(i), 0);
+ IXGBE_WRITE_REG(hw, IXGBE_TXPBTHRESH(i), 0);
}
- return ret_val;
+ return 0;
}
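To check that the new formula reproduces the old fixed layouts, here is a stand-alone sketch (illustrative only; it assumes the 82599 total Rx packet buffer is 512 KB, i.e. IXGBE_82599_RX_PB_SIZE shifted by IXGBE_RXPBSIZE_SHIFT, and 8 packet buffers):

    #include <stdio.h>

    #define NUM_TCS     8
    #define RX_PB_BYTES (512u * 1024u)    /* assumed total Rx packet buffer */

    int main(void)
    {
            unsigned rx_pb = RX_PB_BYTES, rxpktsize, i = 0;

            /* pba_80_48: the first half of the TCs get 5/8 of the space */
            rxpktsize = (rx_pb * 5) / (NUM_TCS * 4);        /* 80 KB */
            rx_pb -= rxpktsize * (NUM_TCS / 2);
            for (; i < NUM_TCS / 2; i++)
                    printf("TC%u: %u KB\n", i, rxpktsize / 1024);

            /* the remaining TCs share what is left evenly */
            rxpktsize = rx_pb / (NUM_TCS - i);              /* 48 KB */
            for (; i < NUM_TCS; i++)
                    printf("TC%u: %u KB\n", i, rxpktsize / 1024);
            return 0;
    }

With the same assumptions the Tx side works out to IXGBE_TXPBSIZE_MAX / 8 = 20 KB per TC and a threshold of 20 - 10 = 0xA, matching the old IXGBE_TXPBSIZE_20KB and IXGBE_TXPBTHRESH_DCB values.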
/**
@@ -285,12 +301,17 @@ s32 ixgbe_dcb_config_pfc_82599(struct ixgbe_hw *hw, u8 pfc_en)
IXGBE_WRITE_REG(hw, IXGBE_FCCFG, reg);
/*
* Enable Receive PFC
- * We will always honor XOFF frames we receive when
- * we are in PFC mode.
+ * 82599 will always honor XOFF frames we receive when we are
+ * in PFC mode; X540, however, only honors enabled traffic
+ * classes.
*/
reg = IXGBE_READ_REG(hw, IXGBE_MFLCN);
reg &= ~IXGBE_MFLCN_RFCE;
reg |= IXGBE_MFLCN_RPFCE | IXGBE_MFLCN_DPF;
+
+ if (hw->mac.type == ixgbe_mac_X540)
+ reg |= pfc_en << IXGBE_MFLCN_RPFCE_SHIFT;
+
IXGBE_WRITE_REG(hw, IXGBE_MFLCN, reg);
} else {
diff --git a/drivers/net/ixgbe/ixgbe_dcb_82599.h b/drivers/net/ixgbe/ixgbe_dcb_82599.h
index 148fd8b477a9..2de71a503153 100644
--- a/drivers/net/ixgbe/ixgbe_dcb_82599.h
+++ b/drivers/net/ixgbe/ixgbe_dcb_82599.h
@@ -92,8 +92,10 @@
#define IXGBE_RXPBSIZE_64KB 0x00010000 /* 64KB Packet Buffer */
#define IXGBE_RXPBSIZE_80KB 0x00014000 /* 80KB Packet Buffer */
#define IXGBE_RXPBSIZE_128KB 0x00020000 /* 128KB Packet Buffer */
+#define IXGBE_TXPBSIZE_MAX 0x00028000 /* 160KB Packet Buffer */
#define IXGBE_TXPBTHRESH_DCB 0xA /* THRESH value for DCB mode */
+#define IXGBE_TXPKT_SIZE_MAX 0xA /* Max Tx Packet size */
/* SECTXMINIFG DCB */
#define IXGBE_SECTX_DCB 0x00001F00 /* DCB TX Buffer IFG */
diff --git a/drivers/net/ixgbe/ixgbe_dcb_nl.c b/drivers/net/ixgbe/ixgbe_dcb_nl.c
index fec4c724c37a..5e7ed225851a 100644
--- a/drivers/net/ixgbe/ixgbe_dcb_nl.c
+++ b/drivers/net/ixgbe/ixgbe_dcb_nl.c
@@ -347,31 +347,44 @@ static void ixgbe_dcbnl_get_pfc_cfg(struct net_device *netdev, int priority,
static u8 ixgbe_dcbnl_set_all(struct net_device *netdev)
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct dcb_app app = {
+ .selector = DCB_APP_IDTYPE_ETHTYPE,
+ .protocol = ETH_P_FCOE,
+ };
+ u8 up = dcb_getapp(netdev, &app);
int ret;
- if (!adapter->dcb_set_bitmap ||
- !(adapter->dcbx_cap & DCB_CAP_DCBX_VER_CEE))
- return DCB_NO_HW_CHG;
-
ret = ixgbe_copy_dcb_cfg(&adapter->temp_dcb_cfg, &adapter->dcb_cfg,
MAX_TRAFFIC_CLASS);
-
if (ret)
return DCB_NO_HW_CHG;
+ /* In IEEE mode, app data must be parsed into DCBX format for
+ * the hardware routines.
+ */
+ if (adapter->dcbx_cap & DCB_CAP_DCBX_VER_IEEE)
+ up = (1 << up);
+
+#ifdef IXGBE_FCOE
+ if (up && (up != (1 << adapter->fcoe.up)))
+ adapter->dcb_set_bitmap |= BIT_APP_UPCHG;
+
/*
- * Only take down the adapter if an app change occured. FCoE
+ * Only take down the adapter if an app change occurred. FCoE
* may shuffle tx rings in this case and this can not be done
* without a reset currently.
*/
if (adapter->dcb_set_bitmap & BIT_APP_UPCHG) {
while (test_and_set_bit(__IXGBE_RESETTING, &adapter->state))
- msleep(1);
+ usleep_range(1000, 2000);
+
+ ixgbe_fcoe_setapp(adapter, up);
if (netif_running(netdev))
netdev->netdev_ops->ndo_stop(netdev);
ixgbe_clear_interrupt_scheme(adapter);
}
+#endif
if (adapter->dcb_cfg.pfc_mode_enable) {
switch (adapter->hw.mac.type) {
@@ -399,12 +412,14 @@ static u8 ixgbe_dcbnl_set_all(struct net_device *netdev)
}
}
+#ifdef IXGBE_FCOE
if (adapter->dcb_set_bitmap & BIT_APP_UPCHG) {
ixgbe_init_interrupt_scheme(adapter);
if (netif_running(netdev))
netdev->netdev_ops->ndo_open(netdev);
ret = DCB_HW_CHG_RST;
}
+#endif
if (adapter->dcb_set_bitmap & BIT_PFC) {
u8 pfc_en;
@@ -558,68 +573,6 @@ static u8 ixgbe_dcbnl_getapp(struct net_device *netdev, u8 idtype, u16 id)
return dcb_getapp(netdev, &app);
}
-/**
- * ixgbe_dcbnl_setapp - set the DCBX application user priority
- * @netdev : the corresponding netdev
- * @idtype : identifies the id as ether type or TCP/UDP port number
- * @id: id is either ether type or TCP/UDP port number
- * @up: the 802.1p user priority bitmap
- *
- * Returns : 0 on success or 1 on error
- */
-static u8 ixgbe_dcbnl_setapp(struct net_device *netdev,
- u8 idtype, u16 id, u8 up)
-{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
- u8 rval = 1;
- struct dcb_app app = {
- .selector = idtype,
- .protocol = id,
- .priority = up
- };
-
- if (!(adapter->dcbx_cap & DCB_CAP_DCBX_VER_CEE))
- return rval;
-
- rval = dcb_setapp(netdev, &app);
-
- switch (idtype) {
- case DCB_APP_IDTYPE_ETHTYPE:
-#ifdef IXGBE_FCOE
- if (id == ETH_P_FCOE) {
- u8 old_tc;
-
- /* Get current programmed tc */
- old_tc = adapter->fcoe.tc;
- rval = ixgbe_fcoe_setapp(adapter, up);
-
- if (rval ||
- !(adapter->flags & IXGBE_FLAG_DCB_ENABLED) ||
- !(adapter->flags & IXGBE_FLAG_FCOE_ENABLED))
- break;
-
- /* The FCoE application priority may be changed multiple
- * times in quick sucession with switches that build up
- * TLVs. To avoid creating uneeded device resets this
- * checks the actual HW configuration and clears
- * BIT_APP_UPCHG if a HW configuration change is not
- * need
- */
- if (old_tc == adapter->fcoe.tc)
- adapter->dcb_set_bitmap &= ~BIT_APP_UPCHG;
- else
- adapter->dcb_set_bitmap |= BIT_APP_UPCHG;
- }
-#endif
- break;
- case DCB_APP_IDTYPE_PORTNUM:
- break;
- default:
- break;
- }
- return rval;
-}
-
static int ixgbe_dcbnl_ieee_getets(struct net_device *dev,
struct ieee_ets *ets)
{
@@ -745,25 +698,14 @@ static int ixgbe_dcbnl_ieee_setapp(struct net_device *dev,
if (!(adapter->dcbx_cap & DCB_CAP_DCBX_VER_IEEE))
return -EINVAL;
-#ifdef IXGBE_FCOE
- if (app->selector == 1 && app->protocol == ETH_P_FCOE) {
- if (adapter->fcoe.tc == app->priority)
- goto setapp;
- /* In IEEE mode map up to tc 1:1 */
- adapter->fcoe.tc = app->priority;
- adapter->fcoe.up = app->priority;
+ dcb_setapp(dev, app);
- /* Force hardware reset required to push FCoE
- * setup on {tx|rx}_rings
- */
- adapter->dcb_set_bitmap |= BIT_APP_UPCHG;
+#ifdef IXGBE_FCOE
+ if (app->selector == 1 && app->protocol == ETH_P_FCOE &&
+ adapter->fcoe.tc == app->priority)
ixgbe_dcbnl_set_all(dev);
- }
-
-setapp:
#endif
- dcb_setapp(dev, app);
return 0;
}
@@ -838,7 +780,6 @@ const struct dcbnl_rtnl_ops dcbnl_ops = {
.getpfcstate = ixgbe_dcbnl_getpfcstate,
.setpfcstate = ixgbe_dcbnl_setpfcstate,
.getapp = ixgbe_dcbnl_getapp,
- .setapp = ixgbe_dcbnl_setapp,
.getdcbx = ixgbe_dcbnl_getdcbx,
.setdcbx = ixgbe_dcbnl_setdcbx,
};
diff --git a/drivers/net/ixgbe/ixgbe_ethtool.c b/drivers/net/ixgbe/ixgbe_ethtool.c
index 76380a2b35aa..cb1555bc8548 100644
--- a/drivers/net/ixgbe/ixgbe_ethtool.c
+++ b/drivers/net/ixgbe/ixgbe_ethtool.c
@@ -84,6 +84,7 @@ static struct ixgbe_stats ixgbe_gstrings_stats[] = {
{"hw_rsc_flushed", IXGBE_STAT(rsc_total_flush)},
{"fdir_match", IXGBE_STAT(stats.fdirmatch)},
{"fdir_miss", IXGBE_STAT(stats.fdirmiss)},
+ {"fdir_overflow", IXGBE_STAT(fdir_overflow)},
{"rx_fifo_errors", IXGBE_NETDEV_STAT(rx_fifo_errors)},
{"rx_missed_errors", IXGBE_NETDEV_STAT(rx_missed_errors)},
{"tx_aborted_errors", IXGBE_NETDEV_STAT(tx_aborted_errors)},
@@ -102,6 +103,10 @@ static struct ixgbe_stats ixgbe_gstrings_stats[] = {
{"alloc_rx_page_failed", IXGBE_STAT(alloc_rx_page_failed)},
{"alloc_rx_buff_failed", IXGBE_STAT(alloc_rx_buff_failed)},
{"rx_no_dma_resources", IXGBE_STAT(hw_rx_no_dma_resources)},
+ {"os2bmc_rx_by_bmc", IXGBE_STAT(stats.o2bgptc)},
+ {"os2bmc_tx_by_bmc", IXGBE_STAT(stats.b2ospc)},
+ {"os2bmc_tx_by_host", IXGBE_STAT(stats.o2bspc)},
+ {"os2bmc_rx_by_host", IXGBE_STAT(stats.b2ogprc)},
#ifdef IXGBE_FCOE
{"fcoe_bad_fccrc", IXGBE_STAT(stats.fccrc)},
{"rx_fcoe_dropped", IXGBE_STAT(stats.fcoerpdc)},
@@ -288,20 +293,20 @@ static int ixgbe_get_settings(struct net_device *netdev,
if (link_up) {
switch (link_speed) {
case IXGBE_LINK_SPEED_10GB_FULL:
- ecmd->speed = SPEED_10000;
+ ethtool_cmd_speed_set(ecmd, SPEED_10000);
break;
case IXGBE_LINK_SPEED_1GB_FULL:
- ecmd->speed = SPEED_1000;
+ ethtool_cmd_speed_set(ecmd, SPEED_1000);
break;
case IXGBE_LINK_SPEED_100_FULL:
- ecmd->speed = SPEED_100;
+ ethtool_cmd_speed_set(ecmd, SPEED_100);
break;
default:
break;
}
ecmd->duplex = DUPLEX_FULL;
} else {
- ecmd->speed = -1;
+ ethtool_cmd_speed_set(ecmd, -1);
ecmd->duplex = -1;
}
@@ -346,9 +351,10 @@ static int ixgbe_set_settings(struct net_device *netdev,
}
} else {
/* in this case we currently only support 10Gb/FULL */
+ u32 speed = ethtool_cmd_speed(ecmd);
if ((ecmd->autoneg == AUTONEG_ENABLE) ||
(ecmd->advertising != ADVERTISED_10000baseT_Full) ||
- (ecmd->speed + ecmd->duplex != SPEED_10000 + DUPLEX_FULL))
+ (speed + ecmd->duplex != SPEED_10000 + DUPLEX_FULL))
return -EINVAL;
}
@@ -846,11 +852,8 @@ static int ixgbe_get_eeprom(struct net_device *netdev,
if (!eeprom_buff)
return -ENOMEM;
- for (i = 0; i < eeprom_len; i++) {
- if ((ret_val = hw->eeprom.ops.read(hw, first_word + i,
- &eeprom_buff[i])))
- break;
- }
+ ret_val = hw->eeprom.ops.read_buffer(hw, first_word, eeprom_len,
+ eeprom_buff);
/* Device's eeprom is always little-endian, word addressable */
for (i = 0; i < eeprom_len; i++)
@@ -931,7 +934,7 @@ static int ixgbe_set_ringparam(struct net_device *netdev,
}
while (test_and_set_bit(__IXGBE_RESETTING, &adapter->state))
- msleep(1);
+ usleep_range(1000, 2000);
if (!netif_running(adapter->netdev)) {
for (i = 0; i < adapter->num_tx_queues; i++)
@@ -1030,9 +1033,6 @@ static int ixgbe_get_sset_count(struct net_device *netdev, int sset)
return IXGBE_TEST_LEN;
case ETH_SS_STATS:
return IXGBE_STATS_LEN;
- case ETH_SS_NTUPLE_FILTERS:
- return ETHTOOL_MAX_NTUPLE_LIST_ENTRY *
- ETHTOOL_MAX_NTUPLE_STRING_PER_ENTRY;
default:
return -EOPNOTSUPP;
}
@@ -1238,46 +1238,62 @@ static const struct ixgbe_reg_test reg_test_82598[] = {
{ 0, 0, 0, 0 }
};
-static const u32 register_test_patterns[] = {
- 0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF
-};
-
-#define REG_PATTERN_TEST(R, M, W) \
-{ \
- u32 pat, val, before; \
- for (pat = 0; pat < ARRAY_SIZE(register_test_patterns); pat++) { \
- before = readl(adapter->hw.hw_addr + R); \
- writel((register_test_patterns[pat] & W), \
- (adapter->hw.hw_addr + R)); \
- val = readl(adapter->hw.hw_addr + R); \
- if (val != (register_test_patterns[pat] & W & M)) { \
- e_err(drv, "pattern test reg %04X failed: got " \
- "0x%08X expected 0x%08X\n", \
- R, val, (register_test_patterns[pat] & W & M)); \
- *data = R; \
- writel(before, adapter->hw.hw_addr + R); \
- return 1; \
- } \
- writel(before, adapter->hw.hw_addr + R); \
- } \
+static bool reg_pattern_test(struct ixgbe_adapter *adapter, u64 *data, int reg,
+ u32 mask, u32 write)
+{
+ u32 pat, val, before;
+ static const u32 test_pattern[] = {
+ 0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF};
+
+ for (pat = 0; pat < ARRAY_SIZE(test_pattern); pat++) {
+ before = readl(adapter->hw.hw_addr + reg);
+ writel((test_pattern[pat] & write),
+ (adapter->hw.hw_addr + reg));
+ val = readl(adapter->hw.hw_addr + reg);
+ if (val != (test_pattern[pat] & write & mask)) {
+ e_err(drv, "pattern test reg %04X failed: got "
+ "0x%08X expected 0x%08X\n",
+ reg, val, (test_pattern[pat] & write & mask));
+ *data = reg;
+ writel(before, adapter->hw.hw_addr + reg);
+ return 1;
+ }
+ writel(before, adapter->hw.hw_addr + reg);
+ }
+ return 0;
}
-#define REG_SET_AND_CHECK(R, M, W) \
-{ \
- u32 val, before; \
- before = readl(adapter->hw.hw_addr + R); \
- writel((W & M), (adapter->hw.hw_addr + R)); \
- val = readl(adapter->hw.hw_addr + R); \
- if ((W & M) != (val & M)) { \
- e_err(drv, "set/check reg %04X test failed: got 0x%08X " \
- "expected 0x%08X\n", R, (val & M), (W & M)); \
- *data = R; \
- writel(before, (adapter->hw.hw_addr + R)); \
- return 1; \
- } \
- writel(before, (adapter->hw.hw_addr + R)); \
+static bool reg_set_and_check(struct ixgbe_adapter *adapter, u64 *data, int reg,
+ u32 mask, u32 write)
+{
+ u32 val, before;
+ before = readl(adapter->hw.hw_addr + reg);
+ writel((write & mask), (adapter->hw.hw_addr + reg));
+ val = readl(adapter->hw.hw_addr + reg);
+ if ((write & mask) != (val & mask)) {
+ e_err(drv, "set/check reg %04X test failed: got 0x%08X "
+ "expected 0x%08X\n", reg, (val & mask), (write & mask));
+ *data = reg;
+ writel(before, (adapter->hw.hw_addr + reg));
+ return 1;
+ }
+ writel(before, (adapter->hw.hw_addr + reg));
+ return 0;
}
+#define REG_PATTERN_TEST(reg, mask, write) \
+ do { \
+ if (reg_pattern_test(adapter, data, reg, mask, write)) \
+ return 1; \
+ } while (0) \
+
+
+#define REG_SET_AND_CHECK(reg, mask, write) \
+ do { \
+ if (reg_set_and_check(adapter, data, reg, mask, write)) \
+ return 1; \
+ } while (0) \
+
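The do { ... } while (0) wrapper keeps each macro usable as a single statement in any context, including an if/else without braces. A minimal sketch of the hazard it avoids, using a hypothetical CHECK()/check() pair that is not part of this driver:

    /*
     * Without the wrapper, "#define CHECK(x) { if (check(x)) return 1; }"
     * expands to "{ ... };" at the call site, and the stray semicolon
     * terminates the if-body so a following "else" no longer compiles:
     *
     *     if (cond)
     *             CHECK(reg);
     *     else
     *             other();
     *
     * Wrapping the body keeps it one statement that still takes a ';':
     */
    #define CHECK(x)                        \
            do {                            \
                    if (check(x))           \
                            return 1;       \
            } while (0)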
static int ixgbe_reg_test(struct ixgbe_adapter *adapter, u64 *data)
{
const struct ixgbe_reg_test *test;
@@ -1328,13 +1344,13 @@ static int ixgbe_reg_test(struct ixgbe_adapter *adapter, u64 *data)
switch (test->test_type) {
case PATTERN_TEST:
REG_PATTERN_TEST(test->reg + (i * 0x40),
- test->mask,
- test->write);
+ test->mask,
+ test->write);
break;
case SET_READ_TEST:
REG_SET_AND_CHECK(test->reg + (i * 0x40),
- test->mask,
- test->write);
+ test->mask,
+ test->write);
break;
case WRITE_NO_TEST:
writel(test->write,
@@ -1343,18 +1359,18 @@ static int ixgbe_reg_test(struct ixgbe_adapter *adapter, u64 *data)
break;
case TABLE32_TEST:
REG_PATTERN_TEST(test->reg + (i * 4),
- test->mask,
- test->write);
+ test->mask,
+ test->write);
break;
case TABLE64_TEST_LO:
REG_PATTERN_TEST(test->reg + (i * 8),
- test->mask,
- test->write);
+ test->mask,
+ test->write);
break;
case TABLE64_TEST_HI:
REG_PATTERN_TEST((test->reg + 4) + (i * 8),
- test->mask,
- test->write);
+ test->mask,
+ test->write);
break;
}
}
@@ -1417,7 +1433,7 @@ static int ixgbe_intr_test(struct ixgbe_adapter *adapter, u64 *data)
/* Disable all the interrupts */
IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, 0xFFFFFFFF);
- msleep(10);
+ usleep_range(10000, 20000);
/* Test each interrupt */
for (; i < 10; i++) {
@@ -1437,7 +1453,7 @@ static int ixgbe_intr_test(struct ixgbe_adapter *adapter, u64 *data)
~mask & 0x00007FFF);
IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS,
~mask & 0x00007FFF);
- msleep(10);
+ usleep_range(10000, 20000);
if (adapter->test_icr & mask) {
*data = 3;
@@ -1454,7 +1470,7 @@ static int ixgbe_intr_test(struct ixgbe_adapter *adapter, u64 *data)
adapter->test_icr = 0;
IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, mask);
IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, mask);
- msleep(10);
+ usleep_range(10000, 20000);
if (!(adapter->test_icr &mask)) {
*data = 4;
@@ -1474,7 +1490,7 @@ static int ixgbe_intr_test(struct ixgbe_adapter *adapter, u64 *data)
~mask & 0x00007FFF);
IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS,
~mask & 0x00007FFF);
- msleep(10);
+ usleep_range(10000, 20000);
if (adapter->test_icr) {
*data = 5;
@@ -1485,7 +1501,7 @@ static int ixgbe_intr_test(struct ixgbe_adapter *adapter, u64 *data)
/* Disable all the interrupts */
IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, 0xFFFFFFFF);
- msleep(10);
+ usleep_range(10000, 20000);
/* Unhook test interrupt handler */
free_irq(irq, netdev);
@@ -1598,6 +1614,13 @@ static int ixgbe_setup_loopback_test(struct ixgbe_adapter *adapter)
struct ixgbe_hw *hw = &adapter->hw;
u32 reg_data;
+ /* X540 needs to set the MACC.FLU bit to force link up */
+ if (adapter->hw.mac.type == ixgbe_mac_X540) {
+ reg_data = IXGBE_READ_REG(&adapter->hw, IXGBE_MACC);
+ reg_data |= IXGBE_MACC_FLU;
+ IXGBE_WRITE_REG(&adapter->hw, IXGBE_MACC, reg_data);
+ }
+
/* right now we only support MAC loopback in the driver */
reg_data = IXGBE_READ_REG(&adapter->hw, IXGBE_HLREG0);
/* Setup MAC loopback */
@@ -1613,7 +1636,7 @@ static int ixgbe_setup_loopback_test(struct ixgbe_adapter *adapter)
reg_data |= IXGBE_AUTOC_LMS_10G_LINK_NO_AN | IXGBE_AUTOC_FLU;
IXGBE_WRITE_REG(&adapter->hw, IXGBE_AUTOC, reg_data);
IXGBE_WRITE_FLUSH(&adapter->hw);
- msleep(10);
+ usleep_range(10000, 20000);
/* Disable Atlas Tx lanes; re-enabled in reset path */
if (hw->mac.type == ixgbe_mac_82598EB) {
@@ -1999,25 +2022,30 @@ static int ixgbe_nway_reset(struct net_device *netdev)
return 0;
}
-static int ixgbe_phys_id(struct net_device *netdev, u32 data)
+static int ixgbe_set_phys_id(struct net_device *netdev,
+ enum ethtool_phys_id_state state)
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
struct ixgbe_hw *hw = &adapter->hw;
- u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
- u32 i;
- if (!data || data > 300)
- data = 300;
+ switch (state) {
+ case ETHTOOL_ID_ACTIVE:
+ adapter->led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
+ return 2;
- for (i = 0; i < (data * 1000); i += 400) {
+ case ETHTOOL_ID_ON:
hw->mac.ops.led_on(hw, IXGBE_LED_ON);
- msleep_interruptible(200);
+ break;
+
+ case ETHTOOL_ID_OFF:
hw->mac.ops.led_off(hw, IXGBE_LED_ON);
- msleep_interruptible(200);
- }
+ break;
- /* Restore LED settings */
- IXGBE_WRITE_REG(&adapter->hw, IXGBE_LEDCTL, led_reg);
+ case ETHTOOL_ID_INACTIVE:
+ /* Restore LED settings */
+ IXGBE_WRITE_REG(&adapter->hw, IXGBE_LEDCTL, adapter->led_reg);
+ break;
+ }
return 0;
}
@@ -2230,8 +2258,13 @@ static int ixgbe_set_flags(struct net_device *netdev, u32 data)
need_reset = (data & ETH_FLAG_RXVLAN) !=
(netdev->features & NETIF_F_HW_VLAN_RX);
+ if ((data & ETH_FLAG_RXHASH) &&
+ !(adapter->flags & IXGBE_FLAG_RSS_ENABLED))
+ return -EOPNOTSUPP;
+
rc = ethtool_op_set_flags(netdev, data, ETH_FLAG_LRO | ETH_FLAG_NTUPLE |
- ETH_FLAG_RXVLAN | ETH_FLAG_TXVLAN);
+ ETH_FLAG_RXVLAN | ETH_FLAG_TXVLAN |
+ ETH_FLAG_RXHASH);
if (rc)
return rc;
@@ -2465,7 +2498,7 @@ static const struct ethtool_ops ixgbe_ethtool_ops = {
.set_tso = ixgbe_set_tso,
.self_test = ixgbe_diag_test,
.get_strings = ixgbe_get_strings,
- .phys_id = ixgbe_phys_id,
+ .set_phys_id = ixgbe_set_phys_id,
.get_sset_count = ixgbe_get_sset_count,
.get_ethtool_stats = ixgbe_get_ethtool_stats,
.get_coalesce = ixgbe_get_coalesce,
diff --git a/drivers/net/ixgbe/ixgbe_fcoe.c b/drivers/net/ixgbe/ixgbe_fcoe.c
index dba7d77588ef..05920726e824 100644
--- a/drivers/net/ixgbe/ixgbe_fcoe.c
+++ b/drivers/net/ixgbe/ixgbe_fcoe.c
@@ -416,8 +416,7 @@ int ixgbe_fcoe_ddp(struct ixgbe_adapter *adapter,
if (!ddp->udl)
goto ddp_out;
- ddp->err = (fcerr | fceofe);
- if (ddp->err)
+ if (fcerr | fceofe)
goto ddp_out;
fcstat = (sterr & IXGBE_RXDADV_STAT_FCSTAT);
@@ -428,6 +427,7 @@ int ixgbe_fcoe_ddp(struct ixgbe_adapter *adapter,
if (fcstat == IXGBE_RXDADV_STAT_FCSTAT_FCPRSP) {
pci_unmap_sg(adapter->pdev, ddp->sgl,
ddp->sgc, DMA_FROM_DEVICE);
+ ddp->err = (fcerr | fceofe);
ddp->sgl = NULL;
ddp->sgc = 0;
}
diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c
index f17e4a7ee731..2dce3d038188 100644
--- a/drivers/net/ixgbe/ixgbe_main.c
+++ b/drivers/net/ixgbe/ixgbe_main.c
@@ -51,8 +51,12 @@
char ixgbe_driver_name[] = "ixgbe";
static const char ixgbe_driver_string[] =
"Intel(R) 10 Gigabit PCI Express Network Driver";
-
-#define DRV_VERSION "3.2.9-k2"
+#define MAJ 3
+#define MIN 3
+#define BUILD 8
+#define KFIX 2
+#define DRV_VERSION __stringify(MAJ) "." __stringify(MIN) "." \
+ __stringify(BUILD) "-k" __stringify(KFIX)
const char ixgbe_driver_version[] = DRV_VERSION;
static const char ixgbe_copyright[] =
"Copyright (c) 1999-2011 Intel Corporation.";
@@ -120,6 +124,10 @@ static DEFINE_PCI_DEVICE_TABLE(ixgbe_pci_tbl) = {
board_82599 },
{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X540T),
board_X540 },
+ {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP_SF2),
+ board_82599 },
+ {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_LS),
+ board_82599 },
/* required last entry */
{0, }
@@ -185,6 +193,22 @@ static inline void ixgbe_disable_sriov(struct ixgbe_adapter *adapter)
adapter->flags &= ~IXGBE_FLAG_SRIOV_ENABLED;
}
+static void ixgbe_service_event_schedule(struct ixgbe_adapter *adapter)
+{
+ if (!test_bit(__IXGBE_DOWN, &adapter->state) &&
+ !test_and_set_bit(__IXGBE_SERVICE_SCHED, &adapter->state))
+ schedule_work(&adapter->service_task);
+}
+
+static void ixgbe_service_event_complete(struct ixgbe_adapter *adapter)
+{
+ BUG_ON(!test_bit(__IXGBE_SERVICE_SCHED, &adapter->state));
+
+ /* flush memory to make sure state is correct before next watchdog */
+ smp_mb__before_clear_bit();
+ clear_bit(__IXGBE_SERVICE_SCHED, &adapter->state);
+}
+
struct ixgbe_reg_info {
u32 ofs;
char *name;
@@ -644,7 +668,7 @@ void ixgbe_unmap_and_free_tx_resource(struct ixgbe_ring *tx_ring,
* @adapter: driver private struct
* @index: reg idx of queue to query (0-127)
*
- * Helper function to determine the traffic index for a paticular
+ * Helper function to determine the traffic index for a particular
* register index.
*
* Returns : a tc index for use in range 0-7, or 0-3
@@ -811,7 +835,19 @@ static inline bool ixgbe_check_tx_hang(struct ixgbe_ring *tx_ring)
#define DESC_NEEDED (TXD_USE_COUNT(IXGBE_MAX_DATA_PER_TXD) /* skb->data */ + \
MAX_SKB_FRAGS * TXD_USE_COUNT(PAGE_SIZE) + 1) /* for context */
-static void ixgbe_tx_timeout(struct net_device *netdev);
+/**
+ * ixgbe_tx_timeout_reset - initiate reset due to Tx timeout
+ * @adapter: driver private struct
+ **/
+static void ixgbe_tx_timeout_reset(struct ixgbe_adapter *adapter)
+{
+
+ /* Do the reset outside of interrupt context */
+ if (!test_bit(__IXGBE_DOWN, &adapter->state)) {
+ adapter->flags2 |= IXGBE_FLAG2_RESET_REQUESTED;
+ ixgbe_service_event_schedule(adapter);
+ }
+}
/**
* ixgbe_clean_tx_irq - Reclaim resources after transmit completes
@@ -893,7 +929,7 @@ static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector,
adapter->tx_timeout_count + 1, tx_ring->queue_index);
/* schedule immediate reset if we believe we hung */
- ixgbe_tx_timeout(adapter->netdev);
+ ixgbe_tx_timeout_reset(adapter);
/* the adapter is about to reset, no point in enabling stuff */
return true;
@@ -943,8 +979,6 @@ static void ixgbe_update_rx_dca(struct ixgbe_adapter *adapter,
rxctrl |= IXGBE_DCA_RXCTRL_DESC_DCA_EN;
rxctrl |= IXGBE_DCA_RXCTRL_HEAD_DCA_EN;
rxctrl &= ~(IXGBE_DCA_RXCTRL_DESC_RRO_EN);
- rxctrl &= ~(IXGBE_DCA_RXCTRL_DESC_WRO_EN |
- IXGBE_DCA_RXCTRL_DESC_HSRO_EN);
IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(reg_idx), rxctrl);
}
@@ -962,7 +996,6 @@ static void ixgbe_update_tx_dca(struct ixgbe_adapter *adapter,
txctrl &= ~IXGBE_DCA_TXCTRL_CPUID_MASK;
txctrl |= dca3_get_tag(&adapter->pdev->dev, cpu);
txctrl |= IXGBE_DCA_TXCTRL_DESC_DCA_EN;
- txctrl &= ~IXGBE_DCA_TXCTRL_TX_WB_RO_EN;
IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(reg_idx), txctrl);
break;
case ixgbe_mac_82599EB:
@@ -972,7 +1005,6 @@ static void ixgbe_update_tx_dca(struct ixgbe_adapter *adapter,
txctrl |= (dca3_get_tag(&adapter->pdev->dev, cpu) <<
IXGBE_DCA_TXCTRL_CPUID_SHIFT_82599);
txctrl |= IXGBE_DCA_TXCTRL_DESC_DCA_EN;
- txctrl &= ~IXGBE_DCA_TXCTRL_TX_WB_RO_EN;
IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(reg_idx), txctrl);
break;
default:
@@ -1061,8 +1093,14 @@ static int __ixgbe_notify_dca(struct device *dev, void *data)
return 0;
}
-
#endif /* CONFIG_IXGBE_DCA */
+
+static inline void ixgbe_rx_hash(union ixgbe_adv_rx_desc *rx_desc,
+ struct sk_buff *skb)
+{
+ skb->rxhash = le32_to_cpu(rx_desc->wb.lower.hi_dword.rss);
+}
+
/**
* ixgbe_receive_skb - Send a completed packet up the stack
* @adapter: board private structure
@@ -1454,6 +1492,8 @@ static void ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
}
ixgbe_rx_checksum(adapter, rx_desc, skb);
+ if (adapter->netdev->features & NETIF_F_RXHASH)
+ ixgbe_rx_hash(rx_desc, skb);
/* probably a little skewed due to removing CRC */
total_rx_bytes += skb->len;
@@ -1787,35 +1827,51 @@ static void ixgbe_set_itr_msix(struct ixgbe_q_vector *q_vector)
}
/**
- * ixgbe_check_overtemp_task - worker thread to check over tempurature
- * @work: pointer to work_struct containing our data
+ * ixgbe_check_overtemp_subtask - check for over temperature
+ * @adapter: pointer to adapter
**/
-static void ixgbe_check_overtemp_task(struct work_struct *work)
+static void ixgbe_check_overtemp_subtask(struct ixgbe_adapter *adapter)
{
- struct ixgbe_adapter *adapter = container_of(work,
- struct ixgbe_adapter,
- check_overtemp_task);
struct ixgbe_hw *hw = &adapter->hw;
u32 eicr = adapter->interrupt_event;
- if (!(adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE))
+ if (test_bit(__IXGBE_DOWN, &adapter->state))
return;
+ if (!(adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE) &&
+ !(adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_EVENT))
+ return;
+
+ adapter->flags2 &= ~IXGBE_FLAG2_TEMP_SENSOR_EVENT;
+
switch (hw->device_id) {
- case IXGBE_DEV_ID_82599_T3_LOM: {
- u32 autoneg;
- bool link_up = false;
+ case IXGBE_DEV_ID_82599_T3_LOM:
+ /*
+ * Since the warning interrupt is for both ports, we don't
+ * have to check whether this interrupt was for our port.
+ * We may also have missed the interrupt, so we always have
+ * to check if we got an LSC.
+ */
+ if (!(eicr & IXGBE_EICR_GPI_SDP0) &&
+ !(eicr & IXGBE_EICR_LSC))
+ return;
+
+ if (!(eicr & IXGBE_EICR_LSC) && hw->mac.ops.check_link) {
+ u32 autoneg;
+ bool link_up = false;
- if (hw->mac.ops.check_link)
hw->mac.ops.check_link(hw, &autoneg, &link_up, false);
- if (((eicr & IXGBE_EICR_GPI_SDP0) && (!link_up)) ||
- (eicr & IXGBE_EICR_LSC))
- /* Check if this is due to overtemp */
- if (hw->phy.ops.check_overtemp(hw) == IXGBE_ERR_OVERTEMP)
- break;
- return;
- }
+ if (link_up)
+ return;
+ }
+
+ /* If this is not an over-temperature event there is nothing to do */
+ if (hw->phy.ops.check_overtemp(hw) != IXGBE_ERR_OVERTEMP)
+ return;
+
+ break;
default:
if (!(eicr & IXGBE_EICR_GPI_SDP0))
return;
@@ -1825,8 +1881,8 @@ static void ixgbe_check_overtemp_task(struct work_struct *work)
"Network adapter has been stopped because it has over heated. "
"Restart the computer. If the problem persists, "
"power off the system and replace the adapter\n");
- /* write to clear the interrupt */
- IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP0);
+
+ adapter->interrupt_event = 0;
}
static void ixgbe_check_fan_failure(struct ixgbe_adapter *adapter, u32 eicr)
@@ -1848,15 +1904,19 @@ static void ixgbe_check_sfp_event(struct ixgbe_adapter *adapter, u32 eicr)
if (eicr & IXGBE_EICR_GPI_SDP2) {
/* Clear the interrupt */
IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP2);
- if (!test_bit(__IXGBE_DOWN, &adapter->state))
- schedule_work(&adapter->sfp_config_module_task);
+ if (!test_bit(__IXGBE_DOWN, &adapter->state)) {
+ adapter->flags2 |= IXGBE_FLAG2_SFP_NEEDS_RESET;
+ ixgbe_service_event_schedule(adapter);
+ }
}
if (eicr & IXGBE_EICR_GPI_SDP1) {
/* Clear the interrupt */
IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1);
- if (!test_bit(__IXGBE_DOWN, &adapter->state))
- schedule_work(&adapter->multispeed_fiber_task);
+ if (!test_bit(__IXGBE_DOWN, &adapter->state)) {
+ adapter->flags |= IXGBE_FLAG_NEED_LINK_CONFIG;
+ ixgbe_service_event_schedule(adapter);
+ }
}
}
@@ -1870,7 +1930,7 @@ static void ixgbe_check_lsc(struct ixgbe_adapter *adapter)
if (!test_bit(__IXGBE_DOWN, &adapter->state)) {
IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_LSC);
IXGBE_WRITE_FLUSH(hw);
- schedule_work(&adapter->watchdog_task);
+ ixgbe_service_event_schedule(adapter);
}
}
@@ -1898,26 +1958,32 @@ static irqreturn_t ixgbe_msix_lsc(int irq, void *data)
switch (hw->mac.type) {
case ixgbe_mac_82599EB:
- ixgbe_check_sfp_event(adapter, eicr);
- if ((adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE) &&
- ((eicr & IXGBE_EICR_GPI_SDP0) || (eicr & IXGBE_EICR_LSC))) {
- adapter->interrupt_event = eicr;
- schedule_work(&adapter->check_overtemp_task);
- }
- /* now fallthrough to handle Flow Director */
case ixgbe_mac_X540:
/* Handle Flow Director Full threshold interrupt */
if (eicr & IXGBE_EICR_FLOW_DIR) {
+ int reinit_count = 0;
int i;
- IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_FLOW_DIR);
- /* Disable transmits before FDIR Re-initialization */
- netif_tx_stop_all_queues(netdev);
for (i = 0; i < adapter->num_tx_queues; i++) {
- struct ixgbe_ring *tx_ring =
- adapter->tx_ring[i];
+ struct ixgbe_ring *ring = adapter->tx_ring[i];
if (test_and_clear_bit(__IXGBE_TX_FDIR_INIT_DONE,
- &tx_ring->state))
- schedule_work(&adapter->fdir_reinit_task);
+ &ring->state))
+ reinit_count++;
+ }
+ if (reinit_count) {
+ /* no more flow director interrupts until after init */
+ IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_FLOW_DIR);
+ eicr &= ~IXGBE_EICR_FLOW_DIR;
+ adapter->flags2 |= IXGBE_FLAG2_FDIR_REQUIRES_REINIT;
+ ixgbe_service_event_schedule(adapter);
+ }
+ }
+ ixgbe_check_sfp_event(adapter, eicr);
+ if ((adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE) &&
+ ((eicr & IXGBE_EICR_GPI_SDP0) || (eicr & IXGBE_EICR_LSC))) {
+ if (!test_bit(__IXGBE_DOWN, &adapter->state)) {
+ adapter->interrupt_event = eicr;
+ adapter->flags2 |= IXGBE_FLAG2_TEMP_SENSOR_EVENT;
+ ixgbe_service_event_schedule(adapter);
}
}
break;
@@ -1927,8 +1993,10 @@ static irqreturn_t ixgbe_msix_lsc(int irq, void *data)
ixgbe_check_fan_failure(adapter, eicr);
+ /* re-enable the original interrupt state, no lsc, no queues */
if (!test_bit(__IXGBE_DOWN, &adapter->state))
- IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMS_OTHER);
+ IXGBE_WRITE_REG(hw, IXGBE_EIMS, eicr &
+ ~(IXGBE_EIMS_LSC | IXGBE_EIMS_RTX_QUEUE));
return IRQ_HANDLED;
}
@@ -2513,8 +2581,11 @@ static irqreturn_t ixgbe_intr(int irq, void *data)
ixgbe_check_sfp_event(adapter, eicr);
if ((adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE) &&
((eicr & IXGBE_EICR_GPI_SDP0) || (eicr & IXGBE_EICR_LSC))) {
- adapter->interrupt_event = eicr;
- schedule_work(&adapter->check_overtemp_task);
+ if (!test_bit(__IXGBE_DOWN, &adapter->state)) {
+ adapter->interrupt_event = eicr;
+ adapter->flags2 |= IXGBE_FLAG2_TEMP_SENSOR_EVENT;
+ ixgbe_service_event_schedule(adapter);
+ }
}
break;
default:
@@ -2731,7 +2802,7 @@ void ixgbe_configure_tx_ring(struct ixgbe_adapter *adapter,
/* poll to verify queue is enabled */
do {
- msleep(1);
+ usleep_range(1000, 2000);
txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(reg_idx));
} while (--wait_loop && !(txdctl & IXGBE_TXDCTL_ENABLE));
if (!wait_loop)
@@ -3023,7 +3094,7 @@ static void ixgbe_rx_desc_queue_enable(struct ixgbe_adapter *adapter,
return;
do {
- msleep(1);
+ usleep_range(1000, 2000);
rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(reg_idx));
} while (--wait_loop && !(rxdctl & IXGBE_RXDCTL_ENABLE));
@@ -3178,7 +3249,9 @@ static void ixgbe_configure_virtualization(struct ixgbe_adapter *adapter)
/* enable Tx loopback for VF/PF communication */
IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, IXGBE_PFDTXGSWC_VT_LBEN);
/* Enable MAC Anti-Spoofing */
- hw->mac.ops.set_mac_anti_spoofing(hw, (adapter->num_vfs != 0),
+ hw->mac.ops.set_mac_anti_spoofing(hw,
+ (adapter->antispoofing_enabled =
+ (adapter->num_vfs != 0)),
adapter->num_vfs);
}
@@ -3487,7 +3560,7 @@ static int ixgbe_write_uc_addr_list(struct net_device *netdev)
struct ixgbe_adapter *adapter = netdev_priv(netdev);
struct ixgbe_hw *hw = &adapter->hw;
unsigned int vfn = adapter->num_vfs;
- unsigned int rar_entries = hw->mac.num_rar_entries - (vfn + 1);
+ unsigned int rar_entries = IXGBE_MAX_PF_MACVLANS;
int count = 0;
/* return ENOMEM indicating insufficient memory for addresses */
@@ -3556,7 +3629,7 @@ void ixgbe_set_rx_mode(struct net_device *netdev)
} else {
/*
* Write addresses to the MTA, if the attempt fails
- * then we should just turn on promiscous mode so
+ * then we should just turn on promiscuous mode so
* that we can at least receive multicast traffic
*/
hw->mac.ops.update_mc_addr_list(hw, netdev);
@@ -3567,7 +3640,7 @@ void ixgbe_set_rx_mode(struct net_device *netdev)
/*
* Write addresses to available RAR registers, if there is not
* sufficient space to store all the addresses then enable
- * unicast promiscous mode
+ * unicast promiscuous mode
*/
count = ixgbe_write_uc_addr_list(netdev);
if (count < 0) {
@@ -3760,31 +3833,16 @@ static inline bool ixgbe_is_sfp(struct ixgbe_hw *hw)
**/
static void ixgbe_sfp_link_config(struct ixgbe_adapter *adapter)
{
- struct ixgbe_hw *hw = &adapter->hw;
+ /*
+ * We are assuming the worst case scenario here, and that
+ * is that an SFP was inserted/removed after the reset
+ * but before SFP detection was enabled. As such the best
+ * solution is to just start searching as soon as we start up.
+ */
+ if (adapter->hw.mac.type == ixgbe_mac_82598EB)
+ adapter->flags2 |= IXGBE_FLAG2_SEARCH_FOR_SFP;
- if (hw->phy.multispeed_fiber) {
- /*
- * In multispeed fiber setups, the device may not have
- * had a physical connection when the driver loaded.
- * If that's the case, the initial link configuration
- * couldn't get the MAC into 10G or 1G mode, so we'll
- * never have a link status change interrupt fire.
- * We need to try and force an autonegotiation
- * session, then bring up link.
- */
- if (hw->mac.ops.setup_sfp)
- hw->mac.ops.setup_sfp(hw);
- if (!(adapter->flags & IXGBE_FLAG_IN_SFP_LINK_TASK))
- schedule_work(&adapter->multispeed_fiber_task);
- } else {
- /*
- * Direct Attach Cu and non-multispeed fiber modules
- * still need to be configured properly prior to
- * attempting link.
- */
- if (!(adapter->flags & IXGBE_FLAG_IN_SFP_MOD_TASK))
- schedule_work(&adapter->sfp_config_module_task);
- }
+ adapter->flags2 |= IXGBE_FLAG2_SFP_NEEDS_RESET;
}
/**
@@ -3860,9 +3918,10 @@ static void ixgbe_setup_gpie(struct ixgbe_adapter *adapter)
if (adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE)
gpie |= IXGBE_SDP1_GPIEN;
- if (hw->mac.type == ixgbe_mac_82599EB)
+ if (hw->mac.type == ixgbe_mac_82599EB) {
gpie |= IXGBE_SDP1_GPIEN;
gpie |= IXGBE_SDP2_GPIEN;
+ }
IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
}
@@ -3913,17 +3972,6 @@ static int ixgbe_up_complete(struct ixgbe_adapter *adapter)
e_crit(drv, "Fan has stopped, replace the adapter\n");
}
- /*
- * For hot-pluggable SFP+ devices, a new SFP+ module may have
- * arrived before interrupts were enabled but after probe. Such
- * devices wouldn't have their type identified yet. We need to
- * kick off the SFP+ module setup first, then try to bring up link.
- * If we're not hot-pluggable SFP+, we just need to configure link
- * and bring it up.
- */
- if (hw->phy.type == ixgbe_phy_none)
- schedule_work(&adapter->sfp_config_module_task);
-
/* enable transmits */
netif_tx_start_all_queues(adapter->netdev);
@@ -3931,7 +3979,7 @@ static int ixgbe_up_complete(struct ixgbe_adapter *adapter)
* link up interrupt but shouldn't be a problem */
adapter->flags |= IXGBE_FLAG_NEED_LINK_UPDATE;
adapter->link_check_timeout = jiffies;
- mod_timer(&adapter->watchdog_timer, jiffies);
+ mod_timer(&adapter->service_timer, jiffies);
/* Set PF Reset Done bit so PF/VF Mail Ops can work */
ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
@@ -3944,8 +3992,11 @@ static int ixgbe_up_complete(struct ixgbe_adapter *adapter)
void ixgbe_reinit_locked(struct ixgbe_adapter *adapter)
{
WARN_ON(in_interrupt());
+ /* put off any impending NetWatchDogTimeout */
+ adapter->netdev->trans_start = jiffies;
+
while (test_and_set_bit(__IXGBE_RESETTING, &adapter->state))
- msleep(1);
+ usleep_range(1000, 2000);
ixgbe_down(adapter);
/*
* If SR-IOV enabled then wait a bit before bringing the adapter
@@ -3972,10 +4023,20 @@ void ixgbe_reset(struct ixgbe_adapter *adapter)
struct ixgbe_hw *hw = &adapter->hw;
int err;
+ /* lock SFP init bit to prevent race conditions with the watchdog */
+ while (test_and_set_bit(__IXGBE_IN_SFP_INIT, &adapter->state))
+ usleep_range(1000, 2000);
+
+ /* clear all SFP and link config related flags while holding SFP_INIT */
+ adapter->flags2 &= ~(IXGBE_FLAG2_SEARCH_FOR_SFP |
+ IXGBE_FLAG2_SFP_NEEDS_RESET);
+ adapter->flags &= ~IXGBE_FLAG_NEED_LINK_CONFIG;
+
err = hw->mac.ops.init_hw(hw);
switch (err) {
case 0:
case IXGBE_ERR_SFP_NOT_PRESENT:
+ case IXGBE_ERR_SFP_NOT_SUPPORTED:
break;
case IXGBE_ERR_MASTER_REQUESTS_PENDING:
e_dev_err("master disable timed out\n");
@@ -3993,6 +4054,8 @@ void ixgbe_reset(struct ixgbe_adapter *adapter)
e_dev_err("Hardware Error: %d\n", err);
}
+ clear_bit(__IXGBE_IN_SFP_INIT, &adapter->state);
+
/* reprogram the RAR[0] in case user changed it. */
hw->mac.ops.set_rar(hw, 0, hw->mac.addr, adapter->num_vfs,
IXGBE_RAH_AV);
@@ -4121,26 +4184,12 @@ void ixgbe_down(struct ixgbe_adapter *adapter)
struct net_device *netdev = adapter->netdev;
struct ixgbe_hw *hw = &adapter->hw;
u32 rxctrl;
- u32 txdctl;
int i;
int num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
/* signal that we are down to the interrupt handler */
set_bit(__IXGBE_DOWN, &adapter->state);
- /* disable receive for all VFs and wait one second */
- if (adapter->num_vfs) {
- /* ping all the active vfs to let them know we are going down */
- ixgbe_ping_all_vfs(adapter);
-
- /* Disable all VFTE/VFRE TX/RX */
- ixgbe_disable_tx_rx(adapter);
-
- /* Mark all the VFs as inactive */
- for (i = 0 ; i < adapter->num_vfs; i++)
- adapter->vfinfo[i].clear_to_send = 0;
- }
-
/* disable receives */
rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, rxctrl & ~IXGBE_RXCTRL_RXEN);
@@ -4150,15 +4199,11 @@ void ixgbe_down(struct ixgbe_adapter *adapter)
/* this call also flushes the previous write */
ixgbe_disable_rx_queue(adapter, adapter->rx_ring[i]);
- msleep(10);
+ usleep_range(10000, 20000);
netif_tx_stop_all_queues(netdev);
- clear_bit(__IXGBE_SFP_MODULE_NOT_FOUND, &adapter->state);
- del_timer_sync(&adapter->sfp_timer);
- del_timer_sync(&adapter->watchdog_timer);
- cancel_work_sync(&adapter->watchdog_task);
-
+ /* call carrier off first to avoid false dev_watchdog timeouts */
netif_carrier_off(netdev);
netif_tx_disable(netdev);
@@ -4166,6 +4211,25 @@ void ixgbe_down(struct ixgbe_adapter *adapter)
ixgbe_napi_disable_all(adapter);
+ adapter->flags2 &= ~(IXGBE_FLAG2_FDIR_REQUIRES_REINIT |
+ IXGBE_FLAG2_RESET_REQUESTED);
+ adapter->flags &= ~IXGBE_FLAG_NEED_LINK_UPDATE;
+
+ del_timer_sync(&adapter->service_timer);
+
+ /* disable receive for all VFs and wait one second */
+ if (adapter->num_vfs) {
+ /* ping all the active vfs to let them know we are going down */
+ ixgbe_ping_all_vfs(adapter);
+
+ /* Disable all VFTE/VFRE TX/RX */
+ ixgbe_disable_tx_rx(adapter);
+
+ /* Mark all the VFs as inactive */
+ for (i = 0 ; i < adapter->num_vfs; i++)
+ adapter->vfinfo[i].clear_to_send = 0;
+ }
+
/* Cleanup the affinity_hint CPU mask memory and callback */
for (i = 0; i < num_q_vectors; i++) {
struct ixgbe_q_vector *q_vector = adapter->q_vector[i];
@@ -4175,21 +4239,13 @@ void ixgbe_down(struct ixgbe_adapter *adapter)
free_cpumask_var(q_vector->affinity_mask);
}
- if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE ||
- adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE)
- cancel_work_sync(&adapter->fdir_reinit_task);
-
- if (adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE)
- cancel_work_sync(&adapter->check_overtemp_task);
-
/* disable transmits in the hardware now that interrupts are off */
for (i = 0; i < adapter->num_tx_queues; i++) {
u8 reg_idx = adapter->tx_ring[i]->reg_idx;
- txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(reg_idx));
- IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(reg_idx),
- (txdctl & ~IXGBE_TXDCTL_ENABLE));
+ IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(reg_idx), IXGBE_TXDCTL_SWFLSH);
}
- /* Disable the Tx DMA engine on 82599 */
+
+ /* Disable the Tx DMA engine on 82599 and X540 */
switch (hw->mac.type) {
case ixgbe_mac_82599EB:
case ixgbe_mac_X540:
@@ -4201,9 +4257,6 @@ void ixgbe_down(struct ixgbe_adapter *adapter)
break;
}
- /* clear n-tuple filters that are cached */
- ethtool_ntuple_flush(netdev);
-
if (!pci_channel_offline(adapter->pdev))
ixgbe_reset(adapter);
@@ -4267,25 +4320,8 @@ static void ixgbe_tx_timeout(struct net_device *netdev)
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
- adapter->tx_timeout_count++;
-
/* Do the reset outside of interrupt context */
- schedule_work(&adapter->reset_task);
-}
-
-static void ixgbe_reset_task(struct work_struct *work)
-{
- struct ixgbe_adapter *adapter;
- adapter = container_of(work, struct ixgbe_adapter, reset_task);
-
- /* If we're already down or resetting, just bail */
- if (test_bit(__IXGBE_DOWN, &adapter->state) ||
- test_bit(__IXGBE_RESETTING, &adapter->state))
- return;
-
- ixgbe_dump(adapter);
- netdev_err(adapter->netdev, "Reset adapter\n");
- ixgbe_reinit_locked(adapter);
+ ixgbe_tx_timeout_reset(adapter);
}
/**
@@ -4443,7 +4479,7 @@ static inline bool ixgbe_set_sriov_queues(struct ixgbe_adapter *adapter)
}
/*
- * ixgbe_set_num_queues: Allocate queues for device, feature dependant
+ * ixgbe_set_num_queues: Allocate queues for device, feature dependent
* @adapter: board private structure to initialize
*
* This is the top level queue allocation routine. The order here is very
@@ -4567,8 +4603,8 @@ static inline bool ixgbe_cache_ring_rss(struct ixgbe_adapter *adapter)
#ifdef CONFIG_IXGBE_DCB
/* ixgbe_get_first_reg_idx - Return first register index associated with ring */
-void ixgbe_get_first_reg_idx(struct ixgbe_adapter *adapter, u8 tc,
- unsigned int *tx, unsigned int *rx)
+static void ixgbe_get_first_reg_idx(struct ixgbe_adapter *adapter, u8 tc,
+ unsigned int *tx, unsigned int *rx)
{
struct net_device *dev = adapter->netdev;
struct ixgbe_hw *hw = &adapter->hw;
@@ -5138,57 +5174,6 @@ void ixgbe_clear_interrupt_scheme(struct ixgbe_adapter *adapter)
}
/**
- * ixgbe_sfp_timer - worker thread to find a missing module
- * @data: pointer to our adapter struct
- **/
-static void ixgbe_sfp_timer(unsigned long data)
-{
- struct ixgbe_adapter *adapter = (struct ixgbe_adapter *)data;
-
- /*
- * Do the sfp_timer outside of interrupt context due to the
- * delays that sfp+ detection requires
- */
- schedule_work(&adapter->sfp_task);
-}
-
-/**
- * ixgbe_sfp_task - worker thread to find a missing module
- * @work: pointer to work_struct containing our data
- **/
-static void ixgbe_sfp_task(struct work_struct *work)
-{
- struct ixgbe_adapter *adapter = container_of(work,
- struct ixgbe_adapter,
- sfp_task);
- struct ixgbe_hw *hw = &adapter->hw;
-
- if ((hw->phy.type == ixgbe_phy_nl) &&
- (hw->phy.sfp_type == ixgbe_sfp_type_not_present)) {
- s32 ret = hw->phy.ops.identify_sfp(hw);
- if (ret == IXGBE_ERR_SFP_NOT_PRESENT)
- goto reschedule;
- ret = hw->phy.ops.reset(hw);
- if (ret == IXGBE_ERR_SFP_NOT_SUPPORTED) {
- e_dev_err("failed to initialize because an unsupported "
- "SFP+ module type was detected.\n");
- e_dev_err("Reload the driver after installing a "
- "supported module.\n");
- unregister_netdev(adapter->netdev);
- } else {
- e_info(probe, "detected SFP+: %d\n", hw->phy.sfp_type);
- }
- /* don't need this routine any more */
- clear_bit(__IXGBE_SFP_MODULE_NOT_FOUND, &adapter->state);
- }
- return;
-reschedule:
- if (test_bit(__IXGBE_SFP_MODULE_NOT_FOUND, &adapter->state))
- mod_timer(&adapter->sfp_timer,
- round_jiffies(jiffies + (2 * HZ)));
-}
-
-/**
* ixgbe_sw_init - Initialize general software structures (struct ixgbe_adapter)
* @adapter: board private structure to initialize
*
@@ -5904,8 +5889,13 @@ void ixgbe_update_stats(struct ixgbe_adapter *adapter)
hwstats->gotc += IXGBE_READ_REG(hw, IXGBE_GOTCH);
hwstats->tor += IXGBE_READ_REG(hw, IXGBE_TORH);
break;
- case ixgbe_mac_82599EB:
case ixgbe_mac_X540:
+ /* OS2BMC stats are X540 only */
+ hwstats->o2bgptc += IXGBE_READ_REG(hw, IXGBE_O2BGPTC);
+ hwstats->o2bspc += IXGBE_READ_REG(hw, IXGBE_O2BSPC);
+ hwstats->b2ospc += IXGBE_READ_REG(hw, IXGBE_B2OSPC);
+ hwstats->b2ogprc += IXGBE_READ_REG(hw, IXGBE_B2OGPRC);
+ case ixgbe_mac_82599EB:
hwstats->gorc += IXGBE_READ_REG(hw, IXGBE_GORCL);
IXGBE_READ_REG(hw, IXGBE_GORCH); /* to clear */
hwstats->gotc += IXGBE_READ_REG(hw, IXGBE_GOTCL);
@@ -5979,23 +5969,66 @@ void ixgbe_update_stats(struct ixgbe_adapter *adapter)
}
/**
- * ixgbe_watchdog - Timer Call-back
- * @data: pointer to adapter cast into an unsigned long
+ * ixgbe_fdir_reinit_subtask - subtask to reinit the FDIR filter table
+ * @adapter - pointer to the device adapter structure
**/
-static void ixgbe_watchdog(unsigned long data)
+static void ixgbe_fdir_reinit_subtask(struct ixgbe_adapter *adapter)
{
- struct ixgbe_adapter *adapter = (struct ixgbe_adapter *)data;
struct ixgbe_hw *hw = &adapter->hw;
- u64 eics = 0;
int i;
- /*
- * Do the watchdog outside of interrupt context due to the lovely
- * delays that some of the newer hardware requires
- */
+ if (!(adapter->flags2 & IXGBE_FLAG2_FDIR_REQUIRES_REINIT))
+ return;
+
+ adapter->flags2 &= ~IXGBE_FLAG2_FDIR_REQUIRES_REINIT;
+ /* if interface is down do nothing */
if (test_bit(__IXGBE_DOWN, &adapter->state))
- goto watchdog_short_circuit;
+ return;
+
+ /* do nothing if we are not using signature filters */
+ if (!(adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE))
+ return;
+
+ adapter->fdir_overflow++;
+
+ if (ixgbe_reinit_fdir_tables_82599(hw) == 0) {
+ for (i = 0; i < adapter->num_tx_queues; i++)
+ set_bit(__IXGBE_TX_FDIR_INIT_DONE,
+ &(adapter->tx_ring[i]->state));
+ /* re-enable flow director interrupts */
+ IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMS_FLOW_DIR);
+ } else {
+ e_err(probe, "failed to finish FDIR re-initialization, "
+ "ignored adding FDIR ATR filters\n");
+ }
+}
+
+/**
+ * ixgbe_check_hang_subtask - check for hung queues and dropped interrupts
+ * @adapter - pointer to the device adapter structure
+ *
+ * This function serves two purposes. First it strobes the interrupt lines
+ * in order to make certain interrupts are occurring. Secondly it sets the
+ * bits needed to check for TX hangs. As a result we should immediately
+ * determine if a hang has occurred.
+ */
+static void ixgbe_check_hang_subtask(struct ixgbe_adapter *adapter)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+ u64 eics = 0;
+ int i;
+
+ /* If we're down or resetting, just bail */
+ if (test_bit(__IXGBE_DOWN, &adapter->state) ||
+ test_bit(__IXGBE_RESETTING, &adapter->state))
+ return;
+
+ /* Force detection of hung controller */
+ if (netif_carrier_ok(adapter->netdev)) {
+ for (i = 0; i < adapter->num_tx_queues; i++)
+ set_check_for_tx_hang(adapter->tx_ring[i]);
+ }
if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED)) {
/*
@@ -6005,108 +6038,172 @@ static void ixgbe_watchdog(unsigned long data)
*/
IXGBE_WRITE_REG(hw, IXGBE_EICS,
(IXGBE_EICS_TCP_TIMER | IXGBE_EICS_OTHER));
- goto watchdog_reschedule;
- }
-
- /* get one bit for every active tx/rx interrupt vector */
- for (i = 0; i < adapter->num_msix_vectors - NON_Q_VECTORS; i++) {
- struct ixgbe_q_vector *qv = adapter->q_vector[i];
- if (qv->rxr_count || qv->txr_count)
- eics |= ((u64)1 << i);
+ } else {
+ /* get one bit for every active tx/rx interrupt vector */
+ for (i = 0; i < adapter->num_msix_vectors - NON_Q_VECTORS; i++) {
+ struct ixgbe_q_vector *qv = adapter->q_vector[i];
+ if (qv->rxr_count || qv->txr_count)
+ eics |= ((u64)1 << i);
+ }
}
- /* Cause software interrupt to ensure rx rings are cleaned */
+ /* Cause software interrupt to ensure rings are cleaned */
ixgbe_irq_rearm_queues(adapter, eics);
-watchdog_reschedule:
- /* Reset the timer */
- mod_timer(&adapter->watchdog_timer, round_jiffies(jiffies + 2 * HZ));
-
-watchdog_short_circuit:
- schedule_work(&adapter->watchdog_task);
}
/**
- * ixgbe_multispeed_fiber_task - worker thread to configure multispeed fiber
- * @work: pointer to work_struct containing our data
+ * ixgbe_watchdog_update_link - update the link status
+ * @adapter - pointer to the device adapter structure
**/
-static void ixgbe_multispeed_fiber_task(struct work_struct *work)
+static void ixgbe_watchdog_update_link(struct ixgbe_adapter *adapter)
{
- struct ixgbe_adapter *adapter = container_of(work,
- struct ixgbe_adapter,
- multispeed_fiber_task);
struct ixgbe_hw *hw = &adapter->hw;
- u32 autoneg;
- bool negotiation;
+ u32 link_speed = adapter->link_speed;
+ bool link_up = adapter->link_up;
+ int i;
- adapter->flags |= IXGBE_FLAG_IN_SFP_LINK_TASK;
- autoneg = hw->phy.autoneg_advertised;
- if ((!autoneg) && (hw->mac.ops.get_link_capabilities))
- hw->mac.ops.get_link_capabilities(hw, &autoneg, &negotiation);
- hw->mac.autotry_restart = false;
- if (hw->mac.ops.setup_link)
- hw->mac.ops.setup_link(hw, autoneg, negotiation, true);
- adapter->flags |= IXGBE_FLAG_NEED_LINK_UPDATE;
- adapter->flags &= ~IXGBE_FLAG_IN_SFP_LINK_TASK;
+ if (!(adapter->flags & IXGBE_FLAG_NEED_LINK_UPDATE))
+ return;
+
+ if (hw->mac.ops.check_link) {
+ hw->mac.ops.check_link(hw, &link_speed, &link_up, false);
+ } else {
+ /* always assume link is up, if no check link function */
+ link_speed = IXGBE_LINK_SPEED_10GB_FULL;
+ link_up = true;
+ }
+ if (link_up) {
+ if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
+ for (i = 0; i < MAX_TRAFFIC_CLASS; i++)
+ hw->mac.ops.fc_enable(hw, i);
+ } else {
+ hw->mac.ops.fc_enable(hw, 0);
+ }
+ }
+
+ if (link_up ||
+ time_after(jiffies, (adapter->link_check_timeout +
+ IXGBE_TRY_LINK_TIMEOUT))) {
+ adapter->flags &= ~IXGBE_FLAG_NEED_LINK_UPDATE;
+ IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMC_LSC);
+ IXGBE_WRITE_FLUSH(hw);
+ }
+
+ adapter->link_up = link_up;
+ adapter->link_speed = link_speed;
}
/**
- * ixgbe_sfp_config_module_task - worker thread to configure a new SFP+ module
- * @work: pointer to work_struct containing our data
+ * ixgbe_watchdog_link_is_up - update netif_carrier status and
+ * print link up message
+ * @adapter - pointer to the device adapter structure
**/
-static void ixgbe_sfp_config_module_task(struct work_struct *work)
+static void ixgbe_watchdog_link_is_up(struct ixgbe_adapter *adapter)
{
- struct ixgbe_adapter *adapter = container_of(work,
- struct ixgbe_adapter,
- sfp_config_module_task);
+ struct net_device *netdev = adapter->netdev;
struct ixgbe_hw *hw = &adapter->hw;
- u32 err;
+ u32 link_speed = adapter->link_speed;
+ bool flow_rx, flow_tx;
- adapter->flags |= IXGBE_FLAG_IN_SFP_MOD_TASK;
+ /* only continue if link was previously down */
+ if (netif_carrier_ok(netdev))
+ return;
- /* Time for electrical oscillations to settle down */
- msleep(100);
- err = hw->phy.ops.identify_sfp(hw);
+ adapter->flags2 &= ~IXGBE_FLAG2_SEARCH_FOR_SFP;
- if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
- e_dev_err("failed to initialize because an unsupported SFP+ "
- "module type was detected.\n");
- e_dev_err("Reload the driver after installing a supported "
- "module.\n");
- unregister_netdev(adapter->netdev);
- return;
+ switch (hw->mac.type) {
+ case ixgbe_mac_82598EB: {
+ u32 frctl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
+ u32 rmcs = IXGBE_READ_REG(hw, IXGBE_RMCS);
+ flow_rx = !!(frctl & IXGBE_FCTRL_RFCE);
+ flow_tx = !!(rmcs & IXGBE_RMCS_TFCE_802_3X);
+ }
+ break;
+ case ixgbe_mac_X540:
+ case ixgbe_mac_82599EB: {
+ u32 mflcn = IXGBE_READ_REG(hw, IXGBE_MFLCN);
+ u32 fccfg = IXGBE_READ_REG(hw, IXGBE_FCCFG);
+ flow_rx = !!(mflcn & IXGBE_MFLCN_RFCE);
+ flow_tx = !!(fccfg & IXGBE_FCCFG_TFCE_802_3X);
+ }
+ break;
+ default:
+ flow_tx = false;
+ flow_rx = false;
+ break;
}
- if (hw->mac.ops.setup_sfp)
- hw->mac.ops.setup_sfp(hw);
+ e_info(drv, "NIC Link is Up %s, Flow Control: %s\n",
+ (link_speed == IXGBE_LINK_SPEED_10GB_FULL ?
+ "10 Gbps" :
+ (link_speed == IXGBE_LINK_SPEED_1GB_FULL ?
+ "1 Gbps" :
+ (link_speed == IXGBE_LINK_SPEED_100_FULL ?
+ "100 Mbps" :
+ "unknown speed"))),
+ ((flow_rx && flow_tx) ? "RX/TX" :
+ (flow_rx ? "RX" :
+ (flow_tx ? "TX" : "None"))));
- if (!(adapter->flags & IXGBE_FLAG_IN_SFP_LINK_TASK))
- /* This will also work for DA Twinax connections */
- schedule_work(&adapter->multispeed_fiber_task);
- adapter->flags &= ~IXGBE_FLAG_IN_SFP_MOD_TASK;
+ netif_carrier_on(netdev);
+#ifdef HAVE_IPLINK_VF_CONFIG
+ ixgbe_check_vf_rate_limit(adapter);
+#endif /* HAVE_IPLINK_VF_CONFIG */
}
/**
- * ixgbe_fdir_reinit_task - worker thread to reinit FDIR filter table
- * @work: pointer to work_struct containing our data
+ * ixgbe_watchdog_link_is_down - update netif_carrier status and
+ * print link down message
+ * @adapter - pointer to the adapter structure
**/
-static void ixgbe_fdir_reinit_task(struct work_struct *work)
+static void ixgbe_watchdog_link_is_down(struct ixgbe_adapter *adapter)
{
- struct ixgbe_adapter *adapter = container_of(work,
- struct ixgbe_adapter,
- fdir_reinit_task);
+ struct net_device *netdev = adapter->netdev;
struct ixgbe_hw *hw = &adapter->hw;
+
+ adapter->link_up = false;
+ adapter->link_speed = 0;
+
+ /* only continue if link was up previously */
+ if (!netif_carrier_ok(netdev))
+ return;
+
+ /* poll for SFP+ cable when link is down */
+ if (ixgbe_is_sfp(hw) && hw->mac.type == ixgbe_mac_82598EB)
+ adapter->flags2 |= IXGBE_FLAG2_SEARCH_FOR_SFP;
+
+ e_info(drv, "NIC Link is Down\n");
+ netif_carrier_off(netdev);
+}
+
+/**
+ * ixgbe_watchdog_flush_tx - flush queues on link down
+ * @adapter - pointer to the device adapter structure
+ **/
+static void ixgbe_watchdog_flush_tx(struct ixgbe_adapter *adapter)
+{
int i;
+ int some_tx_pending = 0;
- if (ixgbe_reinit_fdir_tables_82599(hw) == 0) {
- for (i = 0; i < adapter->num_tx_queues; i++)
- set_bit(__IXGBE_TX_FDIR_INIT_DONE,
- &(adapter->tx_ring[i]->state));
- } else {
- e_err(probe, "failed to finish FDIR re-initialization, "
- "ignored adding FDIR ATR filters\n");
+ if (!netif_carrier_ok(adapter->netdev)) {
+ for (i = 0; i < adapter->num_tx_queues; i++) {
+ struct ixgbe_ring *tx_ring = adapter->tx_ring[i];
+ if (tx_ring->next_to_use != tx_ring->next_to_clean) {
+ some_tx_pending = 1;
+ break;
+ }
+ }
+
+ if (some_tx_pending) {
+ /* We've lost link, so the controller stops DMA,
+ * but we've got queued Tx work that's never going
+ * to get done, so reset controller to flush Tx.
+ * (Do the reset outside of interrupt context).
+ */
+ adapter->flags2 |= IXGBE_FLAG2_RESET_REQUESTED;
+ }
}
- /* Done FDIR Re-initialization, enable transmits */
- netif_tx_start_all_queues(adapter->netdev);
}
static void ixgbe_spoof_check(struct ixgbe_adapter *adapter)
@@ -6129,133 +6226,186 @@ static void ixgbe_spoof_check(struct ixgbe_adapter *adapter)
e_warn(drv, "%d Spoofed packets detected\n", ssvpc);
}
-static DEFINE_MUTEX(ixgbe_watchdog_lock);
+/**
+ * ixgbe_watchdog_subtask - check and bring link up
+ * @adapter - pointer to the device adapter structure
+ **/
+static void ixgbe_watchdog_subtask(struct ixgbe_adapter *adapter)
+{
+ /* if interface is down do nothing */
+ if (test_bit(__IXGBE_DOWN, &adapter->state))
+ return;
+
+ ixgbe_watchdog_update_link(adapter);
+
+ if (adapter->link_up)
+ ixgbe_watchdog_link_is_up(adapter);
+ else
+ ixgbe_watchdog_link_is_down(adapter);
+
+ ixgbe_spoof_check(adapter);
+ ixgbe_update_stats(adapter);
+
+ ixgbe_watchdog_flush_tx(adapter);
+}
/**
- * ixgbe_watchdog_task - worker thread to bring link up
- * @work: pointer to work_struct containing our data
+ * ixgbe_sfp_detection_subtask - poll for SFP+ cable
+ * @adapter - the ixgbe adapter structure
**/
-static void ixgbe_watchdog_task(struct work_struct *work)
+static void ixgbe_sfp_detection_subtask(struct ixgbe_adapter *adapter)
{
- struct ixgbe_adapter *adapter = container_of(work,
- struct ixgbe_adapter,
- watchdog_task);
- struct net_device *netdev = adapter->netdev;
struct ixgbe_hw *hw = &adapter->hw;
- u32 link_speed;
- bool link_up;
- int i;
- struct ixgbe_ring *tx_ring;
- int some_tx_pending = 0;
+ s32 err;
- mutex_lock(&ixgbe_watchdog_lock);
+ /* not searching for SFP so there is nothing to do here */
+ if (!(adapter->flags2 & IXGBE_FLAG2_SEARCH_FOR_SFP) &&
+ !(adapter->flags2 & IXGBE_FLAG2_SFP_NEEDS_RESET))
+ return;
- link_up = adapter->link_up;
- link_speed = adapter->link_speed;
+ /* someone else is in init, wait until next service event */
+ if (test_and_set_bit(__IXGBE_IN_SFP_INIT, &adapter->state))
+ return;
- if (adapter->flags & IXGBE_FLAG_NEED_LINK_UPDATE) {
- hw->mac.ops.check_link(hw, &link_speed, &link_up, false);
- if (link_up) {
-#ifdef CONFIG_DCB
- if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
- for (i = 0; i < MAX_TRAFFIC_CLASS; i++)
- hw->mac.ops.fc_enable(hw, i);
- } else {
- hw->mac.ops.fc_enable(hw, 0);
- }
-#else
- hw->mac.ops.fc_enable(hw, 0);
-#endif
- }
+ err = hw->phy.ops.identify_sfp(hw);
+ if (err == IXGBE_ERR_SFP_NOT_SUPPORTED)
+ goto sfp_out;
- if (link_up ||
- time_after(jiffies, (adapter->link_check_timeout +
- IXGBE_TRY_LINK_TIMEOUT))) {
- adapter->flags &= ~IXGBE_FLAG_NEED_LINK_UPDATE;
- IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMC_LSC);
- }
- adapter->link_up = link_up;
- adapter->link_speed = link_speed;
+ if (err == IXGBE_ERR_SFP_NOT_PRESENT) {
+ /* If no cable is present, then we need to reset
+ * the next time we find a good cable. */
+ adapter->flags2 |= IXGBE_FLAG2_SFP_NEEDS_RESET;
}
- if (link_up) {
- if (!netif_carrier_ok(netdev)) {
- bool flow_rx, flow_tx;
-
- switch (hw->mac.type) {
- case ixgbe_mac_82598EB: {
- u32 frctl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
- u32 rmcs = IXGBE_READ_REG(hw, IXGBE_RMCS);
- flow_rx = !!(frctl & IXGBE_FCTRL_RFCE);
- flow_tx = !!(rmcs & IXGBE_RMCS_TFCE_802_3X);
- }
- break;
- case ixgbe_mac_82599EB:
- case ixgbe_mac_X540: {
- u32 mflcn = IXGBE_READ_REG(hw, IXGBE_MFLCN);
- u32 fccfg = IXGBE_READ_REG(hw, IXGBE_FCCFG);
- flow_rx = !!(mflcn & IXGBE_MFLCN_RFCE);
- flow_tx = !!(fccfg & IXGBE_FCCFG_TFCE_802_3X);
- }
- break;
- default:
- flow_tx = false;
- flow_rx = false;
- break;
- }
+ /* exit on error */
+ if (err)
+ goto sfp_out;
- e_info(drv, "NIC Link is Up %s, Flow Control: %s\n",
- (link_speed == IXGBE_LINK_SPEED_10GB_FULL ?
- "10 Gbps" :
- (link_speed == IXGBE_LINK_SPEED_1GB_FULL ?
- "1 Gbps" :
- (link_speed == IXGBE_LINK_SPEED_100_FULL ?
- "100 Mbps" :
- "unknown speed"))),
- ((flow_rx && flow_tx) ? "RX/TX" :
- (flow_rx ? "RX" :
- (flow_tx ? "TX" : "None"))));
-
- netif_carrier_on(netdev);
- ixgbe_check_vf_rate_limit(adapter);
- } else {
- /* Force detection of hung controller */
- for (i = 0; i < adapter->num_tx_queues; i++) {
- tx_ring = adapter->tx_ring[i];
- set_check_for_tx_hang(tx_ring);
- }
- }
- } else {
- adapter->link_up = false;
- adapter->link_speed = 0;
- if (netif_carrier_ok(netdev)) {
- e_info(drv, "NIC Link is Down\n");
- netif_carrier_off(netdev);
- }
- }
+ /* exit if reset not needed */
+ if (!(adapter->flags2 & IXGBE_FLAG2_SFP_NEEDS_RESET))
+ goto sfp_out;
- if (!netif_carrier_ok(netdev)) {
- for (i = 0; i < adapter->num_tx_queues; i++) {
- tx_ring = adapter->tx_ring[i];
- if (tx_ring->next_to_use != tx_ring->next_to_clean) {
- some_tx_pending = 1;
- break;
- }
- }
+ adapter->flags2 &= ~IXGBE_FLAG2_SFP_NEEDS_RESET;
- if (some_tx_pending) {
- /* We've lost link, so the controller stops DMA,
- * but we've got queued Tx work that's never going
- * to get done, so reset controller to flush Tx.
- * (Do the reset outside of interrupt context).
- */
- schedule_work(&adapter->reset_task);
- }
+ /*
+ * A module may be identified correctly, but the EEPROM may not have
+ * support for that module. setup_sfp() will fail in that case, so
+ * we should not allow that module to load.
+ */
+ if (hw->mac.type == ixgbe_mac_82598EB)
+ err = hw->phy.ops.reset(hw);
+ else
+ err = hw->mac.ops.setup_sfp(hw);
+
+ if (err == IXGBE_ERR_SFP_NOT_SUPPORTED)
+ goto sfp_out;
+
+ adapter->flags |= IXGBE_FLAG_NEED_LINK_CONFIG;
+ e_info(probe, "detected SFP+: %d\n", hw->phy.sfp_type);
+
+sfp_out:
+ clear_bit(__IXGBE_IN_SFP_INIT, &adapter->state);
+
+ if ((err == IXGBE_ERR_SFP_NOT_SUPPORTED) &&
+ (adapter->netdev->reg_state == NETREG_REGISTERED)) {
+ e_dev_err("failed to initialize because an unsupported "
+ "SFP+ module type was detected.\n");
+ e_dev_err("Reload the driver after installing a "
+ "supported module.\n");
+ unregister_netdev(adapter->netdev);
}
+}
- ixgbe_spoof_check(adapter);
- ixgbe_update_stats(adapter);
- mutex_unlock(&ixgbe_watchdog_lock);
+/**
+ * ixgbe_sfp_link_config_subtask - set up link after an SFP module install
+ * @adapter - the ixgbe adapter structure
+ **/
+static void ixgbe_sfp_link_config_subtask(struct ixgbe_adapter *adapter)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+ u32 autoneg;
+ bool negotiation;
+
+ if (!(adapter->flags & IXGBE_FLAG_NEED_LINK_CONFIG))
+ return;
+
+ /* someone else is in init, wait until next service event */
+ if (test_and_set_bit(__IXGBE_IN_SFP_INIT, &adapter->state))
+ return;
+
+ adapter->flags &= ~IXGBE_FLAG_NEED_LINK_CONFIG;
+
+ autoneg = hw->phy.autoneg_advertised;
+ if ((!autoneg) && (hw->mac.ops.get_link_capabilities))
+ hw->mac.ops.get_link_capabilities(hw, &autoneg, &negotiation);
+ hw->mac.autotry_restart = false;
+ if (hw->mac.ops.setup_link)
+ hw->mac.ops.setup_link(hw, autoneg, negotiation, true);
+
+ adapter->flags |= IXGBE_FLAG_NEED_LINK_UPDATE;
+ adapter->link_check_timeout = jiffies;
+ clear_bit(__IXGBE_IN_SFP_INIT, &adapter->state);
+}
+
+/**
+ * ixgbe_service_timer - Timer Call-back
+ * @data: pointer to adapter cast into an unsigned long
+ **/
+static void ixgbe_service_timer(unsigned long data)
+{
+ struct ixgbe_adapter *adapter = (struct ixgbe_adapter *)data;
+ unsigned long next_event_offset;
+
+ /* poll faster when waiting for link */
+ if (adapter->flags & IXGBE_FLAG_NEED_LINK_UPDATE)
+ next_event_offset = HZ / 10;
+ else
+ next_event_offset = HZ * 2;
+
+ /* Reset the timer */
+ mod_timer(&adapter->service_timer, next_event_offset + jiffies);
+
+ ixgbe_service_event_schedule(adapter);
+}
+
+static void ixgbe_reset_subtask(struct ixgbe_adapter *adapter)
+{
+ if (!(adapter->flags2 & IXGBE_FLAG2_RESET_REQUESTED))
+ return;
+
+ adapter->flags2 &= ~IXGBE_FLAG2_RESET_REQUESTED;
+
+ /* If we're already down or resetting, just bail */
+ if (test_bit(__IXGBE_DOWN, &adapter->state) ||
+ test_bit(__IXGBE_RESETTING, &adapter->state))
+ return;
+
+ ixgbe_dump(adapter);
+ netdev_err(adapter->netdev, "Reset adapter\n");
+ adapter->tx_timeout_count++;
+
+ ixgbe_reinit_locked(adapter);
+}
+
+/**
+ * ixgbe_service_task - manages and runs subtasks
+ * @work: pointer to work_struct containing our data
+ **/
+static void ixgbe_service_task(struct work_struct *work)
+{
+ struct ixgbe_adapter *adapter = container_of(work,
+ struct ixgbe_adapter,
+ service_task);
+
+ ixgbe_reset_subtask(adapter);
+ ixgbe_sfp_detection_subtask(adapter);
+ ixgbe_sfp_link_config_subtask(adapter);
+ ixgbe_check_overtemp_subtask(adapter);
+ ixgbe_watchdog_subtask(adapter);
+ ixgbe_fdir_reinit_subtask(adapter);
+ ixgbe_check_hang_subtask(adapter);
+
+ ixgbe_service_event_complete(adapter);
}
static int ixgbe_tso(struct ixgbe_adapter *adapter,
@@ -7094,6 +7244,8 @@ static void __devinit ixgbe_probe_vf(struct ixgbe_adapter *adapter,
#ifdef CONFIG_PCI_IOV
struct ixgbe_hw *hw = &adapter->hw;
int err;
+ int num_vf_macvlans, i;
+ struct vf_macvlans *mv_list;
if (hw->mac.type == ixgbe_mac_82598EB || !max_vfs)
return;
@@ -7110,6 +7262,26 @@ static void __devinit ixgbe_probe_vf(struct ixgbe_adapter *adapter,
e_err(probe, "Failed to enable PCI sriov: %d\n", err);
goto err_novfs;
}
+
+ num_vf_macvlans = hw->mac.num_rar_entries -
+ (IXGBE_MAX_PF_MACVLANS + 1 + adapter->num_vfs);
+
+ adapter->mv_list = mv_list = kcalloc(num_vf_macvlans,
+ sizeof(struct vf_macvlans),
+ GFP_KERNEL);
+ if (mv_list) {
+ /* Initialize list of VF macvlans */
+ INIT_LIST_HEAD(&adapter->vf_mvs.l);
+ for (i = 0; i < num_vf_macvlans; i++) {
+ mv_list->vf = -1;
+ mv_list->free = true;
+ mv_list->rar_entry = hw->mac.num_rar_entries -
+ (i + adapter->num_vfs + 1);
+ list_add(&mv_list->l, &adapter->vf_mvs.l);
+ mv_list++;
+ }
+ }
+
/* If call to enable VFs succeeded then allocate memory
* for per VF control structures.
*/
@@ -7280,22 +7452,6 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
hw->phy.mdio.mdio_read = ixgbe_mdio_read;
hw->phy.mdio.mdio_write = ixgbe_mdio_write;
- /* set up this timer and work struct before calling get_invariants
- * which might start the timer
- */
- init_timer(&adapter->sfp_timer);
- adapter->sfp_timer.function = ixgbe_sfp_timer;
- adapter->sfp_timer.data = (unsigned long) adapter;
-
- INIT_WORK(&adapter->sfp_task, ixgbe_sfp_task);
-
- /* multispeed fiber has its own tasklet, called from GPI SDP1 context */
- INIT_WORK(&adapter->multispeed_fiber_task, ixgbe_multispeed_fiber_task);
-
- /* a new SFP+ module arrival, called from GPI SDP2 context */
- INIT_WORK(&adapter->sfp_config_module_task,
- ixgbe_sfp_config_module_task);
-
ii->get_invariants(hw);
/* setup the private structure */
@@ -7329,17 +7485,9 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
hw->phy.reset_if_overtemp = false;
if (err == IXGBE_ERR_SFP_NOT_PRESENT &&
hw->mac.type == ixgbe_mac_82598EB) {
- /*
- * Start a kernel thread to watch for a module to arrive.
- * Only do this for 82598, since 82599 will generate
- * interrupts on module arrival.
- */
- set_bit(__IXGBE_SFP_MODULE_NOT_FOUND, &adapter->state);
- mod_timer(&adapter->sfp_timer,
- round_jiffies(jiffies + (2 * HZ)));
err = 0;
} else if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
- e_dev_err("failed to initialize because an unsupported SFP+ "
+ e_dev_err("failed to load because an unsupported SFP+ "
"module type was detected.\n");
e_dev_err("Reload the driver after installing a supported "
"module.\n");
@@ -7361,9 +7509,16 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
netdev->features |= NETIF_F_TSO;
netdev->features |= NETIF_F_TSO6;
netdev->features |= NETIF_F_GRO;
+ netdev->features |= NETIF_F_RXHASH;
- if (adapter->hw.mac.type == ixgbe_mac_82599EB)
+ switch (adapter->hw.mac.type) {
+ case ixgbe_mac_82599EB:
+ case ixgbe_mac_X540:
netdev->features |= NETIF_F_SCTP_CSUM;
+ break;
+ default:
+ break;
+ }
netdev->vlan_features |= NETIF_F_TSO;
netdev->vlan_features |= NETIF_F_TSO6;
@@ -7424,17 +7579,19 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
(hw->mac.type == ixgbe_mac_82599EB))))
hw->mac.ops.disable_tx_laser(hw);
- init_timer(&adapter->watchdog_timer);
- adapter->watchdog_timer.function = ixgbe_watchdog;
- adapter->watchdog_timer.data = (unsigned long)adapter;
+ setup_timer(&adapter->service_timer, &ixgbe_service_timer,
+ (unsigned long) adapter);
- INIT_WORK(&adapter->reset_task, ixgbe_reset_task);
- INIT_WORK(&adapter->watchdog_task, ixgbe_watchdog_task);
+ INIT_WORK(&adapter->service_task, ixgbe_service_task);
+ clear_bit(__IXGBE_SERVICE_SCHED, &adapter->state);
err = ixgbe_init_interrupt_scheme(adapter);
if (err)
goto err_sw_init;
+ if (!(adapter->flags & IXGBE_FLAG_RSS_ENABLED))
+ netdev->features &= ~NETIF_F_RXHASH;
+
switch (pdev->device) {
case IXGBE_DEV_ID_82599_SFP:
/* Only this subdevice supports WOL */
@@ -7463,8 +7620,8 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
/* print bus type/speed/width info */
e_dev_info("(PCI Express:%s:%s) %pM\n",
- (hw->bus.speed == ixgbe_bus_speed_5000 ? "5.0Gb/s" :
- hw->bus.speed == ixgbe_bus_speed_2500 ? "2.5Gb/s" :
+ (hw->bus.speed == ixgbe_bus_speed_5000 ? "5.0GT/s" :
+ hw->bus.speed == ixgbe_bus_speed_2500 ? "2.5GT/s" :
"Unknown"),
(hw->bus.width == ixgbe_bus_width_pcie_x8 ? "Width x8" :
hw->bus.width == ixgbe_bus_width_pcie_x4 ? "Width x4" :
@@ -7513,13 +7670,6 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
/* carrier off reporting is important to ethtool even BEFORE open */
netif_carrier_off(netdev);
- if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE ||
- adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE)
- INIT_WORK(&adapter->fdir_reinit_task, ixgbe_fdir_reinit_task);
-
- if (adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE)
- INIT_WORK(&adapter->check_overtemp_task,
- ixgbe_check_overtemp_task);
#ifdef CONFIG_IXGBE_DCA
if (dca_add_requester(&pdev->dev) == 0) {
adapter->flags |= IXGBE_FLAG_DCA_ENABLED;
@@ -7546,11 +7696,7 @@ err_sw_init:
err_eeprom:
if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
ixgbe_disable_sriov(adapter);
- clear_bit(__IXGBE_SFP_MODULE_NOT_FOUND, &adapter->state);
- del_timer_sync(&adapter->sfp_timer);
- cancel_work_sync(&adapter->sfp_task);
- cancel_work_sync(&adapter->multispeed_fiber_task);
- cancel_work_sync(&adapter->sfp_config_module_task);
+ adapter->flags2 &= ~IXGBE_FLAG2_SEARCH_FOR_SFP;
iounmap(hw->hw_addr);
err_ioremap:
free_netdev(netdev);
@@ -7578,24 +7724,7 @@ static void __devexit ixgbe_remove(struct pci_dev *pdev)
struct net_device *netdev = adapter->netdev;
set_bit(__IXGBE_DOWN, &adapter->state);
-
- /*
- * The timers may be rescheduled, so explicitly disable them
- * from being rescheduled.
- */
- clear_bit(__IXGBE_SFP_MODULE_NOT_FOUND, &adapter->state);
- del_timer_sync(&adapter->watchdog_timer);
- del_timer_sync(&adapter->sfp_timer);
-
- cancel_work_sync(&adapter->watchdog_task);
- cancel_work_sync(&adapter->sfp_task);
- cancel_work_sync(&adapter->multispeed_fiber_task);
- cancel_work_sync(&adapter->sfp_config_module_task);
- if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE ||
- adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE)
- cancel_work_sync(&adapter->fdir_reinit_task);
- if (adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE)
- cancel_work_sync(&adapter->check_overtemp_task);
+ cancel_work_sync(&adapter->service_task);
#ifdef CONFIG_IXGBE_DCA
if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) {
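The ixgbe_main.c hunks above repeatedly call ixgbe_service_event_schedule() and ixgbe_service_event_complete(), whose bodies are introduced in an earlier hunk of this patch and are not shown here. A minimal sketch of what they look like, assuming __IXGBE_SERVICE_SCHED is used purely as a guard against queuing the single service_task work item twice:

static void ixgbe_service_event_schedule(struct ixgbe_adapter *adapter)
{
	/* queue the service task only if the interface is up and the
	 * task is not already pending
	 */
	if (!test_bit(__IXGBE_DOWN, &adapter->state) &&
	    !test_and_set_bit(__IXGBE_SERVICE_SCHED, &adapter->state))
		schedule_work(&adapter->service_task);
}

static void ixgbe_service_event_complete(struct ixgbe_adapter *adapter)
{
	/* drop the guard bit so a later event can queue the task again */
	clear_bit(__IXGBE_SERVICE_SCHED, &adapter->state);
}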
diff --git a/drivers/net/ixgbe/ixgbe_mbx.h b/drivers/net/ixgbe/ixgbe_mbx.h
index fe6ea81dc7f8..b239bdac38da 100644
--- a/drivers/net/ixgbe/ixgbe_mbx.h
+++ b/drivers/net/ixgbe/ixgbe_mbx.h
@@ -36,9 +36,6 @@
#define IXGBE_VFMAILBOX 0x002FC
#define IXGBE_VFMBMEM 0x00200
-#define IXGBE_PFMAILBOX(x) (0x04B00 + (4 * x))
-#define IXGBE_PFMBMEM(vfn) (0x13000 + (64 * vfn))
-
#define IXGBE_PFMAILBOX_STS 0x00000001 /* Initiate message send to VF */
#define IXGBE_PFMAILBOX_ACK 0x00000002 /* Ack message recv'd from VF */
#define IXGBE_PFMAILBOX_VFU 0x00000004 /* VF owns the mailbox buffer */
@@ -70,6 +67,7 @@
#define IXGBE_VF_SET_MULTICAST 0x03 /* VF requests PF to set MC addr */
#define IXGBE_VF_SET_VLAN 0x04 /* VF requests PF to set VLAN */
#define IXGBE_VF_SET_LPE 0x05 /* VF requests PF to set VMOLR.LPE */
+#define IXGBE_VF_SET_MACVLAN 0x06 /* VF requests PF for unicast filter */
/* length of permanent address message returned from PF */
#define IXGBE_VF_PERMADDR_MSG_LEN 4
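The new IXGBE_VF_SET_MACVLAN opcode above is parsed on the PF side in ixgbe_rcv_msg_from_vf() (see the ixgbe_sriov.c hunks later in this patch). A hypothetical VF-side helper, only to illustrate the message layout that parser expects; the VF driver is not touched by this patch and vf_build_set_macvlan_msg is an illustrative name:

/* Build a SET_MACVLAN request: filter index in the MSGINFO field of
 * word 0 (index 0 means "clear my unicast list"), MAC address
 * starting at word 1.
 */
static void vf_build_set_macvlan_msg(u32 msgbuf[3], int index,
				     const u8 addr[ETH_ALEN])
{
	msgbuf[0] = IXGBE_VF_SET_MACVLAN |
		    (index << IXGBE_VT_MSGINFO_SHIFT);
	memcpy(&msgbuf[1], addr, ETH_ALEN);
}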
diff --git a/drivers/net/ixgbe/ixgbe_phy.c b/drivers/net/ixgbe/ixgbe_phy.c
index f72f705f6183..735f686c3b36 100644
--- a/drivers/net/ixgbe/ixgbe_phy.c
+++ b/drivers/net/ixgbe/ixgbe_phy.c
@@ -449,7 +449,8 @@ s32 ixgbe_setup_phy_link_generic(struct ixgbe_hw *hw)
MDIO_MMD_AN,
&autoneg_reg);
- autoneg_reg &= ~ADVERTISE_100FULL;
+ autoneg_reg &= ~(ADVERTISE_100FULL |
+ ADVERTISE_100HALF);
if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_100_FULL)
autoneg_reg |= ADVERTISE_100FULL;
@@ -656,7 +657,8 @@ s32 ixgbe_setup_phy_link_tnx(struct ixgbe_hw *hw)
MDIO_MMD_AN,
&autoneg_reg);
- autoneg_reg &= ~ADVERTISE_100FULL;
+ autoneg_reg &= ~(ADVERTISE_100FULL |
+ ADVERTISE_100HALF);
if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_100_FULL)
autoneg_reg |= ADVERTISE_100FULL;
@@ -753,7 +755,7 @@ s32 ixgbe_reset_phy_nl(struct ixgbe_hw *hw)
&phy_data);
if ((phy_data & MDIO_CTRL1_RESET) == 0)
break;
- msleep(10);
+ usleep_range(10000, 20000);
}
if ((phy_data & MDIO_CTRL1_RESET) != 0) {
@@ -782,7 +784,7 @@ s32 ixgbe_reset_phy_nl(struct ixgbe_hw *hw)
case IXGBE_DELAY_NL:
data_offset++;
hw_dbg(hw, "DELAY: %d MS\n", edata);
- msleep(edata);
+ usleep_range(edata * 1000, edata * 2000);
break;
case IXGBE_DATA_NL:
hw_dbg(hw, "DATA:\n");
@@ -1220,7 +1222,7 @@ s32 ixgbe_read_i2c_byte_generic(struct ixgbe_hw *hw, u8 byte_offset,
swfw_mask = IXGBE_GSSR_PHY0_SM;
do {
- if (ixgbe_acquire_swfw_sync(hw, swfw_mask) != 0) {
+ if (hw->mac.ops.acquire_swfw_sync(hw, swfw_mask) != 0) {
status = IXGBE_ERR_SWFW_SYNC;
goto read_byte_out;
}
@@ -1267,7 +1269,7 @@ s32 ixgbe_read_i2c_byte_generic(struct ixgbe_hw *hw, u8 byte_offset,
break;
fail:
- ixgbe_release_swfw_sync(hw, swfw_mask);
+ hw->mac.ops.release_swfw_sync(hw, swfw_mask);
msleep(100);
ixgbe_i2c_bus_clear(hw);
retry++;
@@ -1278,7 +1280,7 @@ fail:
} while (retry < max_retry);
- ixgbe_release_swfw_sync(hw, swfw_mask);
+ hw->mac.ops.release_swfw_sync(hw, swfw_mask);
read_byte_out:
return status;
@@ -1306,7 +1308,7 @@ s32 ixgbe_write_i2c_byte_generic(struct ixgbe_hw *hw, u8 byte_offset,
else
swfw_mask = IXGBE_GSSR_PHY0_SM;
- if (ixgbe_acquire_swfw_sync(hw, swfw_mask) != 0) {
+ if (hw->mac.ops.acquire_swfw_sync(hw, swfw_mask) != 0) {
status = IXGBE_ERR_SWFW_SYNC;
goto write_byte_out;
}
@@ -1350,7 +1352,7 @@ fail:
hw_dbg(hw, "I2C byte write error.\n");
} while (retry < max_retry);
- ixgbe_release_swfw_sync(hw, swfw_mask);
+ hw->mac.ops.release_swfw_sync(hw, swfw_mask);
write_byte_out:
return status;
@@ -1694,7 +1696,7 @@ static void ixgbe_i2c_bus_clear(struct ixgbe_hw *hw)
}
/**
- * ixgbe_tn_check_overtemp - Checks if an overtemp occured.
+ * ixgbe_tn_check_overtemp - Checks if an overtemp occurred.
* @hw: pointer to hardware structure
*
* Checks if the LASI temp alarm status was triggered due to overtemp
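Several ixgbe_phy.c hunks above replace direct calls to ixgbe_acquire_swfw_sync()/ixgbe_release_swfw_sync() with the hw->mac.ops indirection, so a MAC with a different software/firmware semaphore scheme can supply its own implementation. A sketch of the idea only; in the driver the ops are populated from per-MAC ops tables rather than a runtime if/else, and the X540-specific function names here are assumptions:

	if (hw->mac.type == ixgbe_mac_X540) {
		/* X540 arbitrates the semaphore through its own NVM logic */
		hw->mac.ops.acquire_swfw_sync = ixgbe_acquire_swfw_sync_X540;
		hw->mac.ops.release_swfw_sync = ixgbe_release_swfw_sync_X540;
	} else {
		/* 82598/82599 keep using the generic SWFW semaphore */
		hw->mac.ops.acquire_swfw_sync = ixgbe_acquire_swfw_sync;
		hw->mac.ops.release_swfw_sync = ixgbe_release_swfw_sync;
	}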
diff --git a/drivers/net/ixgbe/ixgbe_sriov.c b/drivers/net/ixgbe/ixgbe_sriov.c
index 6e50d8328942..ac99b0458fe2 100644
--- a/drivers/net/ixgbe/ixgbe_sriov.c
+++ b/drivers/net/ixgbe/ixgbe_sriov.c
@@ -82,6 +82,21 @@ static int ixgbe_set_vf_multicasts(struct ixgbe_adapter *adapter,
return 0;
}
+static void ixgbe_restore_vf_macvlans(struct ixgbe_adapter *adapter)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+ struct list_head *pos;
+ struct vf_macvlans *entry;
+
+ list_for_each(pos, &adapter->vf_mvs.l) {
+ entry = list_entry(pos, struct vf_macvlans, l);
+ if (entry->free == false)
+ hw->mac.ops.set_rar(hw, entry->rar_entry,
+ entry->vf_macvlan,
+ entry->vf, IXGBE_RAH_AV);
+ }
+}
+
void ixgbe_restore_vf_multicasts(struct ixgbe_adapter *adapter)
{
struct ixgbe_hw *hw = &adapter->hw;
@@ -102,6 +117,9 @@ void ixgbe_restore_vf_multicasts(struct ixgbe_adapter *adapter)
IXGBE_WRITE_REG(hw, IXGBE_MTA(vector_reg), mta_reg);
}
}
+
+ /* Restore any VF macvlans */
+ ixgbe_restore_vf_macvlans(adapter);
}
static int ixgbe_set_vf_vlan(struct ixgbe_adapter *adapter, int add, int vid,
@@ -110,7 +128,7 @@ static int ixgbe_set_vf_vlan(struct ixgbe_adapter *adapter, int add, int vid,
return adapter->hw.mac.ops.set_vfta(&adapter->hw, vid, vf, (bool)add);
}
-void ixgbe_set_vf_lpe(struct ixgbe_adapter *adapter, u32 *msgbuf)
+static void ixgbe_set_vf_lpe(struct ixgbe_adapter *adapter, u32 *msgbuf)
{
struct ixgbe_hw *hw = &adapter->hw;
int new_mtu = msgbuf[1];
@@ -200,6 +218,61 @@ static int ixgbe_set_vf_mac(struct ixgbe_adapter *adapter,
return 0;
}
+static int ixgbe_set_vf_macvlan(struct ixgbe_adapter *adapter,
+ int vf, int index, unsigned char *mac_addr)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+ struct list_head *pos;
+ struct vf_macvlans *entry;
+
+ if (index <= 1) {
+ list_for_each(pos, &adapter->vf_mvs.l) {
+ entry = list_entry(pos, struct vf_macvlans, l);
+ if (entry->vf == vf) {
+ entry->vf = -1;
+ entry->free = true;
+ entry->is_macvlan = false;
+ hw->mac.ops.clear_rar(hw, entry->rar_entry);
+ }
+ }
+ }
+
+ /*
+ * If index was zero then we were asked to clear the uc list
+ * for the VF. We're done.
+ */
+ if (!index)
+ return 0;
+
+ entry = NULL;
+
+ list_for_each(pos, &adapter->vf_mvs.l) {
+ entry = list_entry(pos, struct vf_macvlans, l);
+ if (entry->free)
+ break;
+ }
+
+ /*
+ * If we traversed the entire list and didn't find a free entry
+ * then we're out of space on the RAR table. Also entry may
+ * be NULL because the original memory allocation for the list
+ * failed, which is not fatal but does mean we can't support
+ * VF requests for MACVLAN because we couldn't allocate
+ * memory for the list management required.
+ */
+ if (!entry || !entry->free)
+ return -ENOSPC;
+
+ entry->free = false;
+ entry->is_macvlan = true;
+ entry->vf = vf;
+ memcpy(entry->vf_macvlan, mac_addr, ETH_ALEN);
+
+ hw->mac.ops.set_rar(hw, entry->rar_entry, mac_addr, vf, IXGBE_RAH_AV);
+
+ return 0;
+}
+
int ixgbe_vf_configuration(struct pci_dev *pdev, unsigned int event_mask)
{
unsigned char vf_mac_addr[6];
@@ -251,12 +324,12 @@ static inline void ixgbe_vf_reset_msg(struct ixgbe_adapter *adapter, u32 vf)
static int ixgbe_rcv_msg_from_vf(struct ixgbe_adapter *adapter, u32 vf)
{
u32 mbx_size = IXGBE_VFMAILBOX_SIZE;
- u32 msgbuf[mbx_size];
+ u32 msgbuf[IXGBE_VFMAILBOX_SIZE];
struct ixgbe_hw *hw = &adapter->hw;
s32 retval;
int entries;
u16 *hash_list;
- int add, vid;
+ int add, vid, index;
u8 *new_mac;
retval = ixgbe_read_mbx(hw, msgbuf, mbx_size, vf);
@@ -345,6 +418,24 @@ static int ixgbe_rcv_msg_from_vf(struct ixgbe_adapter *adapter, u32 vf)
retval = ixgbe_set_vf_vlan(adapter, add, vid, vf);
}
break;
+ case IXGBE_VF_SET_MACVLAN:
+ index = (msgbuf[0] & IXGBE_VT_MSGINFO_MASK) >>
+ IXGBE_VT_MSGINFO_SHIFT;
+ /*
+ * If the VF is allowed to set MAC filters then turn off
+ * anti-spoofing to avoid false positives. An index
+ * greater than 0 will indicate the VF is setting a
+ * macvlan MAC filter.
+ */
+ if (index > 0 && adapter->antispoofing_enabled) {
+ hw->mac.ops.set_mac_anti_spoofing(hw, false,
+ adapter->num_vfs);
+ hw->mac.ops.set_vlan_anti_spoofing(hw, false, vf);
+ adapter->antispoofing_enabled = false;
+ }
+ retval = ixgbe_set_vf_macvlan(adapter, vf, index,
+ (unsigned char *)(&msgbuf[1]));
+ break;
default:
e_err(drv, "Unhandled Msg %8.8x\n", msgbuf[0]);
retval = IXGBE_ERR_MBX;
@@ -452,7 +543,8 @@ int ixgbe_ndo_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos)
goto out;
ixgbe_set_vmvir(adapter, vlan | (qos << VLAN_PRIO_SHIFT), vf);
ixgbe_set_vmolr(hw, vf, false);
- hw->mac.ops.set_vlan_anti_spoofing(hw, true, vf);
+ if (adapter->antispoofing_enabled)
+ hw->mac.ops.set_vlan_anti_spoofing(hw, true, vf);
adapter->vfinfo[vf].pf_vlan = vlan;
adapter->vfinfo[vf].pf_qos = qos;
dev_info(&adapter->pdev->dev,
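ixgbe_set_vf_macvlan() and ixgbe_restore_vf_macvlans() above walk a list of struct vf_macvlans entries anchored at adapter->vf_mvs.l; the structure itself is defined in ixgbe.h and is not part of these hunks. A sketch that simply collects the fields the code touches (the real definition may order them differently or carry additional members):

struct vf_macvlans {
	struct list_head l;		/* linked into adapter->vf_mvs.l */
	int vf;				/* owning VF, or -1 when free */
	int rar_entry;			/* RAR slot backing this filter */
	bool free;			/* entry available for allocation */
	bool is_macvlan;		/* in use for a VF macvlan filter */
	u8 vf_macvlan[ETH_ALEN];	/* MAC programmed into the RAR */
};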
diff --git a/drivers/net/ixgbe/ixgbe_type.h b/drivers/net/ixgbe/ixgbe_type.h
index 25c1fb7eda06..fa43f2507f43 100644
--- a/drivers/net/ixgbe/ixgbe_type.h
+++ b/drivers/net/ixgbe/ixgbe_type.h
@@ -58,9 +58,11 @@
#define IXGBE_DEV_ID_82599_SFP_FCOE 0x1529
#define IXGBE_SUBDEV_ID_82599_SFP 0x11A9
#define IXGBE_DEV_ID_82599_SFP_EM 0x1507
+#define IXGBE_DEV_ID_82599_SFP_SF2 0x154D
#define IXGBE_DEV_ID_82599_XAUI_LOM 0x10FC
#define IXGBE_DEV_ID_82599_COMBO_BACKPLANE 0x10F8
#define IXGBE_SUBDEV_ID_82599_KX4_KR_MEZZ 0x000C
+#define IXGBE_DEV_ID_82599_LS 0x154F
#define IXGBE_DEV_ID_X540T 0x1528
/* General Registers */
@@ -163,6 +165,9 @@
(0x0D018 + ((_i - 64) * 0x40)))
#define IXGBE_RXDCTL(_i) (((_i) < 64) ? (0x01028 + ((_i) * 0x40)) : \
(0x0D028 + ((_i - 64) * 0x40)))
+#define IXGBE_RSCCTL(_i) (((_i) < 64) ? (0x0102C + ((_i) * 0x40)) : \
+ (0x0D02C + ((_i - 64) * 0x40)))
+#define IXGBE_RSCDBU 0x03028
#define IXGBE_RDDCC 0x02F20
#define IXGBE_RXMEMWRAP 0x03190
#define IXGBE_STARCTRL 0x03024
@@ -227,17 +232,23 @@
#define IXGBE_VLVF(_i) (0x0F100 + ((_i) * 4)) /* 64 of these (0-63) */
#define IXGBE_VLVFB(_i) (0x0F200 + ((_i) * 4)) /* 128 of these (0-127) */
#define IXGBE_VMVIR(_i) (0x08000 + ((_i) * 4)) /* 64 of these (0-63) */
-#define IXGBE_VT_CTL 0x051B0
-#define IXGBE_VFRE(_i) (0x051E0 + ((_i) * 4))
-#define IXGBE_VFTE(_i) (0x08110 + ((_i) * 4))
-#define IXGBE_VMECM(_i) (0x08790 + ((_i) * 4))
-#define IXGBE_QDE 0x2F04
-#define IXGBE_VMOLR(_i) (0x0F000 + ((_i) * 4)) /* 64 total */
-#define IXGBE_UTA(_i) (0x0F400 + ((_i) * 4))
-#define IXGBE_VMRCTL(_i) (0x0F600 + ((_i) * 4))
-#define IXGBE_VMRVLAN(_i) (0x0F610 + ((_i) * 4))
-#define IXGBE_VMRVM(_i) (0x0F630 + ((_i) * 4))
-#define IXGBE_L34T_IMIR(_i) (0x0E800 + ((_i) * 4)) /*128 of these (0-127)*/
+#define IXGBE_VT_CTL 0x051B0
+#define IXGBE_PFMAILBOX(_i) (0x04B00 + (4 * (_i))) /* 64 total */
+#define IXGBE_PFMBMEM(_i) (0x13000 + (64 * (_i))) /* 64 Mailboxes, 16 DW each */
+#define IXGBE_PFMBICR(_i) (0x00710 + (4 * (_i))) /* 4 total */
+#define IXGBE_PFMBIMR(_i) (0x00720 + (4 * (_i))) /* 4 total */
+#define IXGBE_VFRE(_i) (0x051E0 + ((_i) * 4))
+#define IXGBE_VFTE(_i) (0x08110 + ((_i) * 4))
+#define IXGBE_VMECM(_i) (0x08790 + ((_i) * 4))
+#define IXGBE_QDE 0x2F04
+#define IXGBE_VMTXSW(_i) (0x05180 + ((_i) * 4)) /* 2 total */
+#define IXGBE_VMOLR(_i) (0x0F000 + ((_i) * 4)) /* 64 total */
+#define IXGBE_UTA(_i) (0x0F400 + ((_i) * 4))
+#define IXGBE_MRCTL(_i) (0x0F600 + ((_i) * 4))
+#define IXGBE_VMRVLAN(_i) (0x0F610 + ((_i) * 4))
+#define IXGBE_VMRVM(_i) (0x0F630 + ((_i) * 4))
+#define IXGBE_L34T_IMIR(_i) (0x0E800 + ((_i) * 4)) /*128 of these (0-127)*/
+#define IXGBE_RXFECCERR0 0x051B8
#define IXGBE_LLITHRESH 0x0EC90
#define IXGBE_IMIR(_i) (0x05A80 + ((_i) * 4)) /* 8 of these (0-7) */
#define IXGBE_IMIREXT(_i) (0x05AA0 + ((_i) * 4)) /* 8 of these (0-7) */
@@ -364,7 +375,7 @@
#define IXGBE_WUFC_FLX5 0x00200000 /* Flexible Filter 5 Enable */
#define IXGBE_WUFC_FLX_FILTERS 0x000F0000 /* Mask for 4 flex filters */
#define IXGBE_WUFC_EXT_FLX_FILTERS 0x00300000 /* Mask for Ext. flex filters */
-#define IXGBE_WUFC_ALL_FILTERS 0x003F00FF /* Mask for all 6 wakeup filters*/
+#define IXGBE_WUFC_ALL_FILTERS 0x003F00FF /* Mask for all wakeup filters */
#define IXGBE_WUFC_FLX_OFFSET 16 /* Offset to the Flexible Filters bits */
/* Wake Up Status */
@@ -406,7 +417,6 @@
#define IXGBE_SECTXSTAT 0x08804
#define IXGBE_SECTXBUFFAF 0x08808
#define IXGBE_SECTXMINIFG 0x08810
-#define IXGBE_SECTXSTAT 0x08804
#define IXGBE_SECRXCTRL 0x08D00
#define IXGBE_SECRXSTAT 0x08D04
@@ -499,21 +509,6 @@
#define IXGBE_SECTXCTRL_STORE_FORWARD_ENABLE 0x4
-/* HW RSC registers */
-#define IXGBE_RSCCTL(_i) (((_i) < 64) ? (0x0102C + ((_i) * 0x40)) : \
- (0x0D02C + ((_i - 64) * 0x40)))
-#define IXGBE_RSCDBU 0x03028
-#define IXGBE_RSCCTL_RSCEN 0x01
-#define IXGBE_RSCCTL_MAXDESC_1 0x00
-#define IXGBE_RSCCTL_MAXDESC_4 0x04
-#define IXGBE_RSCCTL_MAXDESC_8 0x08
-#define IXGBE_RSCCTL_MAXDESC_16 0x0C
-#define IXGBE_RXDADV_RSCCNT_SHIFT 17
-#define IXGBE_GPIE_RSC_DELAY_SHIFT 11
-#define IXGBE_RXDADV_RSCCNT_MASK 0x001E0000
-#define IXGBE_RSCDBU_RSCACKDIS 0x00000080
-#define IXGBE_RDRXCTL_RSCFRSTSIZE 0x003E0000
-
/* DCB registers */
#define IXGBE_RTRPCS 0x02430
#define IXGBE_RTTDCS 0x04900
@@ -522,6 +517,7 @@
#define IXGBE_RTRUP2TC 0x03020
#define IXGBE_RTTUP2TC 0x0C800
#define IXGBE_RTRPT4C(_i) (0x02140 + ((_i) * 4)) /* 8 of these (0-7) */
+#define IXGBE_TXLLQ(_i) (0x082E0 + ((_i) * 4)) /* 4 of these (0-3) */
#define IXGBE_RTRPT4S(_i) (0x02160 + ((_i) * 4)) /* 8 of these (0-7) */
#define IXGBE_RTTDT2C(_i) (0x04910 + ((_i) * 4)) /* 8 of these (0-7) */
#define IXGBE_RTTDT2S(_i) (0x04930 + ((_i) * 4)) /* 8 of these (0-7) */
@@ -540,7 +536,7 @@
(IXGBE_RTTBCNRC_RF_DEC_MASK << IXGBE_RTTBCNRC_RF_INT_SHIFT)
-/* FCoE registers */
+/* FCoE DMA Context Registers */
#define IXGBE_FCPTRL 0x02410 /* FC User Desc. PTR Low */
#define IXGBE_FCPTRH 0x02414 /* FC USer Desc. PTR High */
#define IXGBE_FCBUFF 0x02418 /* FC Buffer Control */
@@ -677,6 +673,10 @@
#define IXGBE_FCOEDWRC 0x0242C /* Number of FCoE DWords Received */
#define IXGBE_FCOEPTC 0x08784 /* Number of FCoE Packets Transmitted */
#define IXGBE_FCOEDWTC 0x08788 /* Number of FCoE DWords Transmitted */
+#define IXGBE_O2BGPTC 0x041C4
+#define IXGBE_O2BSPC 0x087B0
+#define IXGBE_B2OSPC 0x041C0
+#define IXGBE_B2OGPRC 0x02F90
#define IXGBE_PCRC8ECL 0x0E810
#define IXGBE_PCRC8ECH 0x0E811
#define IXGBE_PCRC8ECH_MASK 0x1F
@@ -742,17 +742,10 @@
#define IXGBE_PBACLR_82599 0x11068
#define IXGBE_CIAA_82599 0x11088
#define IXGBE_CIAD_82599 0x1108C
-#define IXGBE_PCIE_DIAG_0_82599 0x11090
-#define IXGBE_PCIE_DIAG_1_82599 0x11094
-#define IXGBE_PCIE_DIAG_2_82599 0x11098
-#define IXGBE_PCIE_DIAG_3_82599 0x1109C
-#define IXGBE_PCIE_DIAG_4_82599 0x110A0
-#define IXGBE_PCIE_DIAG_5_82599 0x110A4
-#define IXGBE_PCIE_DIAG_6_82599 0x110A8
-#define IXGBE_PCIE_DIAG_7_82599 0x110C0
-#define IXGBE_INTRPT_CSR_82599 0x110B0
-#define IXGBE_INTRPT_MASK_82599 0x110B8
+#define IXGBE_PICAUSE 0x110B0
+#define IXGBE_PIENA 0x110B8
#define IXGBE_CDQ_MBR_82599 0x110B4
+#define IXGBE_PCIESPARE 0x110BC
#define IXGBE_MISC_REG_82599 0x110F0
#define IXGBE_ECC_CTRL_0_82599 0x11100
#define IXGBE_ECC_CTRL_1_82599 0x11104
@@ -785,7 +778,19 @@
#define IXGBE_SYSTIML 0x08C0C /* System time register Low - RO */
#define IXGBE_SYSTIMH 0x08C10 /* System time register High - RO */
#define IXGBE_TIMINCA 0x08C14 /* Increment attributes register - RW */
-#define IXGBE_RXUDP 0x08C1C /* Time Sync Rx UDP Port - RW */
+#define IXGBE_TIMADJL 0x08C18 /* Time Adjustment Offset register Low - RW */
+#define IXGBE_TIMADJH 0x08C1C /* Time Adjustment Offset register High - RW */
+#define IXGBE_TSAUXC 0x08C20 /* TimeSync Auxiliary Control register - RW */
+#define IXGBE_TRGTTIML0 0x08C24 /* Target Time Register 0 Low - RW */
+#define IXGBE_TRGTTIMH0 0x08C28 /* Target Time Register 0 High - RW */
+#define IXGBE_TRGTTIML1 0x08C2C /* Target Time Register 1 Low - RW */
+#define IXGBE_TRGTTIMH1 0x08C30 /* Target Time Register 1 High - RW */
+#define IXGBE_FREQOUT0 0x08C34 /* Frequency Out 0 Control register - RW */
+#define IXGBE_FREQOUT1 0x08C38 /* Frequency Out 1 Control register - RW */
+#define IXGBE_AUXSTMPL0 0x08C3C /* Auxiliary Time Stamp 0 register Low - RO */
+#define IXGBE_AUXSTMPH0 0x08C40 /* Auxiliary Time Stamp 0 register High - RO */
+#define IXGBE_AUXSTMPL1 0x08C44 /* Auxiliary Time Stamp 1 register Low - RO */
+#define IXGBE_AUXSTMPH1 0x08C48 /* Auxiliary Time Stamp 1 register High - RO */
/* Diagnostic Registers */
#define IXGBE_RDSTATCTL 0x02C20
@@ -829,8 +834,20 @@
#define IXGBE_TXDATARDPTR(_i) (0x0C720 + ((_i) * 4)) /* 8 of these C720-C72C*/
#define IXGBE_TXDESCRDPTR(_i) (0x0C730 + ((_i) * 4)) /* 8 of these C730-C73C*/
#define IXGBE_PCIEECCCTL 0x1106C
+#define IXGBE_RXWRPTR(_i) (0x03100 + ((_i) * 4)) /* 8 of these 3100-310C*/
+#define IXGBE_RXUSED(_i) (0x03120 + ((_i) * 4)) /* 8 of these 3120-312C*/
+#define IXGBE_RXRDPTR(_i) (0x03140 + ((_i) * 4)) /* 8 of these 3140-314C*/
+#define IXGBE_RXRDWRPTR(_i) (0x03160 + ((_i) * 4)) /* 8 of these 3160-316C*/
+#define IXGBE_TXWRPTR(_i) (0x0C100 + ((_i) * 4)) /* 8 of these C100-C10C*/
+#define IXGBE_TXUSED(_i) (0x0C120 + ((_i) * 4)) /* 8 of these C120-C12C*/
+#define IXGBE_TXRDPTR(_i) (0x0C140 + ((_i) * 4)) /* 8 of these C140-C14C*/
+#define IXGBE_TXRDWRPTR(_i) (0x0C160 + ((_i) * 4)) /* 8 of these C160-C16C*/
#define IXGBE_PCIEECCCTL0 0x11100
#define IXGBE_PCIEECCCTL1 0x11104
+#define IXGBE_RXDBUECC 0x03F70
+#define IXGBE_TXDBUECC 0x0CF70
+#define IXGBE_RXDBUEST 0x03F74
+#define IXGBE_TXDBUEST 0x0CF74
#define IXGBE_PBTXECC 0x0C300
#define IXGBE_PBRXECC 0x03300
#define IXGBE_GHECCR 0x110B0
@@ -871,6 +888,7 @@
#define IXGBE_AUTOC3 0x042AC
#define IXGBE_ANLP1 0x042B0
#define IXGBE_ANLP2 0x042B4
+#define IXGBE_MACC 0x04330
#define IXGBE_ATLASCTL 0x04800
#define IXGBE_MMNGC 0x042D0
#define IXGBE_ANLPNP1 0x042D4
@@ -883,14 +901,49 @@
#define IXGBE_MPVC 0x04318
#define IXGBE_SGMIIC 0x04314
+/* Statistics Registers */
+#define IXGBE_RXNFGPC 0x041B0
+#define IXGBE_RXNFGBCL 0x041B4
+#define IXGBE_RXNFGBCH 0x041B8
+#define IXGBE_RXDGPC 0x02F50
+#define IXGBE_RXDGBCL 0x02F54
+#define IXGBE_RXDGBCH 0x02F58
+#define IXGBE_RXDDGPC 0x02F5C
+#define IXGBE_RXDDGBCL 0x02F60
+#define IXGBE_RXDDGBCH 0x02F64
+#define IXGBE_RXLPBKGPC 0x02F68
+#define IXGBE_RXLPBKGBCL 0x02F6C
+#define IXGBE_RXLPBKGBCH 0x02F70
+#define IXGBE_RXDLPBKGPC 0x02F74
+#define IXGBE_RXDLPBKGBCL 0x02F78
+#define IXGBE_RXDLPBKGBCH 0x02F7C
+#define IXGBE_TXDGPC 0x087A0
+#define IXGBE_TXDGBCL 0x087A4
+#define IXGBE_TXDGBCH 0x087A8
+
+#define IXGBE_RXDSTATCTRL 0x02F40
+
+/* Copper Pond 2 link timeout */
#define IXGBE_VALIDATE_LINK_READY_TIMEOUT 50
/* Omer CORECTL */
#define IXGBE_CORECTL 0x014F00
/* BARCTRL */
-#define IXGBE_BARCTRL 0x110F4
-#define IXGBE_BARCTRL_FLSIZE 0x0700
-#define IXGBE_BARCTRL_CSRSIZE 0x2000
+#define IXGBE_BARCTRL 0x110F4
+#define IXGBE_BARCTRL_FLSIZE 0x0700
+#define IXGBE_BARCTRL_FLSIZE_SHIFT 8
+#define IXGBE_BARCTRL_CSRSIZE 0x2000
+
+/* RSCCTL Bit Masks */
+#define IXGBE_RSCCTL_RSCEN 0x01
+#define IXGBE_RSCCTL_MAXDESC_1 0x00
+#define IXGBE_RSCCTL_MAXDESC_4 0x04
+#define IXGBE_RSCCTL_MAXDESC_8 0x08
+#define IXGBE_RSCCTL_MAXDESC_16 0x0C
+
+/* RSCDBU Bit Masks */
+#define IXGBE_RSCDBU_RSCSMALDIS_MASK 0x0000007F
+#define IXGBE_RSCDBU_RSCACKDIS 0x00000080
/* RDRXCTL Bit Masks */
#define IXGBE_RDRXCTL_RDMTS_1_2 0x00000000 /* Rx Desc Min Threshold Size */
@@ -898,6 +951,8 @@
#define IXGBE_RDRXCTL_MVMEN 0x00000020
#define IXGBE_RDRXCTL_DMAIDONE 0x00000008 /* DMA init cycle done */
#define IXGBE_RDRXCTL_AGGDIS 0x00010000 /* Aggregation disable */
+#define IXGBE_RDRXCTL_RSCFRSTSIZE 0x003E0000 /* RSC First packet size */
+#define IXGBE_RDRXCTL_RSCLLIDIS 0x00800000 /* Disable RSC compl on LLI */
#define IXGBE_RDRXCTL_RSCACKC 0x02000000 /* must set 1 when RSC enabled */
#define IXGBE_RDRXCTL_FCOE_WRFIX 0x04000000 /* must set 1 when RSC enabled */
@@ -969,8 +1024,8 @@
#define IXGBE_MSCA_OP_CODE_SHIFT 26 /* OP CODE shift */
#define IXGBE_MSCA_ADDR_CYCLE 0x00000000 /* OP CODE 00 (addr cycle) */
#define IXGBE_MSCA_WRITE 0x04000000 /* OP CODE 01 (write) */
-#define IXGBE_MSCA_READ 0x08000000 /* OP CODE 10 (read) */
-#define IXGBE_MSCA_READ_AUTOINC 0x0C000000 /* OP CODE 11 (read, auto inc)*/
+#define IXGBE_MSCA_READ 0x0C000000 /* OP CODE 11 (read) */
+#define IXGBE_MSCA_READ_AUTOINC 0x08000000 /* OP CODE 10 (read, auto inc)*/
#define IXGBE_MSCA_ST_CODE_MASK 0x30000000 /* ST Code mask */
#define IXGBE_MSCA_ST_CODE_SHIFT 28 /* ST Code shift */
#define IXGBE_MSCA_NEW_PROTOCOL 0x00000000 /* ST CODE 00 (new protocol) */
@@ -1057,6 +1112,7 @@
#define IXGBE_GPIE_EIMEN 0x00000040 /* Immediate Interrupt Enable */
#define IXGBE_GPIE_EIAME 0x40000000
#define IXGBE_GPIE_PBA_SUPPORT 0x80000000
+#define IXGBE_GPIE_RSC_DELAY_SHIFT 11
#define IXGBE_GPIE_VTMODE_MASK 0x0000C000 /* VT Mode Mask */
#define IXGBE_GPIE_VTMODE_16 0x00004000 /* 16 VFs 8 queues per VF */
#define IXGBE_GPIE_VTMODE_32 0x00008000 /* 32 VFs 4 queues per VF */
@@ -1291,6 +1347,11 @@
#define IXGBE_FTQF_POOL_SHIFT 8
#define IXGBE_FTQF_5TUPLE_MASK_MASK 0x0000001F
#define IXGBE_FTQF_5TUPLE_MASK_SHIFT 25
+#define IXGBE_FTQF_SOURCE_ADDR_MASK 0x1E
+#define IXGBE_FTQF_DEST_ADDR_MASK 0x1D
+#define IXGBE_FTQF_SOURCE_PORT_MASK 0x1B
+#define IXGBE_FTQF_DEST_PORT_MASK 0x17
+#define IXGBE_FTQF_PROTOCOL_COMP_MASK 0x0F
#define IXGBE_FTQF_POOL_MASK_EN 0x40000000
#define IXGBE_FTQF_QUEUE_ENABLE 0x80000000
@@ -1333,11 +1394,11 @@
*
* Current filters:
* EAPOL 802.1x (0x888e): Filter 0
- * BCN (0x8904): Filter 1
+ * FCoE (0x8906): Filter 2
* 1588 (0x88f7): Filter 3
+ * FIP (0x8914): Filter 4
*/
#define IXGBE_ETQF_FILTER_EAPOL 0
-#define IXGBE_ETQF_FILTER_BCN 1
#define IXGBE_ETQF_FILTER_FCOE 2
#define IXGBE_ETQF_FILTER_1588 3
#define IXGBE_ETQF_FILTER_FIP 4
@@ -1448,6 +1509,11 @@
#define IXGBE_AUTOC2_10G_XFI (0x1 << IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_SHIFT)
#define IXGBE_AUTOC2_10G_SFI (0x2 << IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_SHIFT)
+#define IXGBE_MACC_FLU 0x00000001
+#define IXGBE_MACC_FSV_10G 0x00030000
+#define IXGBE_MACC_FS 0x00040000
+#define IXGBE_MAC_RX2TX_LPBK 0x00000002
+
/* LINKS Bit Masks */
#define IXGBE_LINKS_KX_AN_COMP 0x80000000
#define IXGBE_LINKS_UP 0x40000000
@@ -1501,7 +1567,6 @@
#define IXGBE_ANLP1_ASM_PAUSE 0x0800
#define IXGBE_ANLP1_AN_STATE_MASK 0x000f0000
-
/* SW Semaphore Register bitmasks */
#define IXGBE_SWSM_SMBI 0x00000001 /* Driver Semaphore bit */
#define IXGBE_SWSM_SWESMBI 0x00000002 /* FW Semaphore bit */
@@ -1514,6 +1579,10 @@
#define IXGBE_GSSR_PHY1_SM 0x0004
#define IXGBE_GSSR_MAC_CSR_SM 0x0008
#define IXGBE_GSSR_FLASH_SM 0x0010
+#define IXGBE_GSSR_SW_MNG_SM 0x0400
+
+/* FW Status register bitmask */
+#define IXGBE_FWSTS_FWRI 0x00000200 /* Firmware Reset Indication */
/* EEC Register */
#define IXGBE_EEC_SK 0x00000001 /* EEPROM Clock */
@@ -1534,6 +1603,7 @@
/* EEPROM Addressing bits based on type (0-small, 1-large) */
#define IXGBE_EEC_ADDR_SIZE 0x00000400
#define IXGBE_EEC_SIZE 0x00007800 /* EEPROM Size */
+#define IXGBE_EERD_MAX_ADDR 0x00003FFF /* EERD allows 14 bits for addr. */
#define IXGBE_EEC_SIZE_SHIFT 11
#define IXGBE_EEPROM_WORD_SIZE_SHIFT 6
@@ -1563,8 +1633,10 @@
#define IXGBE_FW_PTR 0x0F
#define IXGBE_PBANUM0_PTR 0x15
#define IXGBE_PBANUM1_PTR 0x16
-#define IXGBE_DEVICE_CAPS 0x2C
+#define IXGBE_FREE_SPACE_PTR 0X3E
#define IXGBE_SAN_MAC_ADDR_PTR 0x28
+#define IXGBE_DEVICE_CAPS 0x2C
+#define IXGBE_SERIAL_NUMBER_MAC_ADDR 0x11
#define IXGBE_PCIE_MSIX_82599_CAPS 0x72
#define IXGBE_PCIE_MSIX_82598_CAPS 0x62
@@ -1601,6 +1673,10 @@
#define IXGBE_ETH_LENGTH_OF_ADDRESS 6
+#define IXGBE_EEPROM_PAGE_SIZE_MAX 128
+#define IXGBE_EEPROM_RD_BUFFER_MAX_COUNT 512 /* EEPROM words # read in burst */
+#define IXGBE_EEPROM_WR_BUFFER_MAX_COUNT 256 /* EEPROM words # wr in burst */
+
#ifndef IXGBE_EEPROM_GRANT_ATTEMPTS
#define IXGBE_EEPROM_GRANT_ATTEMPTS 1000 /* EEPROM # attempts to gain grant */
#endif
@@ -1616,14 +1692,25 @@
#define IXGBE_FLUDONE_ATTEMPTS 20000
#endif
+#define IXGBE_PCIE_CTRL2 0x5 /* PCIe Control 2 Offset */
+#define IXGBE_PCIE_CTRL2_DUMMY_ENABLE 0x8 /* Dummy Function Enable */
+#define IXGBE_PCIE_CTRL2_LAN_DISABLE 0x2 /* LAN PCI Disable */
+#define IXGBE_PCIE_CTRL2_DISABLE_SELECT 0x1 /* LAN Disable Select */
+
#define IXGBE_SAN_MAC_ADDR_PORT0_OFFSET 0x0
#define IXGBE_SAN_MAC_ADDR_PORT1_OFFSET 0x3
#define IXGBE_DEVICE_CAPS_ALLOW_ANY_SFP 0x1
#define IXGBE_DEVICE_CAPS_FCOE_OFFLOADS 0x2
+#define IXGBE_FW_LESM_PARAMETERS_PTR 0x2
+#define IXGBE_FW_LESM_STATE_1 0x1
+#define IXGBE_FW_LESM_STATE_ENABLED 0x8000 /* LESM Enable bit */
#define IXGBE_FW_PASSTHROUGH_PATCH_CONFIG_PTR 0x4
-#define IXGBE_FW_PATCH_VERSION_4 0x7
-
-/* Alternative SAN MAC Address Block */
+#define IXGBE_FW_PATCH_VERSION_4 0x7
+#define IXGBE_FCOE_IBA_CAPS_BLK_PTR 0x33 /* iSCSI/FCOE block */
+#define IXGBE_FCOE_IBA_CAPS_FCOE 0x20 /* FCOE flags */
+#define IXGBE_ISCSI_FCOE_BLK_PTR 0x17 /* iSCSI/FCOE block */
+#define IXGBE_ISCSI_FCOE_FLAGS_OFFSET 0x0 /* FCOE flags */
+#define IXGBE_ISCSI_FCOE_FLAGS_ENABLE 0x1 /* FCOE flags enable bit */
#define IXGBE_ALT_SAN_MAC_ADDR_BLK_PTR 0x27 /* Alt. SAN MAC block */
#define IXGBE_ALT_SAN_MAC_ADDR_CAPS_OFFSET 0x0 /* Alt. SAN MAC capability */
#define IXGBE_ALT_SAN_MAC_ADDR_PORT0_OFFSET 0x1 /* Alt. SAN MAC 0 offset */
@@ -1688,6 +1775,7 @@
/* Transmit Config masks */
#define IXGBE_TXDCTL_ENABLE 0x02000000 /* Enable specific Tx Queue */
#define IXGBE_TXDCTL_SWFLSH 0x04000000 /* Tx Desc. write-back flushing */
+#define IXGBE_TXDCTL_WTHRESH_SHIFT 16 /* shift to WTHRESH bits */
/* Enable short packet padding to 64 bytes */
#define IXGBE_TX_PAD_ENABLE 0x00000400
#define IXGBE_JUMBO_FRAME_ENABLE 0x00000004 /* Allow jumbo frames */
@@ -1701,9 +1789,9 @@
#define IXGBE_RXCTRL_RXEN 0x00000001 /* Enable Receiver */
#define IXGBE_RXCTRL_DMBYPS 0x00000002 /* Descriptor Monitor Bypass */
#define IXGBE_RXDCTL_ENABLE 0x02000000 /* Enable specific Rx Queue */
-#define IXGBE_RXDCTL_VME 0x40000000 /* VLAN mode enable */
#define IXGBE_RXDCTL_RLPMLMASK 0x00003FFF /* Only supported on the X540 */
#define IXGBE_RXDCTL_RLPML_EN 0x00008000
+#define IXGBE_RXDCTL_VME 0x40000000 /* VLAN mode enable */
#define IXGBE_FCTRL_SBP 0x00000002 /* Store Bad Packet */
#define IXGBE_FCTRL_MPE 0x00000100 /* Multicast Promiscuous Ena*/
@@ -1719,6 +1807,8 @@
#define IXGBE_MFLCN_RPFCE 0x00000004 /* Receive Priority FC Enable */
#define IXGBE_MFLCN_RFCE 0x00000008 /* Receive FC Enable */
+#define IXGBE_MFLCN_RPFCE_SHIFT 4
+
/* Multiple Receive Queue Control */
#define IXGBE_MRQC_RSSEN 0x00000001 /* RSS Enable */
#define IXGBE_MRQC_MRQE_MASK 0xF /* Bits 3:0 */
@@ -1859,6 +1949,8 @@
#define IXGBE_RXDADV_PKTTYPE_MASK 0x0000FFF0
#define IXGBE_RXDADV_PKTTYPE_MASK_EX 0x0001FFF0
#define IXGBE_RXDADV_HDRBUFLEN_MASK 0x00007FE0
+#define IXGBE_RXDADV_RSCCNT_MASK 0x001E0000
+#define IXGBE_RXDADV_RSCCNT_SHIFT 17
#define IXGBE_RXDADV_HDRBUFLEN_SHIFT 5
#define IXGBE_RXDADV_SPLITHEADER_EN 0x00001000
#define IXGBE_RXDADV_SPH 0x8000
@@ -1934,15 +2026,6 @@
#define IXGBE_VFLRE(_i) (((_i & 1) ? 0x001C0 : 0x00600))
#define IXGBE_VFLREC(_i) (0x00700 + (_i * 4))
-/* Little Endian defines */
-#ifndef __le32
-#define __le32 u32
-#endif
-#ifndef __le64
-#define __le64 u64
-
-#endif
-
enum ixgbe_fdir_pballoc_type {
IXGBE_FDIR_PBALLOC_64K = 0,
IXGBE_FDIR_PBALLOC_128K,
@@ -2141,8 +2224,6 @@ typedef u32 ixgbe_link_speed;
IXGBE_LINK_SPEED_1GB_FULL | \
IXGBE_LINK_SPEED_10GB_FULL)
-#define IXGBE_PCIE_DEV_CTRL_2 0xC8
-#define PCIE_COMPL_TO_VALUE 0x05
/* Physical layer type */
typedef u32 ixgbe_physical_layer;
@@ -2315,6 +2396,7 @@ enum ixgbe_sfp_type {
enum ixgbe_media_type {
ixgbe_media_type_unknown = 0,
ixgbe_media_type_fiber,
+ ixgbe_media_type_fiber_lco,
ixgbe_media_type_copper,
ixgbe_media_type_backplane,
ixgbe_media_type_cx4,
@@ -2478,6 +2560,10 @@ struct ixgbe_hw_stats {
u64 fcoeptc;
u64 fcoedwrc;
u64 fcoedwtc;
+ u64 b2ospc;
+ u64 b2ogprc;
+ u64 o2bgptc;
+ u64 o2bspc;
};
/* forward declaration */
@@ -2491,7 +2577,9 @@ typedef u8* (*ixgbe_mc_addr_itr) (struct ixgbe_hw *hw, u8 **mc_addr_ptr,
struct ixgbe_eeprom_operations {
s32 (*init_params)(struct ixgbe_hw *);
s32 (*read)(struct ixgbe_hw *, u16, u16 *);
+ s32 (*read_buffer)(struct ixgbe_hw *, u16, u16, u16 *);
s32 (*write)(struct ixgbe_hw *, u16, u16);
+ s32 (*write_buffer)(struct ixgbe_hw *, u16, u16, u16 *);
s32 (*validate_checksum)(struct ixgbe_hw *, u16 *);
s32 (*update_checksum)(struct ixgbe_hw *);
u16 (*calc_checksum)(struct ixgbe_hw *);
@@ -2577,6 +2665,7 @@ struct ixgbe_eeprom_info {
u32 semaphore_delay;
u16 word_size;
u16 address_bits;
+ u16 word_page_size;
};
#define IXGBE_FLAGS_DOUBLE_RESET_REQUIRED 0x01
@@ -2597,6 +2686,7 @@ struct ixgbe_mac_info {
u32 vft_size;
u32 num_rar_entries;
u32 rar_highwater;
+ u32 rx_pb_size;
u32 max_tx_queues;
u32 max_rx_queues;
u32 max_msix_vectors;
diff --git a/drivers/net/ixgbe/ixgbe_x540.c b/drivers/net/ixgbe/ixgbe_x540.c
index f47e93fe32be..4ed687be2fe3 100644
--- a/drivers/net/ixgbe/ixgbe_x540.c
+++ b/drivers/net/ixgbe/ixgbe_x540.c
@@ -37,6 +37,7 @@
#define IXGBE_X540_RAR_ENTRIES 128
#define IXGBE_X540_MC_TBL_SIZE 128
#define IXGBE_X540_VFT_TBL_SIZE 128
+#define IXGBE_X540_RX_PB_SIZE 384
static s32 ixgbe_update_flash_X540(struct ixgbe_hw *hw);
static s32 ixgbe_poll_flash_update_done_X540(struct ixgbe_hw *hw);
@@ -226,6 +227,28 @@ mac_reset_top:
}
/**
+ * ixgbe_start_hw_X540 - Prepare hardware for Tx/Rx
+ * @hw: pointer to hardware structure
+ *
+ * Starts the hardware using the generic start_hw function
+ * and then the generation 2 start_hw function.
+ * Then performs revision-specific operations, if any.
+ **/
+static s32 ixgbe_start_hw_X540(struct ixgbe_hw *hw)
+{
+ s32 ret_val = 0;
+
+ ret_val = ixgbe_start_hw_generic(hw);
+ if (ret_val != 0)
+ goto out;
+
+ ret_val = ixgbe_start_hw_gen2(hw);
+ hw->mac.rx_pb_size = IXGBE_X540_RX_PB_SIZE;
+out:
+ return ret_val;
+}
+
+/**
* ixgbe_get_supported_physical_layer_X540 - Returns physical layer type
* @hw: pointer to hardware structure
*
@@ -281,74 +304,105 @@ static s32 ixgbe_init_eeprom_params_X540(struct ixgbe_hw *hw)
}
/**
- * ixgbe_read_eerd_X540 - Read EEPROM word using EERD
- * @hw: pointer to hardware structure
- * @offset: offset of word in the EEPROM to read
- * @data: word read from the EERPOM
+ * ixgbe_read_eerd_X540 - Read EEPROM word using EERD
+ * @hw: pointer to hardware structure
+ * @offset: offset of word in the EEPROM to read
+ * @data: word read from the EEPROM
+ *
+ * Reads a 16 bit word from the EEPROM using the EERD register.
**/
static s32 ixgbe_read_eerd_X540(struct ixgbe_hw *hw, u16 offset, u16 *data)
{
- s32 status;
+ s32 status = 0;
- if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM) == 0)
+ if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM) ==
+ 0)
status = ixgbe_read_eerd_generic(hw, offset, data);
else
status = IXGBE_ERR_SWFW_SYNC;
- ixgbe_release_swfw_sync_X540(hw, IXGBE_GSSR_EEP_SM);
+ hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
return status;
}
/**
- * ixgbe_write_eewr_X540 - Write EEPROM word using EEWR
- * @hw: pointer to hardware structure
- * @offset: offset of word in the EEPROM to write
- * @data: word write to the EEPROM
+ * ixgbe_read_eerd_buffer_X540 - Read EEPROM word(s) using EERD
+ * @hw: pointer to hardware structure
+ * @offset: offset of word in the EEPROM to read
+ * @words: number of words
+ * @data: word(s) read from the EEPROM
*
- * Write a 16 bit word to the EEPROM using the EEWR register.
+ * Reads 16 bit word(s) from the EEPROM using the EERD register.
**/
-static s32 ixgbe_write_eewr_X540(struct ixgbe_hw *hw, u16 offset, u16 data)
+static s32 ixgbe_read_eerd_buffer_X540(struct ixgbe_hw *hw,
+ u16 offset, u16 words, u16 *data)
{
- u32 eewr;
- s32 status;
+ s32 status = 0;
- hw->eeprom.ops.init_params(hw);
+ if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM) ==
+ 0)
+ status = ixgbe_read_eerd_buffer_generic(hw, offset,
+ words, data);
+ else
+ status = IXGBE_ERR_SWFW_SYNC;
- if (offset >= hw->eeprom.word_size) {
- status = IXGBE_ERR_EEPROM;
- goto out;
- }
+ hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
+ return status;
+}
- eewr = (offset << IXGBE_EEPROM_RW_ADDR_SHIFT) |
- (data << IXGBE_EEPROM_RW_REG_DATA) |
- IXGBE_EEPROM_RW_REG_START;
+/**
+ * ixgbe_write_eewr_X540 - Write EEPROM word using EEWR
+ * @hw: pointer to hardware structure
+ * @offset: offset of word in the EEPROM to write
+ * @data: word to write to the EEPROM
+ *
+ * Write a 16 bit word to the EEPROM using the EEWR register.
+ **/
+static s32 ixgbe_write_eewr_X540(struct ixgbe_hw *hw, u16 offset, u16 data)
+{
+ s32 status = 0;
- if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM) == 0) {
- status = ixgbe_poll_eerd_eewr_done(hw, IXGBE_NVM_POLL_WRITE);
- if (status != 0) {
- hw_dbg(hw, "Eeprom write EEWR timed out\n");
- goto out;
- }
+ if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM) == 0)
+ status = ixgbe_write_eewr_generic(hw, offset, data);
+ else
+ status = IXGBE_ERR_SWFW_SYNC;
- IXGBE_WRITE_REG(hw, IXGBE_EEWR, eewr);
+ hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
+ return status;
+}
- status = ixgbe_poll_eerd_eewr_done(hw, IXGBE_NVM_POLL_WRITE);
- if (status != 0) {
- hw_dbg(hw, "Eeprom write EEWR timed out\n");
- goto out;
- }
- } else {
+/**
+ * ixgbe_write_eewr_buffer_X540 - Write EEPROM word(s) using EEWR
+ * @hw: pointer to hardware structure
+ * @offset: offset of word in the EEPROM to write
+ * @words: number of words
+ * @data: word(s) to write to the EEPROM
+ *
+ * Writes 16 bit word(s) to the EEPROM using the EEWR register.
+ **/
+static s32 ixgbe_write_eewr_buffer_X540(struct ixgbe_hw *hw,
+ u16 offset, u16 words, u16 *data)
+{
+ s32 status = 0;
+
+ if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM) ==
+ 0)
+ status = ixgbe_write_eewr_buffer_generic(hw, offset,
+ words, data);
+ else
status = IXGBE_ERR_SWFW_SYNC;
- }
-out:
- ixgbe_release_swfw_sync_X540(hw, IXGBE_GSSR_EEP_SM);
+ hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
return status;
}
/**
- * ixgbe_calc_eeprom_checksum_X540 - Calculates and returns the checksum
- * @hw: pointer to hardware structure
+ * ixgbe_calc_eeprom_checksum_X540 - Calculates and returns the checksum
+ *
+ * This function does not use synchronization for EERD and EEWR. It can
+ * be used internally by functions which utilize ixgbe_acquire_swfw_sync_X540.
+ *
+ * @hw: pointer to hardware structure
**/
static u16 ixgbe_calc_eeprom_checksum_X540(struct ixgbe_hw *hw)
{
@@ -359,9 +413,15 @@ static u16 ixgbe_calc_eeprom_checksum_X540(struct ixgbe_hw *hw)
u16 pointer = 0;
u16 word = 0;
+ /*
+ * Do not use hw->eeprom.ops.read because we do not want to take
+ * the synchronization semaphores here. Instead use
+ * ixgbe_read_eerd_generic
+ */
+
/* Include 0x0-0x3F in the checksum */
for (i = 0; i < IXGBE_EEPROM_CHECKSUM; i++) {
- if (hw->eeprom.ops.read(hw, i, &word) != 0) {
+ if (ixgbe_read_eerd_generic(hw, i, &word) != 0) {
hw_dbg(hw, "EEPROM read failed\n");
break;
}
@@ -376,7 +436,7 @@ static u16 ixgbe_calc_eeprom_checksum_X540(struct ixgbe_hw *hw)
if (i == IXGBE_PHY_PTR || i == IXGBE_OPTION_ROM_PTR)
continue;
- if (hw->eeprom.ops.read(hw, i, &pointer) != 0) {
+ if (ixgbe_read_eerd_generic(hw, i, &pointer) != 0) {
hw_dbg(hw, "EEPROM read failed\n");
break;
}
@@ -386,7 +446,7 @@ static u16 ixgbe_calc_eeprom_checksum_X540(struct ixgbe_hw *hw)
pointer >= hw->eeprom.word_size)
continue;
- if (hw->eeprom.ops.read(hw, pointer, &length) != 0) {
+ if (ixgbe_read_eerd_generic(hw, pointer, &length) != 0) {
hw_dbg(hw, "EEPROM read failed\n");
break;
}
@@ -397,7 +457,7 @@ static u16 ixgbe_calc_eeprom_checksum_X540(struct ixgbe_hw *hw)
continue;
for (j = pointer+1; j <= pointer+length; j++) {
- if (hw->eeprom.ops.read(hw, j, &word) != 0) {
+ if (ixgbe_read_eerd_generic(hw, j, &word) != 0) {
hw_dbg(hw, "EEPROM read failed\n");
break;
}
@@ -411,6 +471,62 @@ static u16 ixgbe_calc_eeprom_checksum_X540(struct ixgbe_hw *hw)
}
/**
+ * ixgbe_validate_eeprom_checksum_X540 - Validate EEPROM checksum
+ * @hw: pointer to hardware structure
+ * @checksum_val: calculated checksum
+ *
+ * Performs checksum calculation and validates the EEPROM checksum. If the
+ * caller does not need checksum_val, the value can be NULL.
+ **/
+static s32 ixgbe_validate_eeprom_checksum_X540(struct ixgbe_hw *hw,
+ u16 *checksum_val)
+{
+ s32 status;
+ u16 checksum;
+ u16 read_checksum = 0;
+
+ /*
+ * Read the first word from the EEPROM. If this times out or fails, do
+ * not continue or we could be in for a very long wait while every
+ * EEPROM read fails
+ */
+ status = hw->eeprom.ops.read(hw, 0, &checksum);
+
+ if (status != 0) {
+ hw_dbg(hw, "EEPROM read failed\n");
+ goto out;
+ }
+
+ if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM) == 0) {
+ checksum = hw->eeprom.ops.calc_checksum(hw);
+
+ /*
+ * Do not use hw->eeprom.ops.read because we do not want to take
+ * the synchronization semaphores twice here.
+ */
+ ixgbe_read_eerd_generic(hw, IXGBE_EEPROM_CHECKSUM,
+ &read_checksum);
+
+ /*
+ * Verify read checksum from EEPROM is the same as
+ * calculated checksum
+ */
+ if (read_checksum != checksum)
+ status = IXGBE_ERR_EEPROM_CHECKSUM;
+
+ /* If the user cares, return the calculated checksum */
+ if (checksum_val)
+ *checksum_val = checksum;
+ } else {
+ status = IXGBE_ERR_SWFW_SYNC;
+ }
+
+ hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
+out:
+ return status;
+}
+
+/**
* ixgbe_update_eeprom_checksum_X540 - Updates the EEPROM checksum and flash
* @hw: pointer to hardware structure
*
@@ -421,11 +537,35 @@ static u16 ixgbe_calc_eeprom_checksum_X540(struct ixgbe_hw *hw)
static s32 ixgbe_update_eeprom_checksum_X540(struct ixgbe_hw *hw)
{
s32 status;
+ u16 checksum;
+
+ /*
+ * Read the first word from the EEPROM. If this times out or fails, do
+ * not continue or we could be in for a very long wait while every
+ * EEPROM read fails
+ */
+ status = hw->eeprom.ops.read(hw, 0, &checksum);
+
+ if (status != 0)
+ hw_dbg(hw, "EEPROM read failed\n");
- status = ixgbe_update_eeprom_checksum_generic(hw);
+ if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM) == 0) {
+ checksum = hw->eeprom.ops.calc_checksum(hw);
+
+ /*
+ * Do not use hw->eeprom.ops.write because we do not want to
+ * take the synchronization semaphores twice here.
+ */
+ status = ixgbe_write_eewr_generic(hw, IXGBE_EEPROM_CHECKSUM,
+ checksum);
- if (status)
+ if (status == 0)
status = ixgbe_update_flash_X540(hw);
+ else
+ status = IXGBE_ERR_SWFW_SYNC;
+ }
+
+ hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
return status;
}
@@ -452,7 +592,7 @@ static s32 ixgbe_update_flash_X540(struct ixgbe_hw *hw)
IXGBE_WRITE_REG(hw, IXGBE_EEC, flup);
status = ixgbe_poll_flash_update_done_X540(hw);
- if (status)
+ if (status == 0)
hw_dbg(hw, "Flash update complete\n");
else
hw_dbg(hw, "Flash update time out\n");
@@ -466,11 +606,10 @@ static s32 ixgbe_update_flash_X540(struct ixgbe_hw *hw)
}
status = ixgbe_poll_flash_update_done_X540(hw);
- if (status)
+ if (status == 0)
hw_dbg(hw, "Flash update complete\n");
else
hw_dbg(hw, "Flash update time out\n");
-
}
out:
return status;
@@ -542,7 +681,7 @@ static s32 ixgbe_acquire_swfw_sync_X540(struct ixgbe_hw *hw, u16 mask)
* resource (swmask)
*/
ixgbe_release_swfw_sync_semaphore(hw);
- msleep(5);
+ usleep_range(5000, 10000);
}
}
@@ -564,7 +703,7 @@ static s32 ixgbe_acquire_swfw_sync_X540(struct ixgbe_hw *hw, u16 mask)
}
}
- msleep(5);
+ usleep_range(5000, 10000);
return 0;
}
@@ -573,7 +712,7 @@ static s32 ixgbe_acquire_swfw_sync_X540(struct ixgbe_hw *hw, u16 mask)
* @hw: pointer to hardware structure
* @mask: Mask to specify which semaphore to release
*
- * Releases the SWFW semaphore throught the SW_FW_SYNC register
+ * Releases the SWFW semaphore through the SW_FW_SYNC register
* for the specified function (CSR, PHY0, PHY1, EVM, Flash)
**/
static void ixgbe_release_swfw_sync_X540(struct ixgbe_hw *hw, u16 mask)
@@ -588,7 +727,7 @@ static void ixgbe_release_swfw_sync_X540(struct ixgbe_hw *hw, u16 mask)
IXGBE_WRITE_REG(hw, IXGBE_SWFW_SYNC, swfw_sync);
ixgbe_release_swfw_sync_semaphore(hw);
- msleep(5);
+ usleep_range(5000, 10000);
}
/**
@@ -658,10 +797,70 @@ static void ixgbe_release_swfw_sync_semaphore(struct ixgbe_hw *hw)
IXGBE_WRITE_FLUSH(hw);
}
+/**
+ * ixgbe_blink_led_start_X540 - Blink LED based on index.
+ * @hw: pointer to hardware structure
+ * @index: led number to blink
+ *
+ * Devices that implement the version 2 interface:
+ * X540
+ **/
+static s32 ixgbe_blink_led_start_X540(struct ixgbe_hw *hw, u32 index)
+{
+ u32 macc_reg;
+ u32 ledctl_reg;
+
+ /*
+ * In order for the blink bit in the LED control register
+ * to work, link and speed must be forced in the MAC. We
+ * will reverse this when we stop the blinking.
+ */
+ macc_reg = IXGBE_READ_REG(hw, IXGBE_MACC);
+ macc_reg |= IXGBE_MACC_FLU | IXGBE_MACC_FSV_10G | IXGBE_MACC_FS;
+ IXGBE_WRITE_REG(hw, IXGBE_MACC, macc_reg);
+
+ /* Set the LED to LINK_UP + BLINK. */
+ ledctl_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
+ ledctl_reg &= ~IXGBE_LED_MODE_MASK(index);
+ ledctl_reg |= IXGBE_LED_BLINK(index);
+ IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, ledctl_reg);
+ IXGBE_WRITE_FLUSH(hw);
+
+ return 0;
+}
+
+/**
+ * ixgbe_blink_led_stop_X540 - Stop blinking LED based on index.
+ * @hw: pointer to hardware structure
+ * @index: led number to stop blinking
+ *
+ * Devices that implement the version 2 interface:
+ * X540
+ **/
+static s32 ixgbe_blink_led_stop_X540(struct ixgbe_hw *hw, u32 index)
+{
+ u32 macc_reg;
+ u32 ledctl_reg;
+
+ /* Restore the LED to its default value. */
+ ledctl_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
+ ledctl_reg &= ~IXGBE_LED_MODE_MASK(index);
+ ledctl_reg |= IXGBE_LED_LINK_ACTIVE << IXGBE_LED_MODE_SHIFT(index);
+ ledctl_reg &= ~IXGBE_LED_BLINK(index);
+ IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, ledctl_reg);
+
+ /* Unforce link and speed in the MAC. */
+ macc_reg = IXGBE_READ_REG(hw, IXGBE_MACC);
+ macc_reg &= ~(IXGBE_MACC_FLU | IXGBE_MACC_FSV_10G | IXGBE_MACC_FS);
+ IXGBE_WRITE_REG(hw, IXGBE_MACC, macc_reg);
+ IXGBE_WRITE_FLUSH(hw);
+
+ return 0;
+}
static struct ixgbe_mac_operations mac_ops_X540 = {
.init_hw = &ixgbe_init_hw_generic,
.reset_hw = &ixgbe_reset_hw_X540,
- .start_hw = &ixgbe_start_hw_generic,
+ .start_hw = &ixgbe_start_hw_X540,
.clear_hw_cntrs = &ixgbe_clear_hw_cntrs_generic,
.get_media_type = &ixgbe_get_media_type_X540,
.get_supported_physical_layer =
@@ -669,7 +868,7 @@ static struct ixgbe_mac_operations mac_ops_X540 = {
.enable_rx_dma = &ixgbe_enable_rx_dma_generic,
.get_mac_addr = &ixgbe_get_mac_addr_generic,
.get_san_mac_addr = &ixgbe_get_san_mac_addr_generic,
- .get_device_caps = NULL,
+ .get_device_caps = &ixgbe_get_device_caps_generic,
.get_wwn_prefix = &ixgbe_get_wwn_prefix_generic,
.stop_adapter = &ixgbe_stop_adapter_generic,
.get_bus_info = &ixgbe_get_bus_info_generic,
@@ -681,8 +880,8 @@ static struct ixgbe_mac_operations mac_ops_X540 = {
.get_link_capabilities = &ixgbe_get_copper_link_capabilities_generic,
.led_on = &ixgbe_led_on_generic,
.led_off = &ixgbe_led_off_generic,
- .blink_led_start = &ixgbe_blink_led_start_generic,
- .blink_led_stop = &ixgbe_blink_led_stop_generic,
+ .blink_led_start = &ixgbe_blink_led_start_X540,
+ .blink_led_stop = &ixgbe_blink_led_stop_X540,
.set_rar = &ixgbe_set_rar_generic,
.clear_rar = &ixgbe_clear_rar_generic,
.set_vmdq = &ixgbe_set_vmdq_generic,
@@ -705,9 +904,11 @@ static struct ixgbe_mac_operations mac_ops_X540 = {
static struct ixgbe_eeprom_operations eeprom_ops_X540 = {
.init_params = &ixgbe_init_eeprom_params_X540,
.read = &ixgbe_read_eerd_X540,
+ .read_buffer = &ixgbe_read_eerd_buffer_X540,
.write = &ixgbe_write_eewr_X540,
+ .write_buffer = &ixgbe_write_eewr_buffer_X540,
.calc_checksum = &ixgbe_calc_eeprom_checksum_X540,
- .validate_checksum = &ixgbe_validate_eeprom_checksum_generic,
+ .validate_checksum = &ixgbe_validate_eeprom_checksum_X540,
.update_checksum = &ixgbe_update_eeprom_checksum_X540,
};
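For readers following the ixgbe_x540.c hunks above: each of the new EEPROM accessors (read, write, and the buffered variants) wraps a generic EERD/EEWR helper in the same take-semaphore / do-work / release-semaphore shape, while the checksum code calls the generic helpers directly so the semaphore is only taken once. Below is a minimal sketch of that shape, assuming the driver's own headers (ixgbe_type.h, ixgbe_common.h) for every type and helper it names; it is not a drop-in replacement for any of the functions above.

/* Sketch only -- the acquire -> generic helper -> release pattern shared by
 * ixgbe_read_eerd_X540(), ixgbe_write_eewr_X540() and the *_buffer_X540()
 * variants in the hunks above. */
static s32 x540_eeprom_read_sketch(struct ixgbe_hw *hw, u16 offset, u16 *data)
{
	s32 status;

	if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM) == 0)
		status = ixgbe_read_eerd_generic(hw, offset, data);
	else
		status = IXGBE_ERR_SWFW_SYNC;

	/* Release unconditionally, including on the error path. */
	hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
	return status;
}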
diff --git a/drivers/net/ixgbevf/ethtool.c b/drivers/net/ixgbevf/ethtool.c
index 0563ab29264e..deee3754b1f7 100644
--- a/drivers/net/ixgbevf/ethtool.c
+++ b/drivers/net/ixgbevf/ethtool.c
@@ -104,11 +104,13 @@ static int ixgbevf_get_settings(struct net_device *netdev,
hw->mac.ops.check_link(hw, &link_speed, &link_up, false);
if (link_up) {
- ecmd->speed = (link_speed == IXGBE_LINK_SPEED_10GB_FULL) ?
- SPEED_10000 : SPEED_1000;
+ ethtool_cmd_speed_set(
+ ecmd,
+ (link_speed == IXGBE_LINK_SPEED_10GB_FULL) ?
+ SPEED_10000 : SPEED_1000);
ecmd->duplex = DUPLEX_FULL;
} else {
- ecmd->speed = -1;
+ ethtool_cmd_speed_set(ecmd, -1);
ecmd->duplex = -1;
}
diff --git a/drivers/net/ixgbevf/ixgbevf_main.c b/drivers/net/ixgbevf/ixgbevf_main.c
index 054ab05b7c6a..d7ab202fb95c 100644
--- a/drivers/net/ixgbevf/ixgbevf_main.c
+++ b/drivers/net/ixgbevf/ixgbevf_main.c
@@ -1460,6 +1460,34 @@ static void ixgbevf_restore_vlan(struct ixgbevf_adapter *adapter)
}
}
+static int ixgbevf_write_uc_addr_list(struct net_device *netdev)
+{
+ struct ixgbevf_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_hw *hw = &adapter->hw;
+ int count = 0;
+
+ if ((netdev_uc_count(netdev)) > 10) {
+ printk(KERN_ERR "Too many unicast filters - No Space\n");
+ return -ENOSPC;
+ }
+
+ if (!netdev_uc_empty(netdev)) {
+ struct netdev_hw_addr *ha;
+ netdev_for_each_uc_addr(ha, netdev) {
+ hw->mac.ops.set_uc_addr(hw, ++count, ha->addr);
+ udelay(200);
+ }
+ } else {
+ /*
+ * If the list is empty then send message to PF driver to
+ * clear all macvlans on this VF.
+ */
+ hw->mac.ops.set_uc_addr(hw, 0, NULL);
+ }
+
+ return count;
+}
+
/**
* ixgbevf_set_rx_mode - Multicast set
* @netdev: network interface device structure
@@ -1476,6 +1504,8 @@ static void ixgbevf_set_rx_mode(struct net_device *netdev)
/* reprogram multicast list */
if (hw->mac.ops.update_mc_addr_list)
hw->mac.ops.update_mc_addr_list(hw, netdev);
+
+ ixgbevf_write_uc_addr_list(netdev);
}
static void ixgbevf_napi_enable_all(struct ixgbevf_adapter *adapter)
@@ -1925,7 +1955,7 @@ static void ixgbevf_acquire_msix_vectors(struct ixgbevf_adapter *adapter,
}
/*
- * ixgbevf_set_num_queues: Allocate queues for device, feature dependant
+ * ixgbevf_set_num_queues: Allocate queues for device, feature dependent
* @adapter: board private structure to initialize
*
* This is the top level queue allocation routine. The order here is very
diff --git a/drivers/net/ixgbevf/mbx.h b/drivers/net/ixgbevf/mbx.h
index b2b5bf5daa3d..ea393eb03f3a 100644
--- a/drivers/net/ixgbevf/mbx.h
+++ b/drivers/net/ixgbevf/mbx.h
@@ -81,6 +81,7 @@
#define IXGBE_VF_SET_MULTICAST 0x03 /* VF requests PF to set MC addr */
#define IXGBE_VF_SET_VLAN 0x04 /* VF requests PF to set VLAN */
#define IXGBE_VF_SET_LPE 0x05 /* VF requests PF to set VMOLR.LPE */
+#define IXGBE_VF_SET_MACVLAN 0x06 /* VF requests PF for unicast filter */
/* length of permanent address message returned from PF */
#define IXGBE_VF_PERMADDR_MSG_LEN 4
diff --git a/drivers/net/ixgbevf/vf.c b/drivers/net/ixgbevf/vf.c
index eecd3bf6833f..aa3682e8c473 100644
--- a/drivers/net/ixgbevf/vf.c
+++ b/drivers/net/ixgbevf/vf.c
@@ -216,6 +216,39 @@ static s32 ixgbevf_get_mac_addr_vf(struct ixgbe_hw *hw, u8 *mac_addr)
return 0;
}
+static s32 ixgbevf_set_uc_addr_vf(struct ixgbe_hw *hw, u32 index, u8 *addr)
+{
+ struct ixgbe_mbx_info *mbx = &hw->mbx;
+ u32 msgbuf[3];
+ u8 *msg_addr = (u8 *)(&msgbuf[1]);
+ s32 ret_val;
+
+ memset(msgbuf, 0, sizeof(msgbuf));
+ /*
+ * If index is one then this is the start of a new list and needs
+ * indication to the PF so it can do its own list management.
+ * If it is zero then that tells the PF to just clear all of
+ * this VF's macvlans and there is no new list.
+ */
+ msgbuf[0] |= index << IXGBE_VT_MSGINFO_SHIFT;
+ msgbuf[0] |= IXGBE_VF_SET_MACVLAN;
+ if (addr)
+ memcpy(msg_addr, addr, 6);
+ ret_val = mbx->ops.write_posted(hw, msgbuf, 3);
+
+ if (!ret_val)
+ ret_val = mbx->ops.read_posted(hw, msgbuf, 3);
+
+ msgbuf[0] &= ~IXGBE_VT_MSGTYPE_CTS;
+
+ if (!ret_val)
+ if (msgbuf[0] ==
+ (IXGBE_VF_SET_MACVLAN | IXGBE_VT_MSGTYPE_NACK))
+ ret_val = -ENOMEM;
+
+ return ret_val;
+}
+
/**
* ixgbevf_set_rar_vf - set device MAC address
* @hw: pointer to hardware structure
@@ -378,6 +411,7 @@ static struct ixgbe_mac_operations ixgbevf_mac_ops = {
.check_link = ixgbevf_check_mac_link_vf,
.set_rar = ixgbevf_set_rar_vf,
.update_mc_addr_list = ixgbevf_update_mc_addr_list_vf,
+ .set_uc_addr = ixgbevf_set_uc_addr_vf,
.set_vfta = ixgbevf_set_vfta_vf,
};
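As a reading aid for the mailbox hunk above: IXGBE_VF_SET_MACVLAN is a three-word VF-to-PF message in which word 0 carries the opcode plus the filter index in the VT_MSGINFO field (index 0 tells the PF to clear all of this VF's macvlans), and the MAC address fills the following bytes. A rough sketch of building and posting it, using only names that appear in the hunk; ack/nack handling is trimmed.

/* Sketch of the VF -> PF unicast-filter request built by
 * ixgbevf_set_uc_addr_vf() above. */
static s32 vf_set_macvlan_sketch(struct ixgbe_hw *hw, u32 index, u8 *addr)
{
	struct ixgbe_mbx_info *mbx = &hw->mbx;
	u32 msgbuf[3] = { 0 };
	s32 ret_val;

	/* Word 0: filter index in the VT_MSGINFO field plus the opcode. */
	msgbuf[0] = (index << IXGBE_VT_MSGINFO_SHIFT) | IXGBE_VF_SET_MACVLAN;
	if (addr)
		memcpy(&msgbuf[1], addr, 6);	/* MAC fills words 1..2 */

	ret_val = mbx->ops.write_posted(hw, msgbuf, 3);
	if (!ret_val)
		ret_val = mbx->ops.read_posted(hw, msgbuf, 3);	/* PF reply */
	return ret_val;
}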
diff --git a/drivers/net/ixgbevf/vf.h b/drivers/net/ixgbevf/vf.h
index 23eb114c149f..10306b492ee6 100644
--- a/drivers/net/ixgbevf/vf.h
+++ b/drivers/net/ixgbevf/vf.h
@@ -62,6 +62,7 @@ struct ixgbe_mac_operations {
/* RAR, Multicast, VLAN */
s32 (*set_rar)(struct ixgbe_hw *, u32, u8 *, u32);
+ s32 (*set_uc_addr)(struct ixgbe_hw *, u32, u8 *);
s32 (*init_rx_addrs)(struct ixgbe_hw *);
s32 (*update_mc_addr_list)(struct ixgbe_hw *, struct net_device *);
s32 (*enable_mc)(struct ixgbe_hw *);
diff --git a/drivers/net/jme.c b/drivers/net/jme.c
index 994c80939c7a..b5b174a8c149 100644
--- a/drivers/net/jme.c
+++ b/drivers/net/jme.c
@@ -2230,17 +2230,9 @@ jme_change_mtu(struct net_device *netdev, int new_mtu)
jme_restart_rx_engine(jme);
}
- if (new_mtu > 1900) {
- netdev->features &= ~(NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
- NETIF_F_TSO | NETIF_F_TSO6);
- } else {
- if (test_bit(JME_FLAG_TXCSUM, &jme->flags))
- netdev->features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
- if (test_bit(JME_FLAG_TSO, &jme->flags))
- netdev->features |= NETIF_F_TSO | NETIF_F_TSO6;
- }
-
netdev->mtu = new_mtu;
+ netdev_update_features(netdev);
+
jme_reset_link(jme);
return 0;
@@ -2563,7 +2555,8 @@ jme_set_settings(struct net_device *netdev,
struct jme_adapter *jme = netdev_priv(netdev);
int rc, fdc = 0;
- if (ecmd->speed == SPEED_1000 && ecmd->autoneg != AUTONEG_ENABLE)
+ if (ethtool_cmd_speed(ecmd) == SPEED_1000
+ && ecmd->autoneg != AUTONEG_ENABLE)
return -EINVAL;
/*
@@ -2640,19 +2633,20 @@ jme_set_msglevel(struct net_device *netdev, u32 value)
}
static u32
-jme_get_rx_csum(struct net_device *netdev)
+jme_fix_features(struct net_device *netdev, u32 features)
{
- struct jme_adapter *jme = netdev_priv(netdev);
- return jme->reg_rxmcs & RXMCS_CHECKSUM;
+ if (netdev->mtu > 1900)
+ features &= ~(NETIF_F_ALL_TSO | NETIF_F_ALL_CSUM);
+ return features;
}
static int
-jme_set_rx_csum(struct net_device *netdev, u32 on)
+jme_set_features(struct net_device *netdev, u32 features)
{
struct jme_adapter *jme = netdev_priv(netdev);
spin_lock_bh(&jme->rxmcs_lock);
- if (on)
+ if (features & NETIF_F_RXCSUM)
jme->reg_rxmcs |= RXMCS_CHECKSUM;
else
jme->reg_rxmcs &= ~RXMCS_CHECKSUM;
@@ -2663,42 +2657,6 @@ jme_set_rx_csum(struct net_device *netdev, u32 on)
}
static int
-jme_set_tx_csum(struct net_device *netdev, u32 on)
-{
- struct jme_adapter *jme = netdev_priv(netdev);
-
- if (on) {
- set_bit(JME_FLAG_TXCSUM, &jme->flags);
- if (netdev->mtu <= 1900)
- netdev->features |=
- NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
- } else {
- clear_bit(JME_FLAG_TXCSUM, &jme->flags);
- netdev->features &=
- ~(NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM);
- }
-
- return 0;
-}
-
-static int
-jme_set_tso(struct net_device *netdev, u32 on)
-{
- struct jme_adapter *jme = netdev_priv(netdev);
-
- if (on) {
- set_bit(JME_FLAG_TSO, &jme->flags);
- if (netdev->mtu <= 1900)
- netdev->features |= NETIF_F_TSO | NETIF_F_TSO6;
- } else {
- clear_bit(JME_FLAG_TSO, &jme->flags);
- netdev->features &= ~(NETIF_F_TSO | NETIF_F_TSO6);
- }
-
- return 0;
-}
-
-static int
jme_nway_reset(struct net_device *netdev)
{
struct jme_adapter *jme = netdev_priv(netdev);
@@ -2839,11 +2797,6 @@ static const struct ethtool_ops jme_ethtool_ops = {
.get_link = jme_get_link,
.get_msglevel = jme_get_msglevel,
.set_msglevel = jme_set_msglevel,
- .get_rx_csum = jme_get_rx_csum,
- .set_rx_csum = jme_set_rx_csum,
- .set_tx_csum = jme_set_tx_csum,
- .set_tso = jme_set_tso,
- .set_sg = ethtool_op_set_sg,
.nway_reset = jme_nway_reset,
.get_eeprom_len = jme_get_eeprom_len,
.get_eeprom = jme_get_eeprom,
@@ -2903,6 +2856,8 @@ static const struct net_device_ops jme_netdev_ops = {
.ndo_change_mtu = jme_change_mtu,
.ndo_tx_timeout = jme_tx_timeout,
.ndo_vlan_rx_register = jme_vlan_rx_register,
+ .ndo_fix_features = jme_fix_features,
+ .ndo_set_features = jme_set_features,
};
static int __devinit
@@ -2957,6 +2912,12 @@ jme_init_one(struct pci_dev *pdev,
netdev->netdev_ops = &jme_netdev_ops;
netdev->ethtool_ops = &jme_ethtool_ops;
netdev->watchdog_timeo = TX_TIMEOUT;
+ netdev->hw_features = NETIF_F_IP_CSUM |
+ NETIF_F_IPV6_CSUM |
+ NETIF_F_SG |
+ NETIF_F_TSO |
+ NETIF_F_TSO6 |
+ NETIF_F_RXCSUM;
netdev->features = NETIF_F_IP_CSUM |
NETIF_F_IPV6_CSUM |
NETIF_F_SG |
@@ -3040,8 +3001,9 @@ jme_init_one(struct pci_dev *pdev,
jme->reg_txpfc = 0;
jme->reg_pmcs = PMCS_MFEN;
jme->reg_gpreg1 = GPREG1_DEFAULT;
- set_bit(JME_FLAG_TXCSUM, &jme->flags);
- set_bit(JME_FLAG_TSO, &jme->flags);
+
+ if (jme->reg_rxmcs & RXMCS_CHECKSUM)
+ netdev->features |= NETIF_F_RXCSUM;
/*
* Get Max Read Req Size from PCI Config Space
diff --git a/drivers/net/jme.h b/drivers/net/jme.h
index 8bf30451e821..e9aaeca96abc 100644
--- a/drivers/net/jme.h
+++ b/drivers/net/jme.h
@@ -468,8 +468,6 @@ struct jme_adapter {
enum jme_flags_bits {
JME_FLAG_MSI = 1,
JME_FLAG_SSET = 2,
- JME_FLAG_TXCSUM = 3,
- JME_FLAG_TSO = 4,
JME_FLAG_POLL = 5,
JME_FLAG_SHUTDOWN = 6,
};
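The jme conversion above is an instance of the then-new features API: ndo_fix_features removes offload bits the hardware cannot honour in the current configuration (here, TSO and checksum offload once the MTU exceeds 1900), ndo_set_features programs the hardware for whatever remains, and an MTU change only needs netdev_update_features() to re-run both. A bare-bones sketch of the pair follows; the empty set_features body stands in for the device-specific register write.

/* Illustrative only -- shape of the callbacks the jme patch adds above. */
static u32 example_fix_features(struct net_device *netdev, u32 features)
{
	/* Mask out what the MAC cannot do at large MTUs. */
	if (netdev->mtu > 1900)
		features &= ~(NETIF_F_ALL_TSO | NETIF_F_ALL_CSUM);
	return features;
}

static int example_set_features(struct net_device *netdev, u32 features)
{
	/* Program the hardware RX-checksum enable to match
	 * (features & NETIF_F_RXCSUM); jme does this under rxmcs_lock. */
	return 0;
}

At probe time the driver advertises the user-toggleable set through netdev->hw_features, as the jme_init_one() hunk above now does.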
diff --git a/drivers/net/ks8842.c b/drivers/net/ks8842.c
index efd44afeae83..f0d8346d0fa5 100644
--- a/drivers/net/ks8842.c
+++ b/drivers/net/ks8842.c
@@ -321,7 +321,7 @@ static void ks8842_reset_hw(struct ks8842_adapter *adapter)
/* RX 2 kb high watermark */
ks8842_write16(adapter, 0, 0x1000, REG_QRFCR);
- /* aggresive back off in half duplex */
+ /* aggressive back off in half duplex */
ks8842_enable_bits(adapter, 32, 1 << 8, REG_SGCR1);
/* enable no excessive collison drop */
diff --git a/drivers/net/ks8851.c b/drivers/net/ks8851.c
index 0fa4a9887ba2..bcd9ba68c9f2 100644
--- a/drivers/net/ks8851.c
+++ b/drivers/net/ks8851.c
@@ -141,7 +141,7 @@ static int msg_enable;
*
* All these calls issue SPI transactions to access the chip's registers. They
* all require that the necessary lock is held to prevent accesses when the
- * chip is busy transfering packet data (RX/TX FIFO accesses).
+ * chip is busy transferring packet data (RX/TX FIFO accesses).
*/
/**
@@ -483,7 +483,7 @@ static void ks8851_rx_pkts(struct ks8851_net *ks)
*
* This form of operation would require us to hold the SPI bus'
* chipselect low during the entie transaction to avoid any
- * reset to the data stream comming from the chip.
+ * reset to the data stream coming from the chip.
*/
for (; rxfc != 0; rxfc--) {
@@ -634,7 +634,7 @@ static void ks8851_irq_work(struct work_struct *work)
/**
* calc_txlen - calculate size of message to send packet
- * @len: Lenght of data
+ * @len: Length of data
*
* Returns the size of the TXFIFO message needed to send
* this packet.
@@ -1472,7 +1472,7 @@ static int ks8851_phy_reg(int reg)
* @reg: The register to read.
*
* This call reads data from the PHY register specified in @reg. Since the
- * device does not support all the MII registers, the non-existant values
+ * device does not support all the MII registers, the non-existent values
* are always returned as zero.
*
* We return zero for unsupported registers as the MII code does not check
diff --git a/drivers/net/ks8851_mll.c b/drivers/net/ks8851_mll.c
index 2e2c69b24062..61631cace913 100644
--- a/drivers/net/ks8851_mll.c
+++ b/drivers/net/ks8851_mll.c
@@ -470,7 +470,7 @@ static int msg_enable;
*
* All these calls issue transactions to access the chip's registers. They
* all require that the necessary lock is held to prevent accesses when the
- * chip is busy transfering packet data (RX/TX FIFO accesses).
+ * chip is busy transferring packet data (RX/TX FIFO accesses).
*/
/**
@@ -1364,7 +1364,7 @@ static int ks_phy_reg(int reg)
* @reg: The register to read.
*
* This call reads data from the PHY register specified in @reg. Since the
- * device does not support all the MII registers, the non-existant values
+ * device does not support all the MII registers, the non-existent values
* are always returned as zero.
*
* We return zero for unsupported registers as the MII code does not check
diff --git a/drivers/net/ksz884x.c b/drivers/net/ksz884x.c
index 7f7d5708a658..41ea5920c158 100644
--- a/drivers/net/ksz884x.c
+++ b/drivers/net/ksz884x.c
@@ -1221,7 +1221,6 @@ struct ksz_port_info {
#define LINK_INT_WORKING (1 << 0)
#define SMALL_PACKET_TX_BUG (1 << 1)
#define HALF_DUPLEX_SIGNAL_BUG (1 << 2)
-#define IPV6_CSUM_GEN_HACK (1 << 3)
#define RX_HUGE_FRAME (1 << 4)
#define STP_SUPPORT (1 << 8)
@@ -3748,7 +3747,6 @@ static int hw_init(struct ksz_hw *hw)
if (1 == rc)
hw->features |= HALF_DUPLEX_SIGNAL_BUG;
}
- hw->features |= IPV6_CSUM_GEN_HACK;
return rc;
}
@@ -4887,8 +4885,7 @@ static netdev_tx_t netdev_tx(struct sk_buff *skb, struct net_device *dev)
left = hw_alloc_pkt(hw, skb->len, num);
if (left) {
if (left < num ||
- ((hw->features & IPV6_CSUM_GEN_HACK) &&
- (CHECKSUM_PARTIAL == skb->ip_summed) &&
+ ((CHECKSUM_PARTIAL == skb->ip_summed) &&
(ETH_P_IPV6 == htons(skb->protocol)))) {
struct sk_buff *org_skb = skb;
@@ -6001,6 +5998,7 @@ static int netdev_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
struct dev_priv *priv = netdev_priv(dev);
struct dev_info *hw_priv = priv->adapter;
struct ksz_port *port = &priv->port;
+ u32 speed = ethtool_cmd_speed(cmd);
int rc;
/*
@@ -6009,11 +6007,11 @@ static int netdev_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
*/
if (cmd->autoneg && priv->advertising == cmd->advertising) {
cmd->advertising |= ADVERTISED_ALL;
- if (10 == cmd->speed)
+ if (10 == speed)
cmd->advertising &=
~(ADVERTISED_100baseT_Full |
ADVERTISED_100baseT_Half);
- else if (100 == cmd->speed)
+ else if (100 == speed)
cmd->advertising &=
~(ADVERTISED_10baseT_Full |
ADVERTISED_10baseT_Half);
@@ -6035,8 +6033,8 @@ static int netdev_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
port->force_link = 0;
} else {
port->duplex = cmd->duplex + 1;
- if (cmd->speed != 1000)
- port->speed = cmd->speed;
+ if (1000 != speed)
+ port->speed = speed;
if (cmd->autoneg)
port->force_link = 0;
else
@@ -6583,57 +6581,33 @@ static void netdev_get_ethtool_stats(struct net_device *dev,
}
/**
- * netdev_get_rx_csum - get receive checksum support
+ * netdev_set_features - set receive checksum support
* @dev: Network device.
- *
- * This function gets receive checksum support setting.
- *
- * Return true if receive checksum is enabled; false otherwise.
- */
-static u32 netdev_get_rx_csum(struct net_device *dev)
-{
- struct dev_priv *priv = netdev_priv(dev);
- struct dev_info *hw_priv = priv->adapter;
- struct ksz_hw *hw = &hw_priv->hw;
-
- return hw->rx_cfg &
- (DMA_RX_CSUM_UDP |
- DMA_RX_CSUM_TCP |
- DMA_RX_CSUM_IP);
-}
-
-/**
- * netdev_set_rx_csum - set receive checksum support
- * @dev: Network device.
- * @data: Zero to disable receive checksum support.
+ * @features: New device features (offloads).
*
* This function sets receive checksum support setting.
*
* Return 0 if successful; otherwise an error code.
*/
-static int netdev_set_rx_csum(struct net_device *dev, u32 data)
+static int netdev_set_features(struct net_device *dev, u32 features)
{
struct dev_priv *priv = netdev_priv(dev);
struct dev_info *hw_priv = priv->adapter;
struct ksz_hw *hw = &hw_priv->hw;
- u32 new_setting = hw->rx_cfg;
- if (data)
- new_setting |=
- (DMA_RX_CSUM_UDP | DMA_RX_CSUM_TCP |
- DMA_RX_CSUM_IP);
- else
- new_setting &=
- ~(DMA_RX_CSUM_UDP | DMA_RX_CSUM_TCP |
- DMA_RX_CSUM_IP);
- new_setting &= ~DMA_RX_CSUM_UDP;
mutex_lock(&hw_priv->lock);
- if (new_setting != hw->rx_cfg) {
- hw->rx_cfg = new_setting;
- if (hw->enabled)
- writel(hw->rx_cfg, hw->io + KS_DMA_RX_CTRL);
- }
+
+ /* see note in hw_setup() */
+ if (features & NETIF_F_RXCSUM)
+ hw->rx_cfg |= DMA_RX_CSUM_TCP | DMA_RX_CSUM_IP;
+ else
+ hw->rx_cfg &= ~(DMA_RX_CSUM_TCP | DMA_RX_CSUM_IP);
+
+ if (hw->enabled)
+ writel(hw->rx_cfg, hw->io + KS_DMA_RX_CTRL);
+
mutex_unlock(&hw_priv->lock);
+
return 0;
}
@@ -6658,12 +6632,6 @@ static struct ethtool_ops netdev_ethtool_ops = {
.get_strings = netdev_get_strings,
.get_sset_count = netdev_get_sset_count,
.get_ethtool_stats = netdev_get_ethtool_stats,
- .get_rx_csum = netdev_get_rx_csum,
- .set_rx_csum = netdev_set_rx_csum,
- .get_tx_csum = ethtool_op_get_tx_csum,
- .set_tx_csum = ethtool_op_set_tx_csum,
- .get_sg = ethtool_op_get_sg,
- .set_sg = ethtool_op_set_sg,
};
/*
@@ -6828,14 +6796,15 @@ static int __init netdev_init(struct net_device *dev)
/* 500 ms timeout */
dev->watchdog_timeo = HZ / 2;
- dev->features |= NETIF_F_IP_CSUM;
+ dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_SG | NETIF_F_RXCSUM;
/*
* Hardware does not really support IPv6 checksum generation, but
- * driver actually runs faster with this on. Refer IPV6_CSUM_GEN_HACK.
+ * driver actually runs faster with this on.
*/
- dev->features |= NETIF_F_IPV6_CSUM;
- dev->features |= NETIF_F_SG;
+ dev->hw_features |= NETIF_F_IPV6_CSUM;
+
+ dev->features |= dev->hw_features;
sema_init(&priv->proc_sem, 1);
@@ -6860,6 +6829,7 @@ static const struct net_device_ops netdev_ops = {
.ndo_start_xmit = netdev_tx,
.ndo_tx_timeout = netdev_tx_timeout,
.ndo_change_mtu = netdev_change_mtu,
+ .ndo_set_features = netdev_set_features,
.ndo_set_mac_address = netdev_set_mac_address,
.ndo_validate_addr = eth_validate_addr,
.ndo_do_ioctl = netdev_ioctl,
diff --git a/drivers/net/lib8390.c b/drivers/net/lib8390.c
index da74db4a03d4..17b75e5f1b0a 100644
--- a/drivers/net/lib8390.c
+++ b/drivers/net/lib8390.c
@@ -35,7 +35,7 @@
Alexey Kuznetsov : use the 8390's six bit hash multicast filter.
Paul Gortmaker : tweak ANK's above multicast changes a bit.
Paul Gortmaker : update packet statistics for v2.1.x
- Alan Cox : support arbitary stupid port mappings on the
+ Alan Cox : support arbitrary stupid port mappings on the
68K Macintosh. Support >16bit I/O spaces
Paul Gortmaker : add kmod support for auto-loading of the 8390
module by all drivers that require it.
@@ -121,7 +121,7 @@ static void __NS8390_init(struct net_device *dev, int startp);
/*
* SMP and the 8390 setup.
*
- * The 8390 isnt exactly designed to be multithreaded on RX/TX. There is
+ * The 8390 isn't exactly designed to be multithreaded on RX/TX. There is
* a page register that controls bank and packet buffer access. We guard
* this with ei_local->page_lock. Nobody should assume or set the page other
* than zero when the lock is not held. Lock holders must restore page 0
diff --git a/drivers/net/loopback.c b/drivers/net/loopback.c
index ea0dc451da9c..4ce9e5f2c069 100644
--- a/drivers/net/loopback.c
+++ b/drivers/net/loopback.c
@@ -173,7 +173,9 @@ static void loopback_setup(struct net_device *dev)
| NETIF_F_RXCSUM
| NETIF_F_HIGHDMA
| NETIF_F_LLTX
- | NETIF_F_NETNS_LOCAL;
+ | NETIF_F_NETNS_LOCAL
+ | NETIF_F_VLAN_CHALLENGED
+ | NETIF_F_LOOPBACK;
dev->ethtool_ops = &loopback_ethtool_ops;
dev->header_ops = &eth_header_ops;
dev->netdev_ops = &loopback_ops;
diff --git a/drivers/net/lp486e.c b/drivers/net/lp486e.c
index 3698824744cb..385a95311cd2 100644
--- a/drivers/net/lp486e.c
+++ b/drivers/net/lp486e.c
@@ -27,7 +27,7 @@
Credits:
Thanks to Murphy Software BV for letting me write this in their time.
- Well, actually, I get payed doing this...
+ Well, actually, I get paid doing this...
(Also: see http://www.murphy.nl for murphy, and my homepage ~ard for
more information on the Professional Workstation)
diff --git a/drivers/net/macb.c b/drivers/net/macb.c
index 79ccb54ab00c..629bd2649c0c 100644
--- a/drivers/net/macb.c
+++ b/drivers/net/macb.c
@@ -576,6 +576,11 @@ static irqreturn_t macb_interrupt(int irq, void *dev_id)
* add that if/when we get our hands on a full-blown MII PHY.
*/
+ if (status & MACB_BIT(ISR_ROVR)) {
+ /* We missed at least one packet */
+ bp->hw_stats.rx_overruns++;
+ }
+
if (status & MACB_BIT(HRESP)) {
/*
* TODO: Reset the hardware, and maybe move the printk
@@ -1024,7 +1029,8 @@ static struct net_device_stats *macb_get_stats(struct net_device *dev)
hwstat->rx_jabbers +
hwstat->rx_undersize_pkts +
hwstat->rx_length_mismatch);
- nstat->rx_over_errors = hwstat->rx_resource_errors;
+ nstat->rx_over_errors = hwstat->rx_resource_errors +
+ hwstat->rx_overruns;
nstat->rx_crc_errors = hwstat->rx_fcs_errors;
nstat->rx_frame_errors = hwstat->rx_align_errors;
nstat->rx_fifo_errors = hwstat->rx_overruns;
@@ -1171,8 +1177,7 @@ static int __init macb_probe(struct platform_device *pdev)
}
dev->irq = platform_get_irq(pdev, 0);
- err = request_irq(dev->irq, macb_interrupt, IRQF_SAMPLE_RANDOM,
- dev->name, dev);
+ err = request_irq(dev->irq, macb_interrupt, 0, dev->name, dev);
if (err) {
printk(KERN_ERR
"%s: Unable to request IRQ %d (error %d)\n",
diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
index 78e34e9e4f00..d7c0bc62da7f 100644
--- a/drivers/net/macvlan.c
+++ b/drivers/net/macvlan.c
@@ -415,7 +415,7 @@ static struct lock_class_key macvlan_netdev_addr_lock_key;
#define MACVLAN_FEATURES \
(NETIF_F_SG | NETIF_F_ALL_CSUM | NETIF_F_HIGHDMA | NETIF_F_FRAGLIST | \
NETIF_F_GSO | NETIF_F_TSO | NETIF_F_UFO | NETIF_F_GSO_ROBUST | \
- NETIF_F_TSO_ECN | NETIF_F_TSO6 | NETIF_F_GRO)
+ NETIF_F_TSO_ECN | NETIF_F_TSO6 | NETIF_F_GRO | NETIF_F_RXCSUM)
#define MACVLAN_STATE_MASK \
((1<<__LINK_STATE_NOCARRIER) | (1<<__LINK_STATE_DORMANT))
@@ -517,12 +517,6 @@ static void macvlan_ethtool_get_drvinfo(struct net_device *dev,
snprintf(drvinfo->version, 32, "0.1");
}
-static u32 macvlan_ethtool_get_rx_csum(struct net_device *dev)
-{
- const struct macvlan_dev *vlan = netdev_priv(dev);
- return dev_ethtool_get_rx_csum(vlan->lowerdev);
-}
-
static int macvlan_ethtool_get_settings(struct net_device *dev,
struct ethtool_cmd *cmd)
{
@@ -530,18 +524,10 @@ static int macvlan_ethtool_get_settings(struct net_device *dev,
return dev_ethtool_get_settings(vlan->lowerdev, cmd);
}
-static u32 macvlan_ethtool_get_flags(struct net_device *dev)
-{
- const struct macvlan_dev *vlan = netdev_priv(dev);
- return dev_ethtool_get_flags(vlan->lowerdev);
-}
-
static const struct ethtool_ops macvlan_ethtool_ops = {
.get_link = ethtool_op_get_link,
.get_settings = macvlan_ethtool_get_settings,
- .get_rx_csum = macvlan_ethtool_get_rx_csum,
.get_drvinfo = macvlan_ethtool_get_drvinfo,
- .get_flags = macvlan_ethtool_get_flags,
};
static const struct net_device_ops macvlan_netdev_ops = {
@@ -799,6 +785,7 @@ static int macvlan_device_event(struct notifier_block *unused,
struct net_device *dev = ptr;
struct macvlan_dev *vlan, *next;
struct macvlan_port *port;
+ LIST_HEAD(list_kill);
if (!macvlan_port_exists(dev))
return NOTIFY_DONE;
@@ -824,7 +811,9 @@ static int macvlan_device_event(struct notifier_block *unused,
break;
list_for_each_entry_safe(vlan, next, &port->vlans, list)
- vlan->dev->rtnl_link_ops->dellink(vlan->dev, NULL);
+ vlan->dev->rtnl_link_ops->dellink(vlan->dev, &list_kill);
+ unregister_netdevice_many(&list_kill);
+ list_del(&list_kill);
break;
case NETDEV_PRE_TYPE_CHANGE:
/* Forbid underlaying device to change its type. */
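The notifier change above also switches macvlan to the batched-unregister idiom: every child device is queued onto a local list by the dellink op and the whole list is torn down with a single unregister_netdevice_many() call, avoiding a repeat of the per-device unregister work (notably the RCU synchronisation) for every child -- the usual motivation for this idiom. In sketch form, with the loop lifted straight from the hunk; the caller is assumed to hold RTNL, as the notifier does.

/* Sketch of the batched teardown now used in macvlan_device_event(). */
static void macvlan_kill_all_sketch(struct macvlan_port *port)
{
	struct macvlan_dev *vlan, *next;
	LIST_HEAD(list_kill);

	list_for_each_entry_safe(vlan, next, &port->vlans, list)
		vlan->dev->rtnl_link_ops->dellink(vlan->dev, &list_kill);

	unregister_netdevice_many(&list_kill);	/* one pass for the batch */
	list_del(&list_kill);
}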
diff --git a/drivers/net/mdio.c b/drivers/net/mdio.c
index e85bf04cf813..16fbb11d92ac 100644
--- a/drivers/net/mdio.c
+++ b/drivers/net/mdio.c
@@ -176,6 +176,9 @@ static u32 mdio45_get_an(const struct mdio_if_info *mdio, u16 addr)
* @npage_adv: Modes currently advertised on next pages
* @npage_lpa: Modes advertised by link partner on next pages
*
+ * The @ecmd parameter is expected to have been cleared before calling
+ * mdio45_ethtool_gset_npage().
+ *
* Since the CSRs for auto-negotiation using next pages are not fully
* standardised, this function does not attempt to decode them. The
* caller must pass them in.
@@ -185,6 +188,7 @@ void mdio45_ethtool_gset_npage(const struct mdio_if_info *mdio,
u32 npage_adv, u32 npage_lpa)
{
int reg;
+ u32 speed;
ecmd->transceiver = XCVR_INTERNAL;
ecmd->phy_address = mdio->prtad;
@@ -287,33 +291,36 @@ void mdio45_ethtool_gset_npage(const struct mdio_if_info *mdio,
if (modes & (ADVERTISED_10000baseT_Full |
ADVERTISED_10000baseKX4_Full |
ADVERTISED_10000baseKR_Full)) {
- ecmd->speed = SPEED_10000;
+ speed = SPEED_10000;
ecmd->duplex = DUPLEX_FULL;
} else if (modes & (ADVERTISED_1000baseT_Full |
ADVERTISED_1000baseT_Half |
ADVERTISED_1000baseKX_Full)) {
- ecmd->speed = SPEED_1000;
+ speed = SPEED_1000;
ecmd->duplex = !(modes & ADVERTISED_1000baseT_Half);
} else if (modes & (ADVERTISED_100baseT_Full |
ADVERTISED_100baseT_Half)) {
- ecmd->speed = SPEED_100;
+ speed = SPEED_100;
ecmd->duplex = !!(modes & ADVERTISED_100baseT_Full);
} else {
- ecmd->speed = SPEED_10;
+ speed = SPEED_10;
ecmd->duplex = !!(modes & ADVERTISED_10baseT_Full);
}
} else {
/* Report forced settings */
reg = mdio->mdio_read(mdio->dev, mdio->prtad, MDIO_MMD_PMAPMD,
MDIO_CTRL1);
- ecmd->speed = (((reg & MDIO_PMA_CTRL1_SPEED1000) ? 100 : 1) *
- ((reg & MDIO_PMA_CTRL1_SPEED100) ? 100 : 10));
+ speed = (((reg & MDIO_PMA_CTRL1_SPEED1000) ? 100 : 1)
+ * ((reg & MDIO_PMA_CTRL1_SPEED100) ? 100 : 10));
ecmd->duplex = (reg & MDIO_CTRL1_FULLDPLX ||
- ecmd->speed == SPEED_10000);
+ speed == SPEED_10000);
}
+ ethtool_cmd_speed_set(ecmd, speed);
+
/* 10GBASE-T MDI/MDI-X */
- if (ecmd->port == PORT_TP && ecmd->speed == SPEED_10000) {
+ if (ecmd->port == PORT_TP
+ && (ethtool_cmd_speed(ecmd) == SPEED_10000)) {
switch (mdio->mdio_read(mdio->dev, mdio->prtad, MDIO_MMD_PMAPMD,
MDIO_PMA_10GBT_SWAPPOL)) {
case MDIO_PMA_10GBT_SWAPPOL_ABNX | MDIO_PMA_10GBT_SWAPPOL_CDNX:
diff --git a/drivers/net/meth.h b/drivers/net/meth.h
index a78dc1ca8c29..5b145c6bad60 100644
--- a/drivers/net/meth.h
+++ b/drivers/net/meth.h
@@ -144,7 +144,7 @@ typedef struct rx_packet {
/* Bits 22 through 28 are used to determine IPGR2 */
#define METH_REV_SHIFT 29 /* Bits 29 through 31 are used to determine the revision */
- /* 000: Inital revision */
+ /* 000: Initial revision */
/* 001: First revision, Improved TX concatenation */
@@ -193,7 +193,7 @@ typedef struct rx_packet {
/* 1: A TX message had the INT request bit set, the packet has been sent. */
#define METH_INT_TX_LINK_FAIL BIT(2) /* 0: No interrupt pending, 1: PHY has reported a link failure */
#define METH_INT_MEM_ERROR BIT(3) /* 0: No interrupt pending */
- /* 1: A memory error occurred durring DMA, DMA stopped, Fatal */
+ /* 1: A memory error occurred during DMA, DMA stopped, Fatal */
#define METH_INT_TX_ABORT BIT(4) /* 0: No interrupt pending, 1: The TX aborted operation, DMA stopped, FATAL */
#define METH_INT_RX_THRESHOLD BIT(5) /* 0: No interrupt pending, 1: Selected receive threshold condition Valid */
#define METH_INT_RX_UNDERFLOW BIT(6) /* 0: No interrupt pending, 1: FIFO was empty, packet could not be queued */
diff --git a/drivers/net/mii.c b/drivers/net/mii.c
index 0a6c6a2e7550..c62e7816d548 100644
--- a/drivers/net/mii.c
+++ b/drivers/net/mii.c
@@ -49,6 +49,10 @@ static u32 mii_get_an(struct mii_if_info *mii, u16 addr)
result |= ADVERTISED_100baseT_Half;
if (advert & ADVERTISE_100FULL)
result |= ADVERTISED_100baseT_Full;
+ if (advert & ADVERTISE_PAUSE_CAP)
+ result |= ADVERTISED_Pause;
+ if (advert & ADVERTISE_PAUSE_ASYM)
+ result |= ADVERTISED_Asym_Pause;
return result;
}
@@ -58,6 +62,9 @@ static u32 mii_get_an(struct mii_if_info *mii, u16 addr)
* @mii: MII interface
* @ecmd: requested ethtool_cmd
*
+ * The @ecmd parameter is expected to have been cleared before calling
+ * mii_ethtool_gset().
+ *
* Returns 0 for success, negative on error.
*/
int mii_ethtool_gset(struct mii_if_info *mii, struct ethtool_cmd *ecmd)
@@ -118,22 +125,25 @@ int mii_ethtool_gset(struct mii_if_info *mii, struct ethtool_cmd *ecmd)
if (nego & (ADVERTISED_1000baseT_Full |
ADVERTISED_1000baseT_Half)) {
- ecmd->speed = SPEED_1000;
+ ethtool_cmd_speed_set(ecmd, SPEED_1000);
ecmd->duplex = !!(nego & ADVERTISED_1000baseT_Full);
} else if (nego & (ADVERTISED_100baseT_Full |
ADVERTISED_100baseT_Half)) {
- ecmd->speed = SPEED_100;
+ ethtool_cmd_speed_set(ecmd, SPEED_100);
ecmd->duplex = !!(nego & ADVERTISED_100baseT_Full);
} else {
- ecmd->speed = SPEED_10;
+ ethtool_cmd_speed_set(ecmd, SPEED_10);
ecmd->duplex = !!(nego & ADVERTISED_10baseT_Full);
}
} else {
ecmd->autoneg = AUTONEG_DISABLE;
- ecmd->speed = ((bmcr & BMCR_SPEED1000 &&
- (bmcr & BMCR_SPEED100) == 0) ? SPEED_1000 :
- (bmcr & BMCR_SPEED100) ? SPEED_100 : SPEED_10);
+ ethtool_cmd_speed_set(ecmd,
+ ((bmcr & BMCR_SPEED1000 &&
+ (bmcr & BMCR_SPEED100) == 0) ?
+ SPEED_1000 :
+ ((bmcr & BMCR_SPEED100) ?
+ SPEED_100 : SPEED_10)));
ecmd->duplex = (bmcr & BMCR_FULLDPLX) ? DUPLEX_FULL : DUPLEX_HALF;
}
@@ -154,10 +164,11 @@ int mii_ethtool_gset(struct mii_if_info *mii, struct ethtool_cmd *ecmd)
int mii_ethtool_sset(struct mii_if_info *mii, struct ethtool_cmd *ecmd)
{
struct net_device *dev = mii->dev;
+ u32 speed = ethtool_cmd_speed(ecmd);
- if (ecmd->speed != SPEED_10 &&
- ecmd->speed != SPEED_100 &&
- ecmd->speed != SPEED_1000)
+ if (speed != SPEED_10 &&
+ speed != SPEED_100 &&
+ speed != SPEED_1000)
return -EINVAL;
if (ecmd->duplex != DUPLEX_HALF && ecmd->duplex != DUPLEX_FULL)
return -EINVAL;
@@ -169,7 +180,7 @@ int mii_ethtool_sset(struct mii_if_info *mii, struct ethtool_cmd *ecmd)
return -EINVAL;
if (ecmd->autoneg != AUTONEG_DISABLE && ecmd->autoneg != AUTONEG_ENABLE)
return -EINVAL;
- if ((ecmd->speed == SPEED_1000) && (!mii->supports_gmii))
+ if ((speed == SPEED_1000) && (!mii->supports_gmii))
return -EINVAL;
/* ignore supported, maxtxpkt, maxrxpkt */
@@ -227,9 +238,9 @@ int mii_ethtool_sset(struct mii_if_info *mii, struct ethtool_cmd *ecmd)
bmcr = mii->mdio_read(dev, mii->phy_id, MII_BMCR);
tmp = bmcr & ~(BMCR_ANENABLE | BMCR_SPEED100 |
BMCR_SPEED1000 | BMCR_FULLDPLX);
- if (ecmd->speed == SPEED_1000)
+ if (speed == SPEED_1000)
tmp |= BMCR_SPEED1000;
- else if (ecmd->speed == SPEED_100)
+ else if (speed == SPEED_100)
tmp |= BMCR_SPEED100;
if (ecmd->duplex == DUPLEX_FULL) {
tmp |= BMCR_FULLDPLX;
diff --git a/drivers/net/mlx4/en_ethtool.c b/drivers/net/mlx4/en_ethtool.c
index d54b7abf0225..2e858e4dcf4d 100644
--- a/drivers/net/mlx4/en_ethtool.c
+++ b/drivers/net/mlx4/en_ethtool.c
@@ -57,37 +57,6 @@ mlx4_en_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *drvinfo)
drvinfo->eedump_len = 0;
}
-static u32 mlx4_en_get_tso(struct net_device *dev)
-{
- return (dev->features & NETIF_F_TSO) != 0;
-}
-
-static int mlx4_en_set_tso(struct net_device *dev, u32 data)
-{
- struct mlx4_en_priv *priv = netdev_priv(dev);
-
- if (data) {
- if (!priv->mdev->LSO_support)
- return -EPERM;
- dev->features |= (NETIF_F_TSO | NETIF_F_TSO6);
- } else
- dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO6);
- return 0;
-}
-
-static u32 mlx4_en_get_rx_csum(struct net_device *dev)
-{
- struct mlx4_en_priv *priv = netdev_priv(dev);
- return priv->rx_csum;
-}
-
-static int mlx4_en_set_rx_csum(struct net_device *dev, u32 data)
-{
- struct mlx4_en_priv *priv = netdev_priv(dev);
- priv->rx_csum = (data != 0);
- return 0;
-}
-
static const char main_strings[][ETH_GSTRING_LEN] = {
"rx_packets", "tx_packets", "rx_bytes", "tx_bytes", "rx_errors",
"tx_errors", "rx_dropped", "tx_dropped", "multicast", "collisions",
@@ -296,10 +265,10 @@ static int mlx4_en_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
trans_type = priv->port_state.transciver;
if (netif_carrier_ok(dev)) {
- cmd->speed = priv->port_state.link_speed;
+ ethtool_cmd_speed_set(cmd, priv->port_state.link_speed);
cmd->duplex = DUPLEX_FULL;
} else {
- cmd->speed = -1;
+ ethtool_cmd_speed_set(cmd, -1);
cmd->duplex = -1;
}
@@ -323,7 +292,8 @@ static int mlx4_en_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
static int mlx4_en_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
if ((cmd->autoneg == AUTONEG_ENABLE) ||
- (cmd->speed != SPEED_10000) || (cmd->duplex != DUPLEX_FULL))
+ (ethtool_cmd_speed(cmd) != SPEED_10000) ||
+ (cmd->duplex != DUPLEX_FULL))
return -EINVAL;
/* Nothing to change */
@@ -483,17 +453,7 @@ const struct ethtool_ops mlx4_en_ethtool_ops = {
.get_drvinfo = mlx4_en_get_drvinfo,
.get_settings = mlx4_en_get_settings,
.set_settings = mlx4_en_set_settings,
-#ifdef NETIF_F_TSO
- .get_tso = mlx4_en_get_tso,
- .set_tso = mlx4_en_set_tso,
-#endif
- .get_sg = ethtool_op_get_sg,
- .set_sg = ethtool_op_set_sg,
.get_link = ethtool_op_get_link,
- .get_rx_csum = mlx4_en_get_rx_csum,
- .set_rx_csum = mlx4_en_set_rx_csum,
- .get_tx_csum = ethtool_op_get_tx_csum,
- .set_tx_csum = ethtool_op_set_tx_ipv6_csum,
.get_strings = mlx4_en_get_strings,
.get_sset_count = mlx4_en_get_sset_count,
.get_ethtool_stats = mlx4_en_get_ethtool_stats,
@@ -508,7 +468,6 @@ const struct ethtool_ops mlx4_en_ethtool_ops = {
.set_pauseparam = mlx4_en_set_pauseparam,
.get_ringparam = mlx4_en_get_ringparam,
.set_ringparam = mlx4_en_set_ringparam,
- .get_flags = ethtool_op_get_flags,
};
diff --git a/drivers/net/mlx4/en_main.c b/drivers/net/mlx4/en_main.c
index 9317b61a75b8..9276b1b25586 100644
--- a/drivers/net/mlx4/en_main.c
+++ b/drivers/net/mlx4/en_main.c
@@ -236,7 +236,7 @@ static void *mlx4_en_add(struct mlx4_dev *dev)
goto err_mr;
}
- /* Configure wich ports to start according to module parameters */
+ /* Configure which ports to start according to module parameters */
mdev->port_cnt = 0;
mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_ETH)
mdev->port_cnt++;
diff --git a/drivers/net/mlx4/en_netdev.c b/drivers/net/mlx4/en_netdev.c
index 4f158baa0246..61850adae6f7 100644
--- a/drivers/net/mlx4/en_netdev.c
+++ b/drivers/net/mlx4/en_netdev.c
@@ -247,7 +247,7 @@ static void mlx4_en_do_set_multicast(struct work_struct *work)
priv->port);
if (err)
en_err(priv, "Failed enabling "
- "promiscous mode\n");
+ "promiscuous mode\n");
/* Disable port multicast filter (unconditionally) */
err = mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0,
@@ -276,7 +276,7 @@ static void mlx4_en_do_set_multicast(struct work_struct *work)
}
/*
- * Not in promiscous mode
+ * Not in promiscuous mode
*/
if (priv->flags & MLX4_EN_FLAG_PROMISC) {
@@ -292,14 +292,14 @@ static void mlx4_en_do_set_multicast(struct work_struct *work)
err = mlx4_unicast_promisc_remove(mdev->dev, priv->base_qpn,
priv->port);
if (err)
- en_err(priv, "Failed disabling promiscous mode\n");
+ en_err(priv, "Failed disabling promiscuous mode\n");
/* Disable Multicast promisc */
if (priv->flags & MLX4_EN_FLAG_MC_PROMISC) {
err = mlx4_multicast_promisc_remove(mdev->dev, priv->base_qpn,
priv->port);
if (err)
- en_err(priv, "Failed disabling multicast promiscous mode\n");
+ en_err(priv, "Failed disabling multicast promiscuous mode\n");
priv->flags &= ~MLX4_EN_FLAG_MC_PROMISC;
}
@@ -331,7 +331,7 @@ static void mlx4_en_do_set_multicast(struct work_struct *work)
err = mlx4_multicast_promisc_remove(mdev->dev, priv->base_qpn,
priv->port);
if (err)
- en_err(priv, "Failed disabling multicast promiscous mode\n");
+ en_err(priv, "Failed disabling multicast promiscuous mode\n");
priv->flags &= ~MLX4_EN_FLAG_MC_PROMISC;
}
@@ -1083,7 +1083,6 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
priv->prof = prof;
priv->port = port;
priv->port_up = false;
- priv->rx_csum = 1;
priv->flags = prof->flags;
priv->tx_ring_num = prof->tx_ring_num;
priv->rx_ring_num = prof->rx_ring_num;
@@ -1141,21 +1140,16 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
/*
* Set driver features
*/
- dev->features |= NETIF_F_SG;
- dev->vlan_features |= NETIF_F_SG;
- dev->features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
- dev->vlan_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
- dev->features |= NETIF_F_HIGHDMA;
- dev->features |= NETIF_F_HW_VLAN_TX |
- NETIF_F_HW_VLAN_RX |
- NETIF_F_HW_VLAN_FILTER;
- dev->features |= NETIF_F_GRO;
- if (mdev->LSO_support) {
- dev->features |= NETIF_F_TSO;
- dev->features |= NETIF_F_TSO6;
- dev->vlan_features |= NETIF_F_TSO;
- dev->vlan_features |= NETIF_F_TSO6;
- }
+ dev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
+ if (mdev->LSO_support)
+ dev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6;
+
+ dev->vlan_features = dev->hw_features;
+
+ dev->hw_features |= NETIF_F_RXCSUM;
+ dev->features = dev->hw_features | NETIF_F_HIGHDMA |
+ NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX |
+ NETIF_F_HW_VLAN_FILTER;
mdev->pndev[port] = dev;
diff --git a/drivers/net/mlx4/en_rx.c b/drivers/net/mlx4/en_rx.c
index 05998ee297c9..277215fb9d72 100644
--- a/drivers/net/mlx4/en_rx.c
+++ b/drivers/net/mlx4/en_rx.c
@@ -345,6 +345,8 @@ int mlx4_en_activate_rx_rings(struct mlx4_en_priv *priv)
err = mlx4_en_init_allocator(priv, ring);
if (err) {
en_err(priv, "Failed initializing ring allocator\n");
+ if (ring->stride <= TXBB_SIZE)
+ ring->buf -= TXBB_SIZE;
ring_ind--;
goto err_allocator;
}
@@ -369,6 +371,8 @@ err_buffers:
ring_ind = priv->rx_ring_num - 1;
err_allocator:
while (ring_ind >= 0) {
+ if (priv->rx_ring[ring_ind].stride <= TXBB_SIZE)
+ priv->rx_ring[ring_ind].buf -= TXBB_SIZE;
mlx4_en_destroy_allocator(priv, &priv->rx_ring[ring_ind]);
ring_ind--;
}
@@ -580,7 +584,7 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
ring->bytes += length;
ring->packets++;
- if (likely(priv->rx_csum)) {
+ if (likely(dev->features & NETIF_F_RXCSUM)) {
if ((cqe->status & cpu_to_be16(MLX4_CQE_STATUS_IPOK)) &&
(cqe->checksum == cpu_to_be16(0xffff))) {
priv->port_stats.rx_chksum_good++;
@@ -706,7 +710,7 @@ int mlx4_en_poll_rx_cq(struct napi_struct *napi, int budget)
}
-/* Calculate the last offset position that accomodates a full fragment
+/* Calculate the last offset position that accommodates a full fragment
* (assuming fragment size = stride-align) */
static int mlx4_en_last_alloc_offset(struct mlx4_en_priv *priv, u16 stride, u16 align)
{
diff --git a/drivers/net/mlx4/en_selftest.c b/drivers/net/mlx4/en_selftest.c
index 9c91a92da705..191a8dcd8a93 100644
--- a/drivers/net/mlx4/en_selftest.c
+++ b/drivers/net/mlx4/en_selftest.c
@@ -149,7 +149,7 @@ void mlx4_en_ex_selftest(struct net_device *dev, u32 *flags, u64 *buf)
netif_carrier_off(dev);
retry_tx:
- /* Wait untill all tx queues are empty.
+ /* Wait until all tx queues are empty.
* there should not be any additional incoming traffic
* since we turned the carrier off */
msleep(200);
diff --git a/drivers/net/mlx4/en_tx.c b/drivers/net/mlx4/en_tx.c
index 01feb8fd42ad..b229acf1855f 100644
--- a/drivers/net/mlx4/en_tx.c
+++ b/drivers/net/mlx4/en_tx.c
@@ -636,7 +636,7 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
if (unlikely(!real_size))
goto tx_drop;
- /* Allign descriptor to TXBB size */
+ /* Align descriptor to TXBB size */
desc_size = ALIGN(real_size, TXBB_SIZE);
nr_txbb = desc_size / TXBB_SIZE;
if (unlikely(nr_txbb > MAX_DESC_TXBBS)) {
diff --git a/drivers/net/mlx4/main.c b/drivers/net/mlx4/main.c
index 62fa7eec5f0c..3814fc9b1145 100644
--- a/drivers/net/mlx4/main.c
+++ b/drivers/net/mlx4/main.c
@@ -944,6 +944,10 @@ static int mlx4_setup_hca(struct mlx4_dev *dev)
}
for (port = 1; port <= dev->caps.num_ports; port++) {
+ enum mlx4_port_type port_type = 0;
+ mlx4_SENSE_PORT(dev, port, &port_type);
+ if (port_type)
+ dev->caps.port_type[port] = port_type;
ib_port_default_caps = 0;
err = mlx4_get_port_ib_caps(dev, port, &ib_port_default_caps);
if (err)
@@ -958,6 +962,7 @@ static int mlx4_setup_hca(struct mlx4_dev *dev)
goto err_mcg_table_free;
}
}
+ mlx4_set_port_mask(dev);
return 0;
diff --git a/drivers/net/mlx4/mcg.c b/drivers/net/mlx4/mcg.c
index c6d336aed2d9..e63c37d6a115 100644
--- a/drivers/net/mlx4/mcg.c
+++ b/drivers/net/mlx4/mcg.c
@@ -222,7 +222,7 @@ static int existing_steering_entry(struct mlx4_dev *dev, u8 vep_num, u8 port,
/* the given qpn is listed as a promisc qpn
* we need to add it as a duplicate to this entry
- * for future refernce */
+ * for future reference */
list_for_each_entry(dqp, &entry->duplicates, list) {
if (qpn == dqp->qpn)
return 0; /* qp is already duplicated */
diff --git a/drivers/net/mlx4/mlx4.h b/drivers/net/mlx4/mlx4.h
index c1e0e5f1bcdb..dd7d745fbab4 100644
--- a/drivers/net/mlx4/mlx4.h
+++ b/drivers/net/mlx4/mlx4.h
@@ -431,6 +431,8 @@ void mlx4_srq_event(struct mlx4_dev *dev, u32 srqn, int event_type);
void mlx4_handle_catas_err(struct mlx4_dev *dev);
+int mlx4_SENSE_PORT(struct mlx4_dev *dev, int port,
+ enum mlx4_port_type *type);
void mlx4_do_sense_ports(struct mlx4_dev *dev,
enum mlx4_port_type *stype,
enum mlx4_port_type *defaults);
diff --git a/drivers/net/mlx4/mlx4_en.h b/drivers/net/mlx4/mlx4_en.h
index e30f6099c0de..0b5150df0585 100644
--- a/drivers/net/mlx4/mlx4_en.h
+++ b/drivers/net/mlx4/mlx4_en.h
@@ -451,7 +451,6 @@ struct mlx4_en_priv {
int registered;
int allocated;
int stride;
- int rx_csum;
u64 mac;
int mac_index;
unsigned max_mtu;
diff --git a/drivers/net/mlx4/port.c b/drivers/net/mlx4/port.c
index eca7d8596f87..8856659fb43c 100644
--- a/drivers/net/mlx4/port.c
+++ b/drivers/net/mlx4/port.c
@@ -172,7 +172,7 @@ int mlx4_register_mac(struct mlx4_dev *dev, u8 port, u64 mac, int *qpn, u8 wrap)
}
if (mac == (MLX4_MAC_MASK & be64_to_cpu(table->entries[i]))) {
- /* MAC already registered, increase refernce count */
+ /* MAC already registered, increase reference count */
++table->refs[i];
goto out;
}
@@ -360,7 +360,7 @@ int mlx4_register_vlan(struct mlx4_dev *dev, u8 port, u16 vlan, int *index)
if (table->refs[i] &&
(vlan == (MLX4_VLAN_MASK &
be32_to_cpu(table->entries[i])))) {
- /* Vlan already registered, increase refernce count */
+ /* Vlan already registered, increase reference count */
*index = i;
++table->refs[i];
goto out;
diff --git a/drivers/net/mlx4/sense.c b/drivers/net/mlx4/sense.c
index 015fbe785c13..e2337a7411d9 100644
--- a/drivers/net/mlx4/sense.c
+++ b/drivers/net/mlx4/sense.c
@@ -38,8 +38,8 @@
#include "mlx4.h"
-static int mlx4_SENSE_PORT(struct mlx4_dev *dev, int port,
- enum mlx4_port_type *type)
+int mlx4_SENSE_PORT(struct mlx4_dev *dev, int port,
+ enum mlx4_port_type *type)
{
u64 out_param;
int err = 0;
diff --git a/drivers/net/mv643xx_eth.c b/drivers/net/mv643xx_eth.c
index 34425b94452f..a5d9b1c310b3 100644
--- a/drivers/net/mv643xx_eth.c
+++ b/drivers/net/mv643xx_eth.c
@@ -1444,13 +1444,13 @@ mv643xx_eth_get_settings_phyless(struct mv643xx_eth_private *mp,
cmd->advertising = ADVERTISED_MII;
switch (port_status & PORT_SPEED_MASK) {
case PORT_SPEED_10:
- cmd->speed = SPEED_10;
+ ethtool_cmd_speed_set(cmd, SPEED_10);
break;
case PORT_SPEED_100:
- cmd->speed = SPEED_100;
+ ethtool_cmd_speed_set(cmd, SPEED_100);
break;
case PORT_SPEED_1000:
- cmd->speed = SPEED_1000;
+ ethtool_cmd_speed_set(cmd, SPEED_1000);
break;
default:
cmd->speed = -1;
@@ -1575,18 +1575,12 @@ mv643xx_eth_set_ringparam(struct net_device *dev, struct ethtool_ringparam *er)
return 0;
}
-static u32
-mv643xx_eth_get_rx_csum(struct net_device *dev)
-{
- struct mv643xx_eth_private *mp = netdev_priv(dev);
-
- return !!(rdlp(mp, PORT_CONFIG) & 0x02000000);
-}
static int
-mv643xx_eth_set_rx_csum(struct net_device *dev, u32 rx_csum)
+mv643xx_eth_set_features(struct net_device *dev, u32 features)
{
struct mv643xx_eth_private *mp = netdev_priv(dev);
+ u32 rx_csum = features & NETIF_F_RXCSUM;
wrlp(mp, PORT_CONFIG, rx_csum ? 0x02000000 : 0x00000000);
@@ -1634,11 +1628,6 @@ static void mv643xx_eth_get_ethtool_stats(struct net_device *dev,
}
}
-static int mv643xx_eth_set_flags(struct net_device *dev, u32 data)
-{
- return ethtool_op_set_flags(dev, data, ETH_FLAG_LRO);
-}
-
static int mv643xx_eth_get_sset_count(struct net_device *dev, int sset)
{
if (sset == ETH_SS_STATS)
@@ -1657,14 +1646,8 @@ static const struct ethtool_ops mv643xx_eth_ethtool_ops = {
.set_coalesce = mv643xx_eth_set_coalesce,
.get_ringparam = mv643xx_eth_get_ringparam,
.set_ringparam = mv643xx_eth_set_ringparam,
- .get_rx_csum = mv643xx_eth_get_rx_csum,
- .set_rx_csum = mv643xx_eth_set_rx_csum,
- .set_tx_csum = ethtool_op_set_tx_csum,
- .set_sg = ethtool_op_set_sg,
.get_strings = mv643xx_eth_get_strings,
.get_ethtool_stats = mv643xx_eth_get_ethtool_stats,
- .get_flags = ethtool_op_get_flags,
- .set_flags = mv643xx_eth_set_flags,
.get_sset_count = mv643xx_eth_get_sset_count,
};
@@ -2264,7 +2247,7 @@ static void port_start(struct mv643xx_eth_private *mp)
* frames to RX queue #0, and include the pseudo-header when
* calculating receive checksums.
*/
- wrlp(mp, PORT_CONFIG, 0x02000000);
+ mv643xx_eth_set_features(mp->dev, mp->dev->features);
/*
* Treat BPDUs as normal multicasts, and disable partition mode.
@@ -2848,6 +2831,7 @@ static const struct net_device_ops mv643xx_eth_netdev_ops = {
.ndo_validate_addr = eth_validate_addr,
.ndo_do_ioctl = mv643xx_eth_ioctl,
.ndo_change_mtu = mv643xx_eth_change_mtu,
+ .ndo_set_features = mv643xx_eth_set_features,
.ndo_tx_timeout = mv643xx_eth_tx_timeout,
.ndo_get_stats = mv643xx_eth_get_stats,
#ifdef CONFIG_NET_POLL_CONTROLLER
@@ -2930,7 +2914,9 @@ static int mv643xx_eth_probe(struct platform_device *pdev)
dev->watchdog_timeo = 2 * HZ;
dev->base_addr = 0;
- dev->features = NETIF_F_SG | NETIF_F_IP_CSUM;
+ dev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM |
+ NETIF_F_RXCSUM | NETIF_F_LRO;
+ dev->features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM;
dev->vlan_features = NETIF_F_SG | NETIF_F_IP_CSUM;
SET_NETDEV_DEV(dev, &pdev->dev);
diff --git a/drivers/net/myri10ge/myri10ge.c b/drivers/net/myri10ge/myri10ge.c
index 673dc600c891..b1358f79ba0a 100644
--- a/drivers/net/myri10ge/myri10ge.c
+++ b/drivers/net/myri10ge/myri10ge.c
@@ -205,7 +205,6 @@ struct myri10ge_priv {
int tx_boundary; /* boundary transmits cannot cross */
int num_slices;
int running; /* running? */
- int csum_flag; /* rx_csums? */
int small_bytes;
int big_bytes;
int max_intr_slots;
@@ -1386,7 +1385,7 @@ myri10ge_rx_done(struct myri10ge_slice_state *ss, int len, __wsum csum,
skb->protocol = eth_type_trans(skb, dev);
skb_record_rx_queue(skb, ss - &mgp->ss[0]);
- if (mgp->csum_flag) {
+ if (dev->features & NETIF_F_RXCSUM) {
if ((skb->protocol == htons(ETH_P_IP)) ||
(skb->protocol == htons(ETH_P_IPV6))) {
skb->csum = csum;
@@ -1645,7 +1644,7 @@ myri10ge_get_settings(struct net_device *netdev, struct ethtool_cmd *cmd)
int i;
cmd->autoneg = AUTONEG_DISABLE;
- cmd->speed = SPEED_10000;
+ ethtool_cmd_speed_set(cmd, SPEED_10000);
cmd->duplex = DUPLEX_FULL;
/*
@@ -1757,43 +1756,6 @@ myri10ge_get_ringparam(struct net_device *netdev,
ring->tx_pending = ring->tx_max_pending;
}
-static u32 myri10ge_get_rx_csum(struct net_device *netdev)
-{
- struct myri10ge_priv *mgp = netdev_priv(netdev);
-
- if (mgp->csum_flag)
- return 1;
- else
- return 0;
-}
-
-static int myri10ge_set_rx_csum(struct net_device *netdev, u32 csum_enabled)
-{
- struct myri10ge_priv *mgp = netdev_priv(netdev);
- int err = 0;
-
- if (csum_enabled)
- mgp->csum_flag = MXGEFW_FLAGS_CKSUM;
- else {
- netdev->features &= ~NETIF_F_LRO;
- mgp->csum_flag = 0;
-
- }
- return err;
-}
-
-static int myri10ge_set_tso(struct net_device *netdev, u32 tso_enabled)
-{
- struct myri10ge_priv *mgp = netdev_priv(netdev);
- u32 flags = mgp->features & (NETIF_F_TSO6 | NETIF_F_TSO);
-
- if (tso_enabled)
- netdev->features |= flags;
- else
- netdev->features &= ~flags;
- return 0;
-}
-
static const char myri10ge_gstrings_main_stats[][ETH_GSTRING_LEN] = {
"rx_packets", "tx_packets", "rx_bytes", "tx_bytes", "rx_errors",
"tx_errors", "rx_dropped", "tx_dropped", "multicast", "collisions",
@@ -1944,11 +1906,6 @@ static u32 myri10ge_get_msglevel(struct net_device *netdev)
return mgp->msg_enable;
}
-static int myri10ge_set_flags(struct net_device *netdev, u32 value)
-{
- return ethtool_op_set_flags(netdev, value, ETH_FLAG_LRO);
-}
-
static const struct ethtool_ops myri10ge_ethtool_ops = {
.get_settings = myri10ge_get_settings,
.get_drvinfo = myri10ge_get_drvinfo,
@@ -1957,19 +1914,12 @@ static const struct ethtool_ops myri10ge_ethtool_ops = {
.get_pauseparam = myri10ge_get_pauseparam,
.set_pauseparam = myri10ge_set_pauseparam,
.get_ringparam = myri10ge_get_ringparam,
- .get_rx_csum = myri10ge_get_rx_csum,
- .set_rx_csum = myri10ge_set_rx_csum,
- .set_tx_csum = ethtool_op_set_tx_hw_csum,
- .set_sg = ethtool_op_set_sg,
- .set_tso = myri10ge_set_tso,
.get_link = ethtool_op_get_link,
.get_strings = myri10ge_get_strings,
.get_sset_count = myri10ge_get_sset_count,
.get_ethtool_stats = myri10ge_get_ethtool_stats,
.set_msglevel = myri10ge_set_msglevel,
.get_msglevel = myri10ge_get_msglevel,
- .get_flags = ethtool_op_get_flags,
- .set_flags = myri10ge_set_flags
};
static int myri10ge_allocate_rings(struct myri10ge_slice_state *ss)
@@ -3136,6 +3086,14 @@ static int myri10ge_set_mac_address(struct net_device *dev, void *addr)
return 0;
}
+static u32 myri10ge_fix_features(struct net_device *dev, u32 features)
+{
+ if (!(features & NETIF_F_RXCSUM))
+ features &= ~NETIF_F_LRO;
+
+ return features;
+}
+
static int myri10ge_change_mtu(struct net_device *dev, int new_mtu)
{
struct myri10ge_priv *mgp = netdev_priv(dev);
@@ -3702,7 +3660,7 @@ abort:
/*
* This function determines the number of slices supported.
- * The number slices is the minumum of the number of CPUS,
+ * The number of slices is the minimum of the number of CPUs,
* the number of MSI-X irqs supported, the number of slices
* supported by the firmware
*/
@@ -3834,6 +3792,7 @@ static const struct net_device_ops myri10ge_netdev_ops = {
.ndo_get_stats = myri10ge_get_stats,
.ndo_validate_addr = eth_validate_addr,
.ndo_change_mtu = myri10ge_change_mtu,
+ .ndo_fix_features = myri10ge_fix_features,
.ndo_set_multicast_list = myri10ge_set_multicast_list,
.ndo_set_mac_address = myri10ge_set_mac_address,
};
@@ -3860,7 +3819,6 @@ static int myri10ge_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
mgp = netdev_priv(netdev);
mgp->dev = netdev;
mgp->pdev = pdev;
- mgp->csum_flag = MXGEFW_FLAGS_CKSUM;
mgp->pause = myri10ge_flow_control;
mgp->intr_coal_delay = myri10ge_intr_coal_delay;
mgp->msg_enable = netif_msg_init(myri10ge_debug, MYRI10GE_MSG_DEFAULT);
@@ -3976,11 +3934,11 @@ static int myri10ge_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
netdev->netdev_ops = &myri10ge_netdev_ops;
netdev->mtu = myri10ge_initial_mtu;
netdev->base_addr = mgp->iomem_base;
- netdev->features = mgp->features;
+ netdev->hw_features = mgp->features | NETIF_F_LRO | NETIF_F_RXCSUM;
+ netdev->features = netdev->hw_features;
if (dac_enabled)
netdev->features |= NETIF_F_HIGHDMA;
- netdev->features |= NETIF_F_LRO;
netdev->vlan_features |= mgp->features;
if (mgp->fw_ver_tiny < 37)
diff --git a/drivers/net/myri_sbus.c b/drivers/net/myri_sbus.c
index a761076b69c3..53aeea4b536e 100644
--- a/drivers/net/myri_sbus.c
+++ b/drivers/net/myri_sbus.c
@@ -1009,7 +1009,7 @@ static int __devinit myri_sbus_probe(struct platform_device *op)
/* Map in the MyriCOM register/localram set. */
if (mp->eeprom.cpuvers < CPUVERS_4_0) {
- /* XXX Makes no sense, if control reg is non-existant this
+ /* XXX Makes no sense, if control reg is non-existent this
* XXX driver cannot function at all... maybe pre-4.0 is
* XXX only a valid version for PCI cards? Ask feldy...
*/
diff --git a/drivers/net/natsemi.c b/drivers/net/natsemi.c
index 2fd39630b1e5..b78be088c4ad 100644
--- a/drivers/net/natsemi.c
+++ b/drivers/net/natsemi.c
@@ -203,7 +203,7 @@ skbuff at an offset of "+2", 16-byte aligning the IP header.
IIId. Synchronization
Most operations are synchronized on the np->lock irq spinlock, except the
-recieve and transmit paths which are synchronised using a combination of
+receive and transmit paths which are synchronised using a combination of
hardware descriptor ownership, disabling interrupts and NAPI poll scheduling.
IVb. References
@@ -726,7 +726,7 @@ static void move_int_phy(struct net_device *dev, int addr)
* There are two addresses we must avoid:
* - the address on the external phy that is used for transmission.
* - the address that we want to access. User space can access phys
- * on the mii bus with SIOCGMIIREG/SIOCSMIIREG, independant from the
+ * on the mii bus with SIOCGMIIREG/SIOCSMIIREG, independent from the
* phy that is used for transmission.
*/
@@ -860,6 +860,9 @@ static int __devinit natsemi_probe1 (struct pci_dev *pdev,
prev_eedata = eedata;
}
+ /* Store MAC Address in perm_addr */
+ memcpy(dev->perm_addr, dev->dev_addr, ETH_ALEN);
+
dev->base_addr = (unsigned long __force) ioaddr;
dev->irq = irq;
@@ -1982,7 +1985,7 @@ static void init_ring(struct net_device *dev)
np->rx_head_desc = &np->rx_ring[0];
- /* Please be carefull before changing this loop - at least gcc-2.95.1
+ /* Please be careful before changing this loop - at least gcc-2.95.1
* miscompiles it otherwise.
*/
/* Initialize all Rx descriptors. */
@@ -2817,7 +2820,7 @@ static int netdev_get_ecmd(struct net_device *dev, struct ethtool_cmd *ecmd)
u32 tmp;
ecmd->port = dev->if_port;
- ecmd->speed = np->speed;
+ ethtool_cmd_speed_set(ecmd, np->speed);
ecmd->duplex = np->duplex;
ecmd->autoneg = np->autoneg;
ecmd->advertising = 0;
@@ -2875,9 +2878,9 @@ static int netdev_get_ecmd(struct net_device *dev, struct ethtool_cmd *ecmd)
tmp = mii_nway_result(
np->advertising & mdio_read(dev, MII_LPA));
if (tmp == LPA_100FULL || tmp == LPA_100HALF)
- ecmd->speed = SPEED_100;
+ ethtool_cmd_speed_set(ecmd, SPEED_100);
else
- ecmd->speed = SPEED_10;
+ ethtool_cmd_speed_set(ecmd, SPEED_10);
if (tmp == LPA_100FULL || tmp == LPA_10FULL)
ecmd->duplex = DUPLEX_FULL;
else
@@ -2905,7 +2908,8 @@ static int netdev_set_ecmd(struct net_device *dev, struct ethtool_cmd *ecmd)
return -EINVAL;
}
} else if (ecmd->autoneg == AUTONEG_DISABLE) {
- if (ecmd->speed != SPEED_10 && ecmd->speed != SPEED_100)
+ u32 speed = ethtool_cmd_speed(ecmd);
+ if (speed != SPEED_10 && speed != SPEED_100)
return -EINVAL;
if (ecmd->duplex != DUPLEX_HALF && ecmd->duplex != DUPLEX_FULL)
return -EINVAL;
@@ -2953,7 +2957,7 @@ static int netdev_set_ecmd(struct net_device *dev, struct ethtool_cmd *ecmd)
if (ecmd->advertising & ADVERTISED_100baseT_Full)
np->advertising |= ADVERTISE_100FULL;
} else {
- np->speed = ecmd->speed;
+ np->speed = ethtool_cmd_speed(ecmd);
np->duplex = ecmd->duplex;
/* user overriding the initial full duplex parm? */
if (np->duplex == DUPLEX_HALF)
diff --git a/drivers/net/ne3210.c b/drivers/net/ne3210.c
index 243ed2aee88e..e8984b0ca521 100644
--- a/drivers/net/ne3210.c
+++ b/drivers/net/ne3210.c
@@ -80,17 +80,20 @@ static void ne3210_block_output(struct net_device *dev, int count, const unsigne
#define NE3210_DEBUG 0x0
-static unsigned char irq_map[] __initdata = {15, 12, 11, 10, 9, 7, 5, 3};
-static unsigned int shmem_map[] __initdata = {0xff0, 0xfe0, 0xfff0, 0xd8, 0xffe0, 0xffc0, 0xd0, 0x0};
-static const char *ifmap[] __initdata = {"UTP", "?", "BNC", "AUI"};
-static int ifmap_val[] __initdata = {
+static const unsigned char irq_map[] __devinitconst =
+ { 15, 12, 11, 10, 9, 7, 5, 3 };
+static const unsigned int shmem_map[] __devinitconst =
+ { 0xff0, 0xfe0, 0xfff0, 0xd8, 0xffe0, 0xffc0, 0xd0, 0x0 };
+static const char *const ifmap[] __devinitconst =
+ { "UTP", "?", "BNC", "AUI" };
+static const int ifmap_val[] __devinitconst = {
IF_PORT_10BASET,
IF_PORT_UNKNOWN,
IF_PORT_10BASE2,
IF_PORT_AUI,
};
-static int __init ne3210_eisa_probe (struct device *device)
+static int __devinit ne3210_eisa_probe (struct device *device)
{
unsigned long ioaddr, phys_mem;
int i, retval, port_index;
@@ -313,7 +316,7 @@ static void ne3210_block_output(struct net_device *dev, int count,
memcpy_toio(shmem, buf, count);
}
-static struct eisa_device_id ne3210_ids[] = {
+static const struct eisa_device_id ne3210_ids[] __devinitconst = {
{ "EGL0101" },
{ "NVL1801" },
{ "" },
diff --git a/drivers/net/netconsole.c b/drivers/net/netconsole.c
index dfb67eb2a94b..a83e101440fd 100644
--- a/drivers/net/netconsole.c
+++ b/drivers/net/netconsole.c
@@ -242,34 +242,6 @@ static struct netconsole_target *to_target(struct config_item *item)
}
/*
- * Wrapper over simple_strtol (base 10) with sanity and range checking.
- * We return (signed) long only because we may want to return errors.
- * Do not use this to convert numbers that are allowed to be negative.
- */
-static long strtol10_check_range(const char *cp, long min, long max)
-{
- long ret;
- char *p = (char *) cp;
-
- WARN_ON(min < 0);
- WARN_ON(max < min);
-
- ret = simple_strtol(p, &p, 10);
-
- if (*p && (*p != '\n')) {
- printk(KERN_ERR "netconsole: invalid input\n");
- return -EINVAL;
- }
- if ((ret < min) || (ret > max)) {
- printk(KERN_ERR "netconsole: input %ld must be between "
- "%ld and %ld\n", ret, min, max);
- return -EINVAL;
- }
-
- return ret;
-}
-
-/*
* Attribute operations for netconsole_target.
*/
@@ -327,12 +299,14 @@ static ssize_t store_enabled(struct netconsole_target *nt,
const char *buf,
size_t count)
{
+ int enabled;
int err;
- long enabled;
- enabled = strtol10_check_range(buf, 0, 1);
- if (enabled < 0)
- return enabled;
+ err = kstrtoint(buf, 10, &enabled);
+ if (err < 0)
+ return err;
+ if (enabled < 0 || enabled > 1)
+ return -EINVAL;
if (enabled) { /* 1 */
@@ -384,8 +358,7 @@ static ssize_t store_local_port(struct netconsole_target *nt,
const char *buf,
size_t count)
{
- long local_port;
-#define __U16_MAX ((__u16) ~0U)
+ int rv;
if (nt->enabled) {
printk(KERN_ERR "netconsole: target (%s) is enabled, "
@@ -394,12 +367,9 @@ static ssize_t store_local_port(struct netconsole_target *nt,
return -EINVAL;
}
- local_port = strtol10_check_range(buf, 0, __U16_MAX);
- if (local_port < 0)
- return local_port;
-
- nt->np.local_port = local_port;
-
+ rv = kstrtou16(buf, 10, &nt->np.local_port);
+ if (rv < 0)
+ return rv;
return strnlen(buf, count);
}
@@ -407,8 +377,7 @@ static ssize_t store_remote_port(struct netconsole_target *nt,
const char *buf,
size_t count)
{
- long remote_port;
-#define __U16_MAX ((__u16) ~0U)
+ int rv;
if (nt->enabled) {
printk(KERN_ERR "netconsole: target (%s) is enabled, "
@@ -417,12 +386,9 @@ static ssize_t store_remote_port(struct netconsole_target *nt,
return -EINVAL;
}
- remote_port = strtol10_check_range(buf, 0, __U16_MAX);
- if (remote_port < 0)
- return remote_port;
-
- nt->np.remote_port = remote_port;
-
+ rv = kstrtou16(buf, 10, &nt->np.remote_port);
+ if (rv < 0)
+ return rv;
return strnlen(buf, count);
}
@@ -463,8 +429,6 @@ static ssize_t store_remote_mac(struct netconsole_target *nt,
size_t count)
{
u8 remote_mac[ETH_ALEN];
- char *p = (char *) buf;
- int i;
if (nt->enabled) {
printk(KERN_ERR "netconsole: target (%s) is enabled, "
@@ -473,23 +437,13 @@ static ssize_t store_remote_mac(struct netconsole_target *nt,
return -EINVAL;
}
- for (i = 0; i < ETH_ALEN - 1; i++) {
- remote_mac[i] = simple_strtoul(p, &p, 16);
- if (*p != ':')
- goto invalid;
- p++;
- }
- remote_mac[ETH_ALEN - 1] = simple_strtoul(p, &p, 16);
- if (*p && (*p != '\n'))
- goto invalid;
-
+ if (!mac_pton(buf, remote_mac))
+ return -EINVAL;
+ if (buf[3 * ETH_ALEN - 1] && buf[3 * ETH_ALEN - 1] != '\n')
+ return -EINVAL;
memcpy(nt->np.remote_mac, remote_mac, ETH_ALEN);
return strnlen(buf, count);
-
-invalid:
- printk(KERN_ERR "netconsole: invalid input\n");
- return -EINVAL;
}
/*
@@ -671,6 +625,7 @@ static int netconsole_netdev_event(struct notifier_block *this,
goto done;
spin_lock_irqsave(&target_list_lock, flags);
+restart:
list_for_each_entry(nt, &target_list, list) {
netconsole_target_get(nt);
if (nt->np.dev == dev) {
@@ -683,9 +638,16 @@ static int netconsole_netdev_event(struct notifier_block *this,
* rtnl_lock already held
*/
if (nt->np.dev) {
+ spin_unlock_irqrestore(
+ &target_list_lock,
+ flags);
__netpoll_cleanup(&nt->np);
+ spin_lock_irqsave(&target_list_lock,
+ flags);
dev_put(nt->np.dev);
nt->np.dev = NULL;
+ netconsole_target_put(nt);
+ goto restart;
}
/* Fall through */
case NETDEV_GOING_DOWN:
diff --git a/drivers/net/netxen/netxen_nic.h b/drivers/net/netxen/netxen_nic.h
index d7299f1a4940..77220687b92a 100644
--- a/drivers/net/netxen/netxen_nic.h
+++ b/drivers/net/netxen/netxen_nic.h
@@ -174,7 +174,7 @@
#define MAX_NUM_CARDS 4
-#define MAX_BUFFERS_PER_CMD 32
+#define NETXEN_MAX_FRAGS_PER_TX 14
#define MAX_TSO_HEADER_DESC 2
#define MGMT_CMD_DESC_RESV 4
#define TX_STOP_THRESH ((MAX_SKB_FRAGS >> 2) + MAX_TSO_HEADER_DESC \
@@ -558,7 +558,7 @@ struct netxen_recv_crb {
*/
struct netxen_cmd_buffer {
struct sk_buff *skb;
- struct netxen_skb_frag frag_array[MAX_BUFFERS_PER_CMD + 1];
+ struct netxen_skb_frag frag_array[MAX_SKB_FRAGS + 1];
u32 frag_count;
};
@@ -1177,7 +1177,7 @@ struct netxen_adapter {
u8 max_sds_rings;
u8 driver_mismatch;
u8 msix_supported;
- u8 rx_csum;
+ u8 __pad;
u8 pci_using_dac;
u8 portnum;
u8 physical_port;
diff --git a/drivers/net/netxen/netxen_nic_ethtool.c b/drivers/net/netxen/netxen_nic_ethtool.c
index 3bdcc803ec68..b34fb74d07e3 100644
--- a/drivers/net/netxen/netxen_nic_ethtool.c
+++ b/drivers/net/netxen/netxen_nic_ethtool.c
@@ -117,7 +117,7 @@ netxen_nic_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
ecmd->port = PORT_TP;
- ecmd->speed = adapter->link_speed;
+ ethtool_cmd_speed_set(ecmd, adapter->link_speed);
ecmd->duplex = adapter->link_duplex;
ecmd->autoneg = adapter->link_autoneg;
@@ -134,7 +134,7 @@ netxen_nic_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
}
if (netif_running(dev) && adapter->has_link_events) {
- ecmd->speed = adapter->link_speed;
+ ethtool_cmd_speed_set(ecmd, adapter->link_speed);
ecmd->autoneg = adapter->link_autoneg;
ecmd->duplex = adapter->link_duplex;
goto skip;
@@ -146,10 +146,10 @@ netxen_nic_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
u16 pcifn = adapter->ahw.pci_func;
val = NXRD32(adapter, P3_LINK_SPEED_REG(pcifn));
- ecmd->speed = P3_LINK_SPEED_MHZ *
- P3_LINK_SPEED_VAL(pcifn, val);
+ ethtool_cmd_speed_set(ecmd, P3_LINK_SPEED_MHZ *
+ P3_LINK_SPEED_VAL(pcifn, val));
} else
- ecmd->speed = SPEED_10000;
+ ethtool_cmd_speed_set(ecmd, SPEED_10000);
ecmd->duplex = DUPLEX_FULL;
ecmd->autoneg = AUTONEG_DISABLE;
@@ -251,6 +251,7 @@ static int
netxen_nic_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
{
struct netxen_adapter *adapter = netdev_priv(dev);
+ u32 speed = ethtool_cmd_speed(ecmd);
int ret;
if (adapter->ahw.port_type != NETXEN_NIC_GBE)
@@ -259,14 +260,14 @@ netxen_nic_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
if (!(adapter->capabilities & NX_FW_CAPABILITY_GBE_LINK_CFG))
return -EOPNOTSUPP;
- ret = nx_fw_cmd_set_gbe_port(adapter, ecmd->speed, ecmd->duplex,
+ ret = nx_fw_cmd_set_gbe_port(adapter, speed, ecmd->duplex,
ecmd->autoneg);
if (ret == NX_RCODE_NOT_SUPPORTED)
return -EOPNOTSUPP;
else if (ret)
return -EIO;
- adapter->link_speed = ecmd->speed;
+ adapter->link_speed = speed;
adapter->link_duplex = ecmd->duplex;
adapter->link_autoneg = ecmd->autoneg;
@@ -676,62 +677,6 @@ netxen_nic_get_ethtool_stats(struct net_device *dev,
}
}
-static u32 netxen_nic_get_tx_csum(struct net_device *dev)
-{
- return dev->features & NETIF_F_IP_CSUM;
-}
-
-static u32 netxen_nic_get_rx_csum(struct net_device *dev)
-{
- struct netxen_adapter *adapter = netdev_priv(dev);
- return adapter->rx_csum;
-}
-
-static int netxen_nic_set_rx_csum(struct net_device *dev, u32 data)
-{
- struct netxen_adapter *adapter = netdev_priv(dev);
-
- if (data) {
- adapter->rx_csum = data;
- return 0;
- }
-
- if (dev->features & NETIF_F_LRO) {
- if (netxen_config_hw_lro(adapter, NETXEN_NIC_LRO_DISABLED))
- return -EIO;
-
- dev->features &= ~NETIF_F_LRO;
- netxen_send_lro_cleanup(adapter);
- netdev_info(dev, "disabling LRO as rx_csum is off\n");
- }
- adapter->rx_csum = data;
- return 0;
-}
-
-static u32 netxen_nic_get_tso(struct net_device *dev)
-{
- struct netxen_adapter *adapter = netdev_priv(dev);
-
- if (NX_IS_REVISION_P3(adapter->ahw.revision_id))
- return (dev->features & (NETIF_F_TSO | NETIF_F_TSO6)) != 0;
-
- return (dev->features & NETIF_F_TSO) != 0;
-}
-
-static int netxen_nic_set_tso(struct net_device *dev, u32 data)
-{
- if (data) {
- struct netxen_adapter *adapter = netdev_priv(dev);
-
- dev->features |= NETIF_F_TSO;
- if (NX_IS_REVISION_P3(adapter->ahw.revision_id))
- dev->features |= NETIF_F_TSO6;
- } else
- dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO6);
-
- return 0;
-}
-
static void
netxen_nic_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
@@ -866,43 +811,6 @@ static int netxen_get_intr_coalesce(struct net_device *netdev,
return 0;
}
-static int netxen_nic_set_flags(struct net_device *netdev, u32 data)
-{
- struct netxen_adapter *adapter = netdev_priv(netdev);
- int hw_lro;
-
- if (ethtool_invalid_flags(netdev, data, ETH_FLAG_LRO))
- return -EINVAL;
-
- if (!(adapter->capabilities & NX_FW_CAPABILITY_HW_LRO))
- return -EINVAL;
-
- if (!adapter->rx_csum) {
- netdev_info(netdev, "rx csum is off, cannot toggle LRO\n");
- return -EINVAL;
- }
-
- if (!!(data & ETH_FLAG_LRO) == !!(netdev->features & NETIF_F_LRO))
- return 0;
-
- if (data & ETH_FLAG_LRO) {
- hw_lro = NETXEN_NIC_LRO_ENABLED;
- netdev->features |= NETIF_F_LRO;
- } else {
- hw_lro = NETXEN_NIC_LRO_DISABLED;
- netdev->features &= ~NETIF_F_LRO;
- }
-
- if (netxen_config_hw_lro(adapter, hw_lro))
- return -EIO;
-
- if ((hw_lro == 0) && netxen_send_lro_cleanup(adapter))
- return -EIO;
-
-
- return 0;
-}
-
const struct ethtool_ops netxen_nic_ethtool_ops = {
.get_settings = netxen_nic_get_settings,
.set_settings = netxen_nic_set_settings,
@@ -916,21 +824,12 @@ const struct ethtool_ops netxen_nic_ethtool_ops = {
.set_ringparam = netxen_nic_set_ringparam,
.get_pauseparam = netxen_nic_get_pauseparam,
.set_pauseparam = netxen_nic_set_pauseparam,
- .get_tx_csum = netxen_nic_get_tx_csum,
- .set_tx_csum = ethtool_op_set_tx_csum,
- .set_sg = ethtool_op_set_sg,
- .get_tso = netxen_nic_get_tso,
- .set_tso = netxen_nic_set_tso,
.get_wol = netxen_nic_get_wol,
.set_wol = netxen_nic_set_wol,
.self_test = netxen_nic_diag_test,
.get_strings = netxen_nic_get_strings,
.get_ethtool_stats = netxen_nic_get_ethtool_stats,
.get_sset_count = netxen_get_sset_count,
- .get_rx_csum = netxen_nic_get_rx_csum,
- .set_rx_csum = netxen_nic_set_rx_csum,
.get_coalesce = netxen_get_intr_coalesce,
.set_coalesce = netxen_set_intr_coalesce,
- .get_flags = ethtool_op_get_flags,
- .set_flags = netxen_nic_set_flags,
};
diff --git a/drivers/net/netxen/netxen_nic_hdr.h b/drivers/net/netxen/netxen_nic_hdr.h
index d8bd73d7e296..dc1967c1f312 100644
--- a/drivers/net/netxen/netxen_nic_hdr.h
+++ b/drivers/net/netxen/netxen_nic_hdr.h
@@ -780,7 +780,7 @@ enum {
/*
* capabilities register, can be used to selectively enable/disable features
- * for backward compability
+ * for backward compatibility
*/
#define CRB_NIC_CAPABILITIES_HOST NETXEN_NIC_REG(0x1a8)
#define CRB_NIC_MSI_MODE_HOST NETXEN_NIC_REG(0x270)
diff --git a/drivers/net/netxen/netxen_nic_init.c b/drivers/net/netxen/netxen_nic_init.c
index 731077d8d962..7f999671c7b2 100644
--- a/drivers/net/netxen/netxen_nic_init.c
+++ b/drivers/net/netxen/netxen_nic_init.c
@@ -1483,7 +1483,8 @@ static struct sk_buff *netxen_process_rxbuf(struct netxen_adapter *adapter,
if (!skb)
goto no_skb;
- if (likely(adapter->rx_csum && cksum == STATUS_CKSUM_OK)) {
+ if (likely((adapter->netdev->features & NETIF_F_RXCSUM)
+ && cksum == STATUS_CKSUM_OK)) {
adapter->stats.csummed++;
skb->ip_summed = CHECKSUM_UNNECESSARY;
} else
diff --git a/drivers/net/netxen/netxen_nic_main.c b/drivers/net/netxen/netxen_nic_main.c
index 83348dc4b184..b644383017f9 100644
--- a/drivers/net/netxen/netxen_nic_main.c
+++ b/drivers/net/netxen/netxen_nic_main.c
@@ -485,6 +485,37 @@ static void netxen_set_multicast_list(struct net_device *dev)
adapter->set_multi(dev);
}
+static u32 netxen_fix_features(struct net_device *dev, u32 features)
+{
+ if (!(features & NETIF_F_RXCSUM)) {
+ netdev_info(dev, "disabling LRO as RXCSUM is off\n");
+
+ features &= ~NETIF_F_LRO;
+ }
+
+ return features;
+}
+
+static int netxen_set_features(struct net_device *dev, u32 features)
+{
+ struct netxen_adapter *adapter = netdev_priv(dev);
+ int hw_lro;
+
+ if (!((dev->features ^ features) & NETIF_F_LRO))
+ return 0;
+
+ hw_lro = (features & NETIF_F_LRO) ? NETXEN_NIC_LRO_ENABLED
+ : NETXEN_NIC_LRO_DISABLED;
+
+ if (netxen_config_hw_lro(adapter, hw_lro))
+ return -EIO;
+
+ if (!(features & NETIF_F_LRO) && netxen_send_lro_cleanup(adapter))
+ return -EIO;
+
+ return 0;
+}
+
static const struct net_device_ops netxen_netdev_ops = {
.ndo_open = netxen_nic_open,
.ndo_stop = netxen_nic_close,
@@ -495,6 +526,8 @@ static const struct net_device_ops netxen_netdev_ops = {
.ndo_set_mac_address = netxen_nic_set_mac,
.ndo_change_mtu = netxen_nic_change_mtu,
.ndo_tx_timeout = netxen_tx_timeout,
+ .ndo_fix_features = netxen_fix_features,
+ .ndo_set_features = netxen_set_features,
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = netxen_nic_poll_controller,
#endif
@@ -905,7 +938,7 @@ netxen_nic_request_irq(struct netxen_adapter *adapter)
struct nx_host_sds_ring *sds_ring;
int err, ring;
- unsigned long flags = IRQF_SAMPLE_RANDOM;
+ unsigned long flags = 0;
struct net_device *netdev = adapter->netdev;
struct netxen_recv_context *recv_ctx = &adapter->recv_ctx;
@@ -1196,7 +1229,6 @@ netxen_setup_netdev(struct netxen_adapter *adapter,
int err = 0;
struct pci_dev *pdev = adapter->pdev;
- adapter->rx_csum = 1;
adapter->mc_enabled = 0;
if (NX_IS_REVISION_P3(adapter->ahw.revision_id))
adapter->max_mc_count = 38;
@@ -1210,14 +1242,13 @@ netxen_setup_netdev(struct netxen_adapter *adapter,
SET_ETHTOOL_OPS(netdev, &netxen_nic_ethtool_ops);
- netdev->features |= (NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO);
- netdev->features |= (NETIF_F_GRO);
- netdev->vlan_features |= (NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO);
+ netdev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO |
+ NETIF_F_RXCSUM;
- if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) {
- netdev->features |= (NETIF_F_IPV6_CSUM | NETIF_F_TSO6);
- netdev->vlan_features |= (NETIF_F_IPV6_CSUM | NETIF_F_TSO6);
- }
+ if (NX_IS_REVISION_P3(adapter->ahw.revision_id))
+ netdev->hw_features |= NETIF_F_IPV6_CSUM | NETIF_F_TSO6;
+
+ netdev->vlan_features |= netdev->hw_features;
if (adapter->pci_using_dac) {
netdev->features |= NETIF_F_HIGHDMA;
@@ -1225,10 +1256,12 @@ netxen_setup_netdev(struct netxen_adapter *adapter,
}
if (adapter->capabilities & NX_FW_CAPABILITY_FVLANTX)
- netdev->features |= (NETIF_F_HW_VLAN_TX);
+ netdev->hw_features |= NETIF_F_HW_VLAN_TX;
if (adapter->capabilities & NX_FW_CAPABILITY_HW_LRO)
- netdev->features |= NETIF_F_LRO;
+ netdev->hw_features |= NETIF_F_LRO;
+
+ netdev->features |= netdev->hw_features;
netdev->irq = adapter->msix_entries[0].vector;
@@ -1844,6 +1877,8 @@ netxen_nic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
struct cmd_desc_type0 *hwdesc, *first_desc;
struct pci_dev *pdev;
int i, k;
+ int delta = 0;
+ struct skb_frag_struct *frag;
u32 producer;
int frag_count, no_of_desc;
@@ -1851,6 +1886,21 @@ netxen_nic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
frag_count = skb_shinfo(skb)->nr_frags + 1;
+ /* 14 frags supported for normal packet and
+ * 32 frags supported for TSO packet
+ */
+ if (!skb_is_gso(skb) && frag_count > NETXEN_MAX_FRAGS_PER_TX) {
+
+ for (i = 0; i < (frag_count - NETXEN_MAX_FRAGS_PER_TX); i++) {
+ frag = &skb_shinfo(skb)->frags[i];
+ delta += frag->size;
+ }
+
+ if (!__pskb_pull_tail(skb, delta))
+ goto drop_packet;
+
+ frag_count = 1 + skb_shinfo(skb)->nr_frags;
+ }
/* 4 fragments per cmd des */
no_of_desc = (frag_count + 3) >> 2;
diff --git a/drivers/net/niu.c b/drivers/net/niu.c
index 32678b6c6b39..cc25bff0bd3b 100644
--- a/drivers/net/niu.c
+++ b/drivers/net/niu.c
@@ -1233,7 +1233,7 @@ static int link_status_1g_rgmii(struct niu *np, int *link_up_p)
bmsr = err;
if (bmsr & BMSR_LSTATUS) {
- u16 adv, lpa, common, estat;
+ u16 adv, lpa;
err = mii_read(np, np->phy_addr, MII_ADVERTISE);
if (err < 0)
@@ -1245,12 +1245,9 @@ static int link_status_1g_rgmii(struct niu *np, int *link_up_p)
goto out;
lpa = err;
- common = adv & lpa;
-
err = mii_read(np, np->phy_addr, MII_ESTATUS);
if (err < 0)
goto out;
- estat = err;
link_up = 1;
current_speed = SPEED_1000;
current_duplex = DUPLEX_FULL;
@@ -1650,7 +1647,7 @@ static int xcvr_init_10g(struct niu *np)
break;
}
- return 0;
+ return err;
}
static int mii_reset(struct niu *np)
@@ -2381,17 +2378,14 @@ static int serdes_init_10g_serdes(struct niu *np)
struct niu_link_config *lp = &np->link_config;
unsigned long ctrl_reg, test_cfg_reg, pll_cfg, i;
u64 ctrl_val, test_cfg_val, sig, mask, val;
- u64 reset_val;
switch (np->port) {
case 0:
- reset_val = ENET_SERDES_RESET_0;
ctrl_reg = ENET_SERDES_0_CTRL_CFG;
test_cfg_reg = ENET_SERDES_0_TEST_CFG;
pll_cfg = ENET_SERDES_0_PLL_CFG;
break;
case 1:
- reset_val = ENET_SERDES_RESET_1;
ctrl_reg = ENET_SERDES_1_CTRL_CFG;
test_cfg_reg = ENET_SERDES_1_TEST_CFG;
pll_cfg = ENET_SERDES_1_PLL_CFG;
@@ -6071,8 +6065,7 @@ static int niu_request_irq(struct niu *np)
for (i = 0; i < np->num_ldg; i++) {
struct niu_ldg *lp = &np->ldg[i];
- err = request_irq(lp->irq, niu_interrupt,
- IRQF_SHARED | IRQF_SAMPLE_RANDOM,
+ err = request_irq(lp->irq, niu_interrupt, IRQF_SHARED,
np->irq_name[i], lp);
if (err)
goto out_free_irqs;
@@ -6851,7 +6844,7 @@ static int niu_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
cmd->supported = lp->supported;
cmd->advertising = lp->active_advertising;
cmd->autoneg = lp->active_autoneg;
- cmd->speed = lp->active_speed;
+ ethtool_cmd_speed_set(cmd, lp->active_speed);
cmd->duplex = lp->active_duplex;
cmd->port = (np->flags & NIU_FLAGS_FIBER) ? PORT_FIBRE : PORT_TP;
cmd->transceiver = (np->flags & NIU_FLAGS_XCVR_SERDES) ?
@@ -6866,7 +6859,7 @@ static int niu_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
struct niu_link_config *lp = &np->link_config;
lp->advertising = cmd->advertising;
- lp->speed = cmd->speed;
+ lp->speed = ethtool_cmd_speed(cmd);
lp->duplex = cmd->duplex;
lp->autoneg = cmd->autoneg;
return niu_init_link(np);
@@ -7023,6 +7016,7 @@ static int niu_ethflow_to_class(int flow_type, u64 *class)
case UDP_V4_FLOW:
*class = CLASS_CODE_UDP_IPV4;
break;
+ case AH_ESP_V4_FLOW:
case AH_V4_FLOW:
case ESP_V4_FLOW:
*class = CLASS_CODE_AH_ESP_IPV4;
@@ -7036,6 +7030,7 @@ static int niu_ethflow_to_class(int flow_type, u64 *class)
case UDP_V6_FLOW:
*class = CLASS_CODE_UDP_IPV6;
break;
+ case AH_ESP_V6_FLOW:
case AH_V6_FLOW:
case ESP_V6_FLOW:
*class = CLASS_CODE_AH_ESP_IPV6;
@@ -7889,37 +7884,35 @@ static void niu_force_led(struct niu *np, int on)
nw64_mac(reg, val);
}
-static int niu_phys_id(struct net_device *dev, u32 data)
+static int niu_set_phys_id(struct net_device *dev,
+ enum ethtool_phys_id_state state)
+
{
struct niu *np = netdev_priv(dev);
- u64 orig_led_state;
- int i;
if (!netif_running(dev))
return -EAGAIN;
- if (data == 0)
- data = 2;
+ switch (state) {
+ case ETHTOOL_ID_ACTIVE:
+ np->orig_led_state = niu_led_state_save(np);
+ return 1; /* cycle on/off once per second */
- orig_led_state = niu_led_state_save(np);
- for (i = 0; i < (data * 2); i++) {
- int on = ((i % 2) == 0);
+ case ETHTOOL_ID_ON:
+ niu_force_led(np, 1);
+ break;
- niu_force_led(np, on);
+ case ETHTOOL_ID_OFF:
+ niu_force_led(np, 0);
+ break;
- if (msleep_interruptible(500))
- break;
+ case ETHTOOL_ID_INACTIVE:
+ niu_led_state_restore(np, np->orig_led_state);
}
- niu_led_state_restore(np, orig_led_state);
return 0;
}
-static int niu_set_flags(struct net_device *dev, u32 data)
-{
- return ethtool_op_set_flags(dev, data, ETH_FLAG_RXHASH);
-}
-
static const struct ethtool_ops niu_ethtool_ops = {
.get_drvinfo = niu_get_drvinfo,
.get_link = ethtool_op_get_link,
@@ -7933,11 +7926,9 @@ static const struct ethtool_ops niu_ethtool_ops = {
.get_strings = niu_get_strings,
.get_sset_count = niu_get_sset_count,
.get_ethtool_stats = niu_get_ethtool_stats,
- .phys_id = niu_phys_id,
+ .set_phys_id = niu_set_phys_id,
.get_rxnfc = niu_get_nfc,
.set_rxnfc = niu_set_nfc,
- .set_flags = niu_set_flags,
- .get_flags = ethtool_op_get_flags,
};
static int niu_ldg_assign_ldn(struct niu *np, struct niu_parent *parent,
@@ -8131,7 +8122,7 @@ static int __devinit niu_pci_vpd_scan_props(struct niu *np,
netif_printk(np, probe, KERN_DEBUG, np->dev,
"VPD_SCAN: start[%x] end[%x]\n", start, end);
while (start < end) {
- int len, err, instance, type, prop_len;
+ int len, err, prop_len;
char namebuf[64];
u8 *prop_buf;
int max_len;
@@ -8147,8 +8138,6 @@ static int __devinit niu_pci_vpd_scan_props(struct niu *np,
len = err;
start += 3;
- instance = niu_pci_eeprom_read(np, start);
- type = niu_pci_eeprom_read(np, start + 3);
prop_len = niu_pci_eeprom_read(np, start + 4);
err = niu_pci_vpd_get_propname(np, start + 5, namebuf, 64);
if (err < 0)
@@ -9768,8 +9757,8 @@ static void __devinit niu_device_announce(struct niu *np)
static void __devinit niu_set_basic_features(struct net_device *dev)
{
- dev->features |= (NETIF_F_SG | NETIF_F_HW_CSUM |
- NETIF_F_GRO | NETIF_F_RXHASH);
+ dev->hw_features = NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_RXHASH;
+ dev->features |= dev->hw_features | NETIF_F_RXCSUM;
}
static int __devinit niu_pci_init_one(struct pci_dev *pdev,
diff --git a/drivers/net/niu.h b/drivers/net/niu.h
index a41fa8ebe05f..51e177e1860d 100644
--- a/drivers/net/niu.h
+++ b/drivers/net/niu.h
@@ -3279,6 +3279,7 @@ struct niu {
unsigned long xpcs_off;
struct timer_list timer;
+ u64 orig_led_state;
const struct niu_phy_ops *phy_ops;
int phy_addr;
diff --git a/drivers/net/ns83820.c b/drivers/net/ns83820.c
index a41b2cf4d917..3e4040f2f3cb 100644
--- a/drivers/net/ns83820.c
+++ b/drivers/net/ns83820.c
@@ -512,7 +512,7 @@ static void ns83820_vlan_rx_register(struct net_device *ndev, struct vlan_group
/* Packet Receiver
*
* The hardware supports linked lists of receive descriptors for
- * which ownership is transfered back and forth by means of an
+ * which ownership is transferred back and forth by means of an
* ownership bit. While the hardware does support the use of a
* ring for receive descriptors, we only make use of a chain in
* an attempt to reduce bus traffic under heavy load scenarios.
@@ -1147,7 +1147,7 @@ again:
#ifdef NS83820_VLAN_ACCEL_SUPPORT
if(vlan_tx_tag_present(skb)) {
/* fetch the vlan tag info out of the
- * ancilliary data if the vlan code
+ * ancillary data if the vlan code
* is using hw vlan acceleration
*/
short tag = vlan_tx_tag_get(skb);
@@ -1251,7 +1251,7 @@ static int ns83820_get_settings(struct net_device *ndev,
/*
* Here's the list of available ethtool commands from other drivers:
* cmd->advertising =
- * cmd->speed =
+ * ethtool_cmd_speed_set(cmd, ...)
* cmd->duplex =
* cmd->port = 0;
* cmd->phy_address =
@@ -1289,13 +1289,13 @@ static int ns83820_get_settings(struct net_device *ndev,
cmd->duplex = fullduplex ? DUPLEX_FULL : DUPLEX_HALF;
switch (cfg / CFG_SPDSTS0 & 3) {
case 2:
- cmd->speed = SPEED_1000;
+ ethtool_cmd_speed_set(cmd, SPEED_1000);
break;
case 1:
- cmd->speed = SPEED_100;
+ ethtool_cmd_speed_set(cmd, SPEED_100);
break;
default:
- cmd->speed = SPEED_10;
+ ethtool_cmd_speed_set(cmd, SPEED_10);
break;
}
cmd->autoneg = (tbicr & TBICR_MR_AN_ENABLE)
diff --git a/drivers/net/pch_gbe/pch_gbe.h b/drivers/net/pch_gbe/pch_gbe.h
index e1e33c80fb25..59fac77d0dbb 100644
--- a/drivers/net/pch_gbe/pch_gbe.h
+++ b/drivers/net/pch_gbe/pch_gbe.h
@@ -351,7 +351,7 @@ struct pch_gbe_functions {
};
/**
- * struct pch_gbe_mac_info - MAC infomation
+ * struct pch_gbe_mac_info - MAC information
* @addr[6]: Store the MAC address
* @fc: Mode of flow control
* @fc_autoneg: Auto negotiation enable for flow control setting
@@ -375,7 +375,7 @@ struct pch_gbe_mac_info {
};
/**
- * struct pch_gbe_phy_info - PHY infomation
+ * struct pch_gbe_phy_info - PHY information
* @addr: PHY address
* @id: PHY's identifier
* @revision: PHY's revision
@@ -393,7 +393,7 @@ struct pch_gbe_phy_info {
/*!
* @ingroup Gigabit Ether driver Layer
* @struct pch_gbe_bus_info
- * @brief Bus infomation
+ * @brief Bus information
*/
struct pch_gbe_bus_info {
u8 type;
@@ -404,7 +404,7 @@ struct pch_gbe_bus_info {
/*!
* @ingroup Gigabit Ether driver Layer
* @struct pch_gbe_hw
- * @brief Hardware infomation
+ * @brief Hardware information
*/
struct pch_gbe_hw {
void *back;
@@ -462,7 +462,7 @@ struct pch_gbe_tx_desc {
/**
- * struct pch_gbe_buffer - Buffer infomation
+ * struct pch_gbe_buffer - Buffer information
* @skb: pointer to a socket buffer
* @dma: DMA address
* @time_stamp: time stamp
@@ -477,7 +477,7 @@ struct pch_gbe_buffer {
};
/**
- * struct pch_gbe_tx_ring - tx ring infomation
+ * struct pch_gbe_tx_ring - tx ring information
* @tx_lock: spinlock structs
* @desc: pointer to the descriptor ring memory
* @dma: physical address of the descriptor ring
@@ -499,7 +499,7 @@ struct pch_gbe_tx_ring {
};
/**
- * struct pch_gbe_rx_ring - rx ring infomation
+ * struct pch_gbe_rx_ring - rx ring information
* @desc: pointer to the descriptor ring memory
* @dma: physical address of the descriptor ring
* @size: length of descriptor ring in bytes
@@ -597,8 +597,6 @@ struct pch_gbe_hw_stats {
* @rx_ring: Pointer of Rx descriptor ring structure
* @rx_buffer_len: Receive buffer length
* @tx_queue_len: Transmit queue length
- * @rx_csum: Receive TCP/IP checksum enable/disable
- * @tx_csum: Transmit TCP/IP checksum enable/disable
* @have_msi: PCI MSI mode flag
*/
@@ -623,8 +621,6 @@ struct pch_gbe_adapter {
struct pch_gbe_rx_ring *rx_ring;
unsigned long rx_buffer_len;
unsigned long tx_queue_len;
- bool rx_csum;
- bool tx_csum;
bool have_msi;
};
diff --git a/drivers/net/pch_gbe/pch_gbe_ethtool.c b/drivers/net/pch_gbe/pch_gbe_ethtool.c
index c8c873b31a89..ea2d8e41887a 100644
--- a/drivers/net/pch_gbe/pch_gbe_ethtool.c
+++ b/drivers/net/pch_gbe/pch_gbe_ethtool.c
@@ -21,7 +21,7 @@
#include "pch_gbe_api.h"
/**
- * pch_gbe_stats - Stats item infomation
+ * pch_gbe_stats - Stats item information
*/
struct pch_gbe_stats {
char string[ETH_GSTRING_LEN];
@@ -92,7 +92,7 @@ static int pch_gbe_get_settings(struct net_device *netdev,
ecmd->advertising &= ~(ADVERTISED_TP | ADVERTISED_1000baseT_Half);
if (!netif_carrier_ok(adapter->netdev))
- ecmd->speed = -1;
+ ethtool_cmd_speed_set(ecmd, -1);
return ret;
}
@@ -109,12 +109,15 @@ static int pch_gbe_set_settings(struct net_device *netdev,
{
struct pch_gbe_adapter *adapter = netdev_priv(netdev);
struct pch_gbe_hw *hw = &adapter->hw;
+ u32 speed = ethtool_cmd_speed(ecmd);
int ret;
pch_gbe_hal_write_phy_reg(hw, MII_BMCR, BMCR_RESET);
- if (ecmd->speed == USHRT_MAX) {
- ecmd->speed = SPEED_1000;
+ /* when set_settings() is called with an ethtool_cmd previously
+ * filled by get_settings() on a down link, speed is -1: */
+ if (speed == UINT_MAX) {
+ speed = SPEED_1000;
ecmd->duplex = DUPLEX_FULL;
}
ret = mii_ethtool_sset(&adapter->mii, ecmd);
@@ -122,7 +125,7 @@ static int pch_gbe_set_settings(struct net_device *netdev,
pr_err("Error: mii_ethtool_sset\n");
return ret;
}
- hw->mac.link_speed = ecmd->speed;
+ hw->mac.link_speed = speed;
hw->mac.link_duplex = ecmd->duplex;
hw->phy.autoneg_advertised = ecmd->advertising;
hw->mac.autoneg = ecmd->autoneg;
@@ -434,57 +437,6 @@ static int pch_gbe_set_pauseparam(struct net_device *netdev,
}
/**
- * pch_gbe_get_rx_csum - Report whether receive checksums are turned on or off
- * @netdev: Network interface device structure
- * Returns
- * true(1): Checksum On
- * false(0): Checksum Off
- */
-static u32 pch_gbe_get_rx_csum(struct net_device *netdev)
-{
- struct pch_gbe_adapter *adapter = netdev_priv(netdev);
-
- return adapter->rx_csum;
-}
-
-/**
- * pch_gbe_set_rx_csum - Turn receive checksum on or off
- * @netdev: Network interface device structure
- * @data: Checksum On[true] or Off[false]
- * Returns
- * 0: Successful.
- * Negative value: Failed.
- */
-static int pch_gbe_set_rx_csum(struct net_device *netdev, u32 data)
-{
- struct pch_gbe_adapter *adapter = netdev_priv(netdev);
-
- adapter->rx_csum = data;
- if ((netif_running(netdev)))
- pch_gbe_reinit_locked(adapter);
- else
- pch_gbe_reset(adapter);
-
- return 0;
-}
-
-/**
- * pch_gbe_set_tx_csum - Turn transmit checksums on or off
- * @netdev: Network interface device structure
- * @data: Checksum on[true] or off[false]
- * Returns
- * 0: Successful.
- * Negative value: Failed.
- */
-static int pch_gbe_set_tx_csum(struct net_device *netdev, u32 data)
-{
- struct pch_gbe_adapter *adapter = netdev_priv(netdev);
-
- adapter->tx_csum = data;
- return ethtool_op_set_tx_ipv6_csum(netdev, data);
-}
-
-/**
* pch_gbe_get_strings - Return a set of strings that describe the requested
* objects
* @netdev: Network interface device structure
@@ -554,9 +506,6 @@ static const struct ethtool_ops pch_gbe_ethtool_ops = {
.set_ringparam = pch_gbe_set_ringparam,
.get_pauseparam = pch_gbe_get_pauseparam,
.set_pauseparam = pch_gbe_set_pauseparam,
- .get_rx_csum = pch_gbe_get_rx_csum,
- .set_rx_csum = pch_gbe_set_rx_csum,
- .set_tx_csum = pch_gbe_set_tx_csum,
.get_strings = pch_gbe_get_strings,
.get_ethtool_stats = pch_gbe_get_ethtool_stats,
.get_sset_count = pch_gbe_get_sset_count,
diff --git a/drivers/net/pch_gbe/pch_gbe_main.c b/drivers/net/pch_gbe/pch_gbe_main.c
index 50986840c99c..c2476fd96573 100644
--- a/drivers/net/pch_gbe/pch_gbe_main.c
+++ b/drivers/net/pch_gbe/pch_gbe_main.c
@@ -34,6 +34,10 @@ const char pch_driver_version[] = DRV_VERSION;
#define PCH_GBE_COPYBREAK_DEFAULT 256
#define PCH_GBE_PCI_BAR 1
+/* Macros for ML7223 */
+#define PCI_VENDOR_ID_ROHM 0x10db
+#define PCI_DEVICE_ID_ROHM_ML7223_GBE 0x8013
+
#define PCH_GBE_TX_WEIGHT 64
#define PCH_GBE_RX_WEIGHT 64
#define PCH_GBE_RX_BUFFER_WRITE 16
@@ -43,8 +47,7 @@ const char pch_driver_version[] = DRV_VERSION;
#define PCH_GBE_MAC_RGMII_CTRL_SETTING ( \
PCH_GBE_CHIP_TYPE_INTERNAL | \
- PCH_GBE_RGMII_MODE_RGMII | \
- PCH_GBE_CRS_SEL \
+ PCH_GBE_RGMII_MODE_RGMII \
)
/* Ethertype field values */
@@ -656,6 +659,7 @@ static void pch_gbe_configure_tx(struct pch_gbe_adapter *adapter)
*/
static void pch_gbe_setup_rctl(struct pch_gbe_adapter *adapter)
{
+ struct net_device *netdev = adapter->netdev;
struct pch_gbe_hw *hw = &adapter->hw;
u32 rx_mode, tcpip;
@@ -666,7 +670,7 @@ static void pch_gbe_setup_rctl(struct pch_gbe_adapter *adapter)
tcpip = ioread32(&hw->reg->TCPIP_ACC);
- if (adapter->rx_csum) {
+ if (netdev->features & NETIF_F_RXCSUM) {
tcpip &= ~PCH_GBE_RX_TCPIPACC_OFF;
tcpip |= PCH_GBE_RX_TCPIPACC_EN;
} else {
@@ -887,12 +891,12 @@ static void pch_gbe_watchdog(unsigned long data)
struct pch_gbe_adapter *adapter = (struct pch_gbe_adapter *)data;
struct net_device *netdev = adapter->netdev;
struct pch_gbe_hw *hw = &adapter->hw;
- struct ethtool_cmd cmd;
pr_debug("right now = %ld\n", jiffies);
pch_gbe_update_stats(adapter);
if ((mii_link_ok(&adapter->mii)) && (!netif_carrier_ok(netdev))) {
+ struct ethtool_cmd cmd = { .cmd = ETHTOOL_GSET };
netdev->tx_queue_len = adapter->tx_queue_len;
/* mii library handles link maintenance tasks */
if (mii_ethtool_gset(&adapter->mii, &cmd)) {
@@ -902,7 +906,7 @@ static void pch_gbe_watchdog(unsigned long data)
PCH_GBE_WATCHDOG_PERIOD));
return;
}
- hw->mac.link_speed = cmd.speed;
+ hw->mac.link_speed = ethtool_cmd_speed(&cmd);
hw->mac.link_duplex = cmd.duplex;
/* Set the RGMII control. */
pch_gbe_set_rgmii_ctrl(adapter, hw->mac.link_speed,
@@ -912,7 +916,7 @@ static void pch_gbe_watchdog(unsigned long data)
hw->mac.link_duplex);
netdev_dbg(netdev,
"Link is Up %d Mbps %s-Duplex\n",
- cmd.speed,
+ hw->mac.link_speed,
cmd.duplex == DUPLEX_FULL ? "Full" : "Half");
netif_carrier_on(netdev);
netif_wake_queue(netdev);
@@ -950,7 +954,7 @@ static void pch_gbe_tx_queue(struct pch_gbe_adapter *adapter,
frame_ctrl = 0;
if (unlikely(skb->len < PCH_GBE_SHORT_PKT))
frame_ctrl |= PCH_GBE_TXD_CTRL_APAD;
- if (unlikely(!adapter->tx_csum))
+ if (skb->ip_summed == CHECKSUM_NONE)
frame_ctrl |= PCH_GBE_TXD_CTRL_TCPIP_ACC_OFF;
/* Performs checksum processing */
@@ -958,7 +962,7 @@ static void pch_gbe_tx_queue(struct pch_gbe_adapter *adapter,
* It is because the hardware accelerator does not support a checksum,
* when the received data size is less than 64 bytes.
*/
- if ((skb->len < PCH_GBE_SHORT_PKT) && (adapter->tx_csum)) {
+ if (skb->len < PCH_GBE_SHORT_PKT && skb->ip_summed != CHECKSUM_NONE) {
frame_ctrl |= PCH_GBE_TXD_CTRL_APAD |
PCH_GBE_TXD_CTRL_TCPIP_ACC_OFF;
if (skb->protocol == htons(ETH_P_IP)) {
@@ -1011,7 +1015,7 @@ static void pch_gbe_tx_queue(struct pch_gbe_adapter *adapter,
tmp_skb->len = skb->len;
memcpy(&tmp_skb->data[ETH_HLEN + 2], &skb->data[ETH_HLEN],
(skb->len - ETH_HLEN));
- /*-- Set Buffer infomation --*/
+ /*-- Set Buffer information --*/
buffer_info->length = tmp_skb->len;
buffer_info->dma = dma_map_single(&adapter->pdev->dev, tmp_skb->data,
buffer_info->length,
@@ -1426,7 +1430,7 @@ pch_gbe_clean_rx(struct pch_gbe_adapter *adapter,
length = (rx_desc->rx_words_eob) - 3;
/* Decide the data conversion method */
- if (!adapter->rx_csum) {
+ if (!(netdev->features & NETIF_F_RXCSUM)) {
/* [Header:14][payload] */
if (NET_IP_ALIGN) {
/* Because alignment differs,
@@ -1494,12 +1498,11 @@ pch_gbe_clean_rx(struct pch_gbe_adapter *adapter,
/* Write meta date of skb */
skb_put(skb, length);
skb->protocol = eth_type_trans(skb, netdev);
- if ((tcp_ip_status & PCH_GBE_RXD_ACC_STAT_TCPIPOK) ==
- PCH_GBE_RXD_ACC_STAT_TCPIPOK) {
- skb->ip_summed = CHECKSUM_UNNECESSARY;
- } else {
+ if (tcp_ip_status & PCH_GBE_RXD_ACC_STAT_TCPIPOK)
skb->ip_summed = CHECKSUM_NONE;
- }
+ else
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+
napi_gro_receive(&adapter->napi, skb);
(*work_done)++;
pr_debug("Receive skb->ip_summed: %d length: %d\n",
@@ -1540,7 +1543,7 @@ int pch_gbe_setup_tx_resources(struct pch_gbe_adapter *adapter,
size = (int)sizeof(struct pch_gbe_buffer) * tx_ring->count;
tx_ring->buffer_info = vzalloc(size);
if (!tx_ring->buffer_info) {
- pr_err("Unable to allocate memory for the buffer infomation\n");
+ pr_err("Unable to allocate memory for the buffer information\n");
return -ENOMEM;
}
@@ -2030,6 +2033,29 @@ static int pch_gbe_change_mtu(struct net_device *netdev, int new_mtu)
}
/**
+ * pch_gbe_set_features - Reset device after features changed
+ * @netdev: Network interface device structure
+ * @features: New features
+ * Returns
+ * 0: HW state updated successfully
+ */
+static int pch_gbe_set_features(struct net_device *netdev, u32 features)
+{
+ struct pch_gbe_adapter *adapter = netdev_priv(netdev);
+ u32 changed = features ^ netdev->features;
+
+ if (!(changed & NETIF_F_RXCSUM))
+ return 0;
+
+ if (netif_running(netdev))
+ pch_gbe_reinit_locked(adapter);
+ else
+ pch_gbe_reset(adapter);
+
+ return 0;
+}
+
+/**
* pch_gbe_ioctl - Controls register through a MII interface
* @netdev: Network interface device structure
* @ifr: Pointer to ifr structure
@@ -2129,6 +2155,7 @@ static const struct net_device_ops pch_gbe_netdev_ops = {
.ndo_set_mac_address = pch_gbe_set_mac,
.ndo_tx_timeout = pch_gbe_tx_timeout,
.ndo_change_mtu = pch_gbe_change_mtu,
+ .ndo_set_features = pch_gbe_set_features,
.ndo_do_ioctl = pch_gbe_ioctl,
.ndo_set_multicast_list = &pch_gbe_set_multi,
#ifdef CONFIG_NET_POLL_CONTROLLER
@@ -2334,7 +2361,9 @@ static int pch_gbe_probe(struct pci_dev *pdev,
netdev->watchdog_timeo = PCH_GBE_WATCHDOG_PERIOD;
netif_napi_add(netdev, &adapter->napi,
pch_gbe_napi_poll, PCH_GBE_RX_WEIGHT);
- netdev->features = NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_GRO;
+ netdev->hw_features = NETIF_F_RXCSUM |
+ NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
+ netdev->features = netdev->hw_features;
pch_gbe_set_ethtool_ops(netdev);
pch_gbe_mac_load_mac_addr(&adapter->hw);
@@ -2373,11 +2402,6 @@ static int pch_gbe_probe(struct pci_dev *pdev,
pch_gbe_check_options(adapter);
- if (adapter->tx_csum)
- netdev->features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
- else
- netdev->features &= ~(NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM);
-
/* initialize the wol settings based on the eeprom settings */
adapter->wake_up_evt = PCH_GBE_WL_INIT_SETTING;
dev_info(&pdev->dev, "MAC address : %pM\n", netdev->dev_addr);
@@ -2420,6 +2444,13 @@ static DEFINE_PCI_DEVICE_TABLE(pch_gbe_pcidev_id) = {
.class = (PCI_CLASS_NETWORK_ETHERNET << 8),
.class_mask = (0xFFFF00)
},
+ {.vendor = PCI_VENDOR_ID_ROHM,
+ .device = PCI_DEVICE_ID_ROHM_ML7223_GBE,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ .class = (PCI_CLASS_NETWORK_ETHERNET << 8),
+ .class_mask = (0xFFFF00)
+ },
/* required last entry */
{0}
};
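
The checksum-offload hunks above follow the common hw_features conversion: the private rx_csum/tx_csum flags and the get/set_rx_csum and set_tx_csum ethtool hooks go away, togglable offloads are advertised in netdev->hw_features, the fast path tests netdev->features, and ndo_set_features reprograms the hardware when a bit changes. A minimal sketch of the pattern, with hypothetical foo_* names rather than any one driver's:

/* Minimal sketch of the hw_features pattern; the foo_* names and the
 * foo_reprogram_rx_checksum() helper are illustrative, not from this patch. */
static int foo_set_features(struct net_device *netdev, u32 features)
{
	u32 changed = features ^ netdev->features;

	if (changed & NETIF_F_RXCSUM)
		foo_reprogram_rx_checksum(netdev);	/* e.g. reset/reinit */
	return 0;
}

static void foo_init_features(struct net_device *netdev)
{
	/* probe time: advertise what user space may toggle, enable by default */
	netdev->hw_features = NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
			      NETIF_F_RXCSUM;
	netdev->features = netdev->hw_features;
}

static const struct net_device_ops foo_netdev_ops = {
	.ndo_set_features = foo_set_features,
	/* ... remaining callbacks ... */
};
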
diff --git a/drivers/net/pch_gbe/pch_gbe_param.c b/drivers/net/pch_gbe/pch_gbe_param.c
index ef0996a0eaaa..5b5d90a47e29 100644
--- a/drivers/net/pch_gbe/pch_gbe_param.c
+++ b/drivers/net/pch_gbe/pch_gbe_param.c
@@ -426,6 +426,8 @@ full_duplex_only:
void pch_gbe_check_options(struct pch_gbe_adapter *adapter)
{
struct pch_gbe_hw *hw = &adapter->hw;
+ struct net_device *dev = adapter->netdev;
+ int val;
{ /* Transmit Descriptor Count */
static const struct pch_gbe_option opt = {
@@ -466,9 +468,10 @@ void pch_gbe_check_options(struct pch_gbe_adapter *adapter)
.err = "defaulting to Enabled",
.def = PCH_GBE_DEFAULT_RX_CSUM
};
- adapter->rx_csum = XsumRX;
- pch_gbe_validate_option((int *)(&adapter->rx_csum),
- &opt, adapter);
+ val = XsumRX;
+ pch_gbe_validate_option(&val, &opt, adapter);
+ if (!val)
+ dev->features &= ~NETIF_F_RXCSUM;
}
{ /* Checksum Offload Enable/Disable */
static const struct pch_gbe_option opt = {
@@ -477,9 +480,10 @@ void pch_gbe_check_options(struct pch_gbe_adapter *adapter)
.err = "defaulting to Enabled",
.def = PCH_GBE_DEFAULT_TX_CSUM
};
- adapter->tx_csum = XsumTX;
- pch_gbe_validate_option((int *)(&adapter->tx_csum),
- &opt, adapter);
+ val = XsumTX;
+ pch_gbe_validate_option(&val, &opt, adapter);
+ if (!val)
+ dev->features &= ~NETIF_F_ALL_CSUM;
}
{ /* Flow Control */
static const struct pch_gbe_option opt = {
diff --git a/drivers/net/pch_gbe/pch_gbe_phy.c b/drivers/net/pch_gbe/pch_gbe_phy.c
index 923a687acd30..28bb9603d736 100644
--- a/drivers/net/pch_gbe/pch_gbe_phy.c
+++ b/drivers/net/pch_gbe/pch_gbe_phy.c
@@ -247,7 +247,7 @@ inline void pch_gbe_phy_set_rgmii(struct pch_gbe_hw *hw)
void pch_gbe_phy_init_setting(struct pch_gbe_hw *hw)
{
struct pch_gbe_adapter *adapter;
- struct ethtool_cmd cmd;
+ struct ethtool_cmd cmd = { .cmd = ETHTOOL_GSET };
int ret;
u16 mii_reg;
@@ -256,7 +256,7 @@ void pch_gbe_phy_init_setting(struct pch_gbe_hw *hw)
if (ret)
pr_err("Error: mii_ethtool_gset\n");
- cmd.speed = hw->mac.link_speed;
+ ethtool_cmd_speed_set(&cmd, hw->mac.link_speed);
cmd.duplex = hw->mac.link_duplex;
cmd.advertising = hw->phy.autoneg_advertised;
cmd.autoneg = hw->mac.autoneg;
diff --git a/drivers/net/pci-skeleton.c b/drivers/net/pci-skeleton.c
index 1766dc4f07e1..c0f23376a462 100644
--- a/drivers/net/pci-skeleton.c
+++ b/drivers/net/pci-skeleton.c
@@ -214,7 +214,7 @@ static struct {
{ "SMC1211TX EZCard 10/100 (RealTek RTL8139)" },
/* { MPX5030, "Accton MPX5030 (RealTek RTL8139)" },*/
{ "Delta Electronics 8139 10/100BaseTX" },
- { "Addtron Technolgy 8139 10/100BaseTX" },
+ { "Addtron Technology 8139 10/100BaseTX" },
};
diff --git a/drivers/net/pcmcia/3c574_cs.c b/drivers/net/pcmcia/3c574_cs.c
index 321b12f82645..81ac330f931d 100644
--- a/drivers/net/pcmcia/3c574_cs.c
+++ b/drivers/net/pcmcia/3c574_cs.c
@@ -950,7 +950,7 @@ static struct net_device_stats *el3_get_stats(struct net_device *dev)
}
/* Update statistics.
- Suprisingly this need not be run single-threaded, but it effectively is.
+ Surprisingly this need not be run single-threaded, but it effectively is.
The counters clear when read, so the adds must merely be atomic.
*/
static void update_stats(struct net_device *dev)
diff --git a/drivers/net/pcmcia/axnet_cs.c b/drivers/net/pcmcia/axnet_cs.c
index d3cb77205863..3077d72e8222 100644
--- a/drivers/net/pcmcia/axnet_cs.c
+++ b/drivers/net/pcmcia/axnet_cs.c
@@ -780,7 +780,7 @@ module_exit(exit_axnet_cs);
Alexey Kuznetsov : use the 8390's six bit hash multicast filter.
Paul Gortmaker : tweak ANK's above multicast changes a bit.
Paul Gortmaker : update packet statistics for v2.1.x
- Alan Cox : support arbitary stupid port mappings on the
+ Alan Cox : support arbitrary stupid port mappings on the
68K Macintosh. Support >16bit I/O spaces
Paul Gortmaker : add kmod support for auto-loading of the 8390
module by all drivers that require it.
@@ -842,7 +842,7 @@ static void do_set_multicast_list(struct net_device *dev);
/*
* SMP and the 8390 setup.
*
- * The 8390 isnt exactly designed to be multithreaded on RX/TX. There is
+ * The 8390 isn't exactly designed to be multithreaded on RX/TX. There is
* a page register that controls bank and packet buffer access. We guard
* this with ei_local->page_lock. Nobody should assume or set the page other
* than zero when the lock is not held. Lock holders must restore page 0
diff --git a/drivers/net/pcmcia/smc91c92_cs.c b/drivers/net/pcmcia/smc91c92_cs.c
index 8a9ff5318923..288e4f1317ee 100644
--- a/drivers/net/pcmcia/smc91c92_cs.c
+++ b/drivers/net/pcmcia/smc91c92_cs.c
@@ -1264,7 +1264,7 @@ static netdev_tx_t smc_start_xmit(struct sk_buff *skb,
/*======================================================================
- Handle a Tx anomolous event. Entered while in Window 2.
+ Handle a Tx anomalous event. Entered while in Window 2.
======================================================================*/
@@ -1860,7 +1860,7 @@ static int smc_netdev_get_ecmd(struct net_device *dev, struct ethtool_cmd *ecmd)
tmp = inw(ioaddr + CONFIG);
ecmd->port = (tmp & CFG_AUI_SELECT) ? PORT_AUI : PORT_TP;
ecmd->transceiver = XCVR_INTERNAL;
- ecmd->speed = SPEED_10;
+ ethtool_cmd_speed_set(ecmd, SPEED_10);
ecmd->phy_address = ioaddr + MGMT;
SMC_SELECT_BANK(0);
@@ -1875,8 +1875,8 @@ static int smc_netdev_set_ecmd(struct net_device *dev, struct ethtool_cmd *ecmd)
u16 tmp;
unsigned int ioaddr = dev->base_addr;
- if (ecmd->speed != SPEED_10)
- return -EINVAL;
+ if (ethtool_cmd_speed(ecmd) != SPEED_10)
+ return -EINVAL;
if (ecmd->duplex != DUPLEX_HALF && ecmd->duplex != DUPLEX_FULL)
return -EINVAL;
if (ecmd->port != PORT_TP && ecmd->port != PORT_AUI)
diff --git a/drivers/net/pcnet32.c b/drivers/net/pcnet32.c
index aee3bb0358bf..b48aba9e4227 100644
--- a/drivers/net/pcnet32.c
+++ b/drivers/net/pcnet32.c
@@ -295,12 +295,14 @@ struct pcnet32_private {
struct net_device *next;
struct mii_if_info mii_if;
struct timer_list watchdog_timer;
- struct timer_list blink_timer;
u32 msg_enable; /* debug message level */
/* each bit indicates an available PHY */
u32 phymask;
unsigned short chip_version; /* which variant this is */
+
+ /* saved registers during ethtool blink */
+ u16 save_regs[4];
};
static int pcnet32_probe_pci(struct pci_dev *, const struct pci_device_id *);
@@ -324,8 +326,6 @@ static void pcnet32_restart(struct net_device *dev, unsigned int csr0_bits);
static void pcnet32_ethtool_test(struct net_device *dev,
struct ethtool_test *eth_test, u64 * data);
static int pcnet32_loopback_test(struct net_device *dev, uint64_t * data1);
-static int pcnet32_phys_id(struct net_device *dev, u32 data);
-static void pcnet32_led_blink_callback(struct net_device *dev);
static int pcnet32_get_regs_len(struct net_device *dev);
static void pcnet32_get_regs(struct net_device *dev, struct ethtool_regs *regs,
void *ptr);
@@ -1022,7 +1022,8 @@ clean_up:
return rc;
} /* end pcnet32_loopback_test */
-static void pcnet32_led_blink_callback(struct net_device *dev)
+static int pcnet32_set_phys_id(struct net_device *dev,
+ enum ethtool_phys_id_state state)
{
struct pcnet32_private *lp = netdev_priv(dev);
struct pcnet32_access *a = &lp->a;
@@ -1030,50 +1031,31 @@ static void pcnet32_led_blink_callback(struct net_device *dev)
unsigned long flags;
int i;
- spin_lock_irqsave(&lp->lock, flags);
- for (i = 4; i < 8; i++)
- a->write_bcr(ioaddr, i, a->read_bcr(ioaddr, i) ^ 0x4000);
- spin_unlock_irqrestore(&lp->lock, flags);
-
- mod_timer(&lp->blink_timer, PCNET32_BLINK_TIMEOUT);
-}
+ switch (state) {
+ case ETHTOOL_ID_ACTIVE:
+ /* Save the current value of the bcrs */
+ spin_lock_irqsave(&lp->lock, flags);
+ for (i = 4; i < 8; i++)
+ lp->save_regs[i - 4] = a->read_bcr(ioaddr, i);
+ spin_unlock_irqrestore(&lp->lock, flags);
+ return 2; /* cycle on/off twice per second */
-static int pcnet32_phys_id(struct net_device *dev, u32 data)
-{
- struct pcnet32_private *lp = netdev_priv(dev);
- struct pcnet32_access *a = &lp->a;
- ulong ioaddr = dev->base_addr;
- unsigned long flags;
- int i, regs[4];
+ case ETHTOOL_ID_ON:
+ case ETHTOOL_ID_OFF:
+ /* Blink the led */
+ spin_lock_irqsave(&lp->lock, flags);
+ for (i = 4; i < 8; i++)
+ a->write_bcr(ioaddr, i, a->read_bcr(ioaddr, i) ^ 0x4000);
+ spin_unlock_irqrestore(&lp->lock, flags);
+ break;
- if (!lp->blink_timer.function) {
- init_timer(&lp->blink_timer);
- lp->blink_timer.function = (void *)pcnet32_led_blink_callback;
- lp->blink_timer.data = (unsigned long)dev;
+ case ETHTOOL_ID_INACTIVE:
+ /* Restore the original value of the bcrs */
+ spin_lock_irqsave(&lp->lock, flags);
+ for (i = 4; i < 8; i++)
+ a->write_bcr(ioaddr, i, lp->save_regs[i - 4]);
+ spin_unlock_irqrestore(&lp->lock, flags);
}
-
- /* Save the current value of the bcrs */
- spin_lock_irqsave(&lp->lock, flags);
- for (i = 4; i < 8; i++)
- regs[i - 4] = a->read_bcr(ioaddr, i);
- spin_unlock_irqrestore(&lp->lock, flags);
-
- mod_timer(&lp->blink_timer, jiffies);
- set_current_state(TASK_INTERRUPTIBLE);
-
- /* AV: the limit here makes no sense whatsoever */
- if ((!data) || (data > (u32) (MAX_SCHEDULE_TIMEOUT / HZ)))
- data = (u32) (MAX_SCHEDULE_TIMEOUT / HZ);
-
- msleep_interruptible(data * 1000);
- del_timer_sync(&lp->blink_timer);
-
- /* Restore the original value of the bcrs */
- spin_lock_irqsave(&lp->lock, flags);
- for (i = 4; i < 8; i++)
- a->write_bcr(ioaddr, i, regs[i - 4]);
- spin_unlock_irqrestore(&lp->lock, flags);
-
return 0;
}
@@ -1450,7 +1432,7 @@ static const struct ethtool_ops pcnet32_ethtool_ops = {
.set_ringparam = pcnet32_set_ringparam,
.get_strings = pcnet32_get_strings,
.self_test = pcnet32_ethtool_test,
- .phys_id = pcnet32_phys_id,
+ .set_phys_id = pcnet32_set_phys_id,
.get_regs_len = pcnet32_get_regs_len,
.get_regs = pcnet32_get_regs,
.get_sset_count = pcnet32_get_sset_count,
@@ -1651,7 +1633,7 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev)
/*
* On selected chips turn on the BCR18:NOUFLO bit. This stops transmit
* starting until the packet is loaded. Strike one for reliability, lose
- * one for latency - although on PCI this isnt a big loss. Older chips
+ * one for latency - although on PCI this isn't a big loss. Older chips
* have FIFO's smaller than a packet, so you can't do this.
* Turn on BCR18:BurstRdEn and BCR18:BurstWrEn.
*/
@@ -2117,7 +2099,7 @@ static int pcnet32_open(struct net_device *dev)
int first_phy = -1;
u16 bmcr;
u32 bcr9;
- struct ethtool_cmd ecmd;
+ struct ethtool_cmd ecmd = { .cmd = ETHTOOL_GSET };
/*
* There is really no good other way to handle multiple PHYs
@@ -2133,9 +2115,9 @@ static int pcnet32_open(struct net_device *dev)
ecmd.port = PORT_MII;
ecmd.transceiver = XCVR_INTERNAL;
ecmd.autoneg = AUTONEG_DISABLE;
- ecmd.speed =
- lp->
- options & PCNET32_PORT_100 ? SPEED_100 : SPEED_10;
+ ethtool_cmd_speed_set(&ecmd,
+ (lp->options & PCNET32_PORT_100) ?
+ SPEED_100 : SPEED_10);
bcr9 = lp->a.read_bcr(ioaddr, 9);
if (lp->options & PCNET32_PORT_FD) {
@@ -2781,11 +2763,11 @@ static void pcnet32_check_media(struct net_device *dev, int verbose)
netif_carrier_on(dev);
if (lp->mii) {
if (netif_msg_link(lp)) {
- struct ethtool_cmd ecmd;
+ struct ethtool_cmd ecmd = {
+ .cmd = ETHTOOL_GSET };
mii_ethtool_gset(&lp->mii_if, &ecmd);
- netdev_info(dev, "link up, %sMbps, %s-duplex\n",
- (ecmd.speed == SPEED_100)
- ? "100" : "10",
+ netdev_info(dev, "link up, %uMbps, %s-duplex\n",
+ ethtool_cmd_speed(&ecmd),
(ecmd.duplex == DUPLEX_FULL)
? "full" : "half");
}
diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c
index f7670330f988..a47595760751 100644
--- a/drivers/net/phy/phy.c
+++ b/drivers/net/phy/phy.c
@@ -238,6 +238,8 @@ static void phy_sanitize_settings(struct phy_device *phydev)
*/
int phy_ethtool_sset(struct phy_device *phydev, struct ethtool_cmd *cmd)
{
+ u32 speed = ethtool_cmd_speed(cmd);
+
if (cmd->phy_address != phydev->addr)
return -EINVAL;
@@ -253,16 +255,16 @@ int phy_ethtool_sset(struct phy_device *phydev, struct ethtool_cmd *cmd)
return -EINVAL;
if (cmd->autoneg == AUTONEG_DISABLE &&
- ((cmd->speed != SPEED_1000 &&
- cmd->speed != SPEED_100 &&
- cmd->speed != SPEED_10) ||
+ ((speed != SPEED_1000 &&
+ speed != SPEED_100 &&
+ speed != SPEED_10) ||
(cmd->duplex != DUPLEX_HALF &&
cmd->duplex != DUPLEX_FULL)))
return -EINVAL;
phydev->autoneg = cmd->autoneg;
- phydev->speed = cmd->speed;
+ phydev->speed = speed;
phydev->advertising = cmd->advertising;
@@ -286,7 +288,7 @@ int phy_ethtool_gset(struct phy_device *phydev, struct ethtool_cmd *cmd)
cmd->advertising = phydev->advertising;
- cmd->speed = phydev->speed;
+ ethtool_cmd_speed_set(cmd, phydev->speed);
cmd->duplex = phydev->duplex;
cmd->port = PORT_MII;
cmd->phy_address = phydev->addr;
diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
index e870c0698bbe..ff109fe5af6b 100644
--- a/drivers/net/phy/phy_device.c
+++ b/drivers/net/phy/phy_device.c
@@ -538,7 +538,7 @@ EXPORT_SYMBOL(phy_detach);
/* Generic PHY support and helper functions */
/**
- * genphy_config_advert - sanitize and advertise auto-negotation parameters
+ * genphy_config_advert - sanitize and advertise auto-negotiation parameters
* @phydev: target phy_device struct
*
* Description: Writes MII_ADVERTISE with the appropriate values,
@@ -687,7 +687,7 @@ int genphy_config_aneg(struct phy_device *phydev)
return result;
if (result == 0) {
- /* Advertisment hasn't changed, but maybe aneg was never on to
+ /* Advertisement hasn't changed, but maybe aneg was never on to
* begin with? Or maybe phy was isolated? */
int ctl = phy_read(phydev, MII_BMCR);
diff --git a/drivers/net/ppp_generic.c b/drivers/net/ppp_generic.c
index 9f6d670748d1..4609bc0e2f56 100644
--- a/drivers/net/ppp_generic.c
+++ b/drivers/net/ppp_generic.c
@@ -1448,7 +1448,7 @@ static int ppp_mp_explode(struct ppp *ppp, struct sk_buff *skb)
/*
*check if we are on the last channel or
- *we exceded the lenght of the data to
+*we exceeded the length of the data to
*fragment
*/
if ((nfree <= 0) || (flen > len))
diff --git a/drivers/net/ppp_synctty.c b/drivers/net/ppp_synctty.c
index 4e6b72f57de8..2573f525f11c 100644
--- a/drivers/net/ppp_synctty.c
+++ b/drivers/net/ppp_synctty.c
@@ -178,7 +178,7 @@ ppp_print_buffer (const char *name, const __u8 *buf, int count)
* way to fix this is to use a rwlock in the tty struct, but for now
* we use a single global rwlock for all ttys in ppp line discipline.
*
- * FIXME: Fixed in tty_io nowdays.
+ * FIXME: Fixed in tty_io nowadays.
*/
static DEFINE_RWLOCK(disc_data_lock);
diff --git a/drivers/net/pppoe.c b/drivers/net/pppoe.c
index 78c0e3c9b2b5..718879b35b7d 100644
--- a/drivers/net/pppoe.c
+++ b/drivers/net/pppoe.c
@@ -115,7 +115,7 @@ struct pppoe_net {
* 2) Session stage (MAC and SID are known)
*
* Ethernet frames have a special tag for this but
- * we use simplier approach based on session id
+ * we use simpler approach based on session id
*/
static inline bool stage_session(__be16 sid)
{
@@ -317,7 +317,7 @@ static void pppoe_flush_dev(struct net_device *dev)
lock_sock(sk);
if (po->pppoe_dev == dev &&
- sk->sk_state & (PPPOX_CONNECTED | PPPOX_BOUND)) {
+ sk->sk_state & (PPPOX_CONNECTED | PPPOX_BOUND | PPPOX_ZOMBIE)) {
pppox_unbind_sock(sk);
sk->sk_state = PPPOX_ZOMBIE;
sk->sk_state_change(sk);
diff --git a/drivers/net/pptp.c b/drivers/net/pptp.c
index 51dfcf8023c7..1286fe212dc4 100644
--- a/drivers/net/pptp.c
+++ b/drivers/net/pptp.c
@@ -175,6 +175,7 @@ static int pptp_xmit(struct ppp_channel *chan, struct sk_buff *skb)
struct pptp_opt *opt = &po->proto.pptp;
struct pptp_gre_header *hdr;
unsigned int header_len = sizeof(*hdr);
+ struct flowi4 fl4;
int islcp;
int len;
unsigned char *data;
@@ -189,7 +190,7 @@ static int pptp_xmit(struct ppp_channel *chan, struct sk_buff *skb)
if (sk_pppox(po)->sk_state & PPPOX_DEAD)
goto tx_error;
- rt = ip_route_output_ports(&init_net, NULL,
+ rt = ip_route_output_ports(&init_net, &fl4, NULL,
opt->dst_addr.sin_addr.s_addr,
opt->src_addr.sin_addr.s_addr,
0, 0, IPPROTO_GRE,
@@ -270,8 +271,8 @@ static int pptp_xmit(struct ppp_channel *chan, struct sk_buff *skb)
iph->frag_off = 0;
iph->protocol = IPPROTO_GRE;
iph->tos = 0;
- iph->daddr = rt->rt_dst;
- iph->saddr = rt->rt_src;
+ iph->daddr = fl4.daddr;
+ iph->saddr = fl4.saddr;
iph->ttl = ip4_dst_hoplimit(&rt->dst);
iph->tot_len = htons(skb->len);
@@ -434,6 +435,7 @@ static int pptp_connect(struct socket *sock, struct sockaddr *uservaddr,
struct pppox_sock *po = pppox_sk(sk);
struct pptp_opt *opt = &po->proto.pptp;
struct rtable *rt;
+ struct flowi4 fl4;
int error = 0;
if (sp->sa_protocol != PX_PROTO_PPTP)
@@ -463,7 +465,7 @@ static int pptp_connect(struct socket *sock, struct sockaddr *uservaddr,
po->chan.private = sk;
po->chan.ops = &pptp_chan_ops;
- rt = ip_route_output_ports(&init_net, sk,
+ rt = ip_route_output_ports(&init_net, &fl4, sk,
opt->dst_addr.sin_addr.s_addr,
opt->src_addr.sin_addr.s_addr,
0, 0,
diff --git a/drivers/net/ps3_gelic_net.c b/drivers/net/ps3_gelic_net.c
index 5ecfa4b1e758..b1f251da1535 100644
--- a/drivers/net/ps3_gelic_net.c
+++ b/drivers/net/ps3_gelic_net.c
@@ -632,7 +632,7 @@ static inline void gelic_card_enable_rxdmac(struct gelic_card *card)
* @card: card structure
*
* gelic_card_disable_rxdmac terminates processing on the DMA controller by
- * turing off DMA and issueing a force end
+ * turning off DMA and issuing a force end
*/
static inline void gelic_card_disable_rxdmac(struct gelic_card *card)
{
@@ -650,7 +650,7 @@ static inline void gelic_card_disable_rxdmac(struct gelic_card *card)
* @card: card structure
*
* gelic_card_disable_txdmac terminates processing on the DMA controller by
- * turing off DMA and issueing a force end
+ * turning off DMA and issuing a force end
*/
static inline void gelic_card_disable_txdmac(struct gelic_card *card)
{
@@ -951,7 +951,7 @@ static void gelic_net_pass_skb_up(struct gelic_descr *descr,
skb->protocol = eth_type_trans(skb, netdev);
/* checksum offload */
- if (card->rx_csum) {
+ if (netdev->features & NETIF_F_RXCSUM) {
if ((data_status & GELIC_DESCR_DATA_STATUS_CHK_MASK) &&
(!(data_error & GELIC_DESCR_DATA_ERROR_CHK_MASK)))
skb->ip_summed = CHECKSUM_UNNECESSARY;
@@ -1243,17 +1243,17 @@ static int gelic_ether_get_settings(struct net_device *netdev,
switch (card->ether_port_status & GELIC_LV1_ETHER_SPEED_MASK) {
case GELIC_LV1_ETHER_SPEED_10:
- cmd->speed = SPEED_10;
+ ethtool_cmd_speed_set(cmd, SPEED_10);
break;
case GELIC_LV1_ETHER_SPEED_100:
- cmd->speed = SPEED_100;
+ ethtool_cmd_speed_set(cmd, SPEED_100);
break;
case GELIC_LV1_ETHER_SPEED_1000:
- cmd->speed = SPEED_1000;
+ ethtool_cmd_speed_set(cmd, SPEED_1000);
break;
default:
pr_info("%s: speed unknown\n", __func__);
- cmd->speed = SPEED_10;
+ ethtool_cmd_speed_set(cmd, SPEED_10);
break;
}
@@ -1312,21 +1312,6 @@ static int gelic_ether_set_settings(struct net_device *netdev,
return 0;
}
-u32 gelic_net_get_rx_csum(struct net_device *netdev)
-{
- struct gelic_card *card = netdev_card(netdev);
-
- return card->rx_csum;
-}
-
-int gelic_net_set_rx_csum(struct net_device *netdev, u32 data)
-{
- struct gelic_card *card = netdev_card(netdev);
-
- card->rx_csum = data;
- return 0;
-}
-
static void gelic_net_get_wol(struct net_device *netdev,
struct ethtool_wolinfo *wol)
{
@@ -1411,10 +1396,6 @@ static const struct ethtool_ops gelic_ether_ethtool_ops = {
.get_settings = gelic_ether_get_settings,
.set_settings = gelic_ether_set_settings,
.get_link = ethtool_op_get_link,
- .get_tx_csum = ethtool_op_get_tx_csum,
- .set_tx_csum = ethtool_op_set_tx_csum,
- .get_rx_csum = gelic_net_get_rx_csum,
- .set_rx_csum = gelic_net_set_rx_csum,
.get_wol = gelic_net_get_wol,
.set_wol = gelic_net_set_wol,
};
@@ -1512,7 +1493,11 @@ int __devinit gelic_net_setup_netdev(struct net_device *netdev,
int status;
u64 v1, v2;
+ netdev->hw_features = NETIF_F_IP_CSUM | NETIF_F_RXCSUM;
+
netdev->features = NETIF_F_IP_CSUM;
+ if (GELIC_CARD_RX_CSUM_DEFAULT)
+ netdev->features |= NETIF_F_RXCSUM;
status = lv1_net_control(bus_id(card), dev_id(card),
GELIC_LV1_GET_MAC_ADDRESS,
@@ -1756,7 +1741,6 @@ static int __devinit ps3_gelic_driver_probe(struct ps3_system_bus_device *dev)
/* setup card structure */
card->irq_mask = GELIC_CARD_RXINT | GELIC_CARD_TXINT |
GELIC_CARD_PORT_STATUS_CHANGED;
- card->rx_csum = GELIC_CARD_RX_CSUM_DEFAULT;
if (gelic_card_init_chain(card, &card->tx_chain,
diff --git a/drivers/net/ps3_gelic_net.h b/drivers/net/ps3_gelic_net.h
index 32521ae5e824..d9a55b93898b 100644
--- a/drivers/net/ps3_gelic_net.h
+++ b/drivers/net/ps3_gelic_net.h
@@ -117,7 +117,7 @@ enum gelic_descr_rx_error {
GELIC_DESCR_RXDATAERR = 0x00020000, /* IP packet format error */
GELIC_DESCR_RXCALERR = 0x00010000, /* cariier extension length
* error */
- GELIC_DESCR_RXCREXERR = 0x00008000, /* carrier extention error */
+ GELIC_DESCR_RXCREXERR = 0x00008000, /* carrier extension error */
GELIC_DESCR_RXMLTCST = 0x00004000, /* multicast address frame */
/* bit 13..0 reserved */
};
@@ -290,7 +290,6 @@ struct gelic_card {
struct gelic_descr_chain tx_chain;
struct gelic_descr_chain rx_chain;
int rx_dma_restart_required;
- int rx_csum;
/*
* tx_lock guards tx descriptor list and
* tx_dma_progress.
@@ -377,8 +376,6 @@ extern int gelic_net_setup_netdev(struct net_device *netdev,
/* shared ethtool ops */
extern void gelic_net_get_drvinfo(struct net_device *netdev,
struct ethtool_drvinfo *info);
-extern u32 gelic_net_get_rx_csum(struct net_device *netdev);
-extern int gelic_net_set_rx_csum(struct net_device *netdev, u32 data);
extern void gelic_net_poll_controller(struct net_device *netdev);
#endif /* _GELIC_NET_H */
diff --git a/drivers/net/ps3_gelic_wireless.c b/drivers/net/ps3_gelic_wireless.c
index 4a624a29393f..2e62938c0f82 100644
--- a/drivers/net/ps3_gelic_wireless.c
+++ b/drivers/net/ps3_gelic_wireless.c
@@ -814,7 +814,7 @@ static int gelic_wl_set_auth(struct net_device *netdev,
* you will not decide suitable cipher from
* its beacon.
* You should have knowledge about the AP's
- * cipher infomation in other method prior to
+ * cipher information in other method prior to
* the association.
*/
if (!precise_ie())
@@ -2581,10 +2581,6 @@ static const struct net_device_ops gelic_wl_netdevice_ops = {
static const struct ethtool_ops gelic_wl_ethtool_ops = {
.get_drvinfo = gelic_net_get_drvinfo,
.get_link = gelic_wl_get_link,
- .get_tx_csum = ethtool_op_get_tx_csum,
- .set_tx_csum = ethtool_op_set_tx_csum,
- .get_rx_csum = gelic_net_get_rx_csum,
- .set_rx_csum = gelic_net_set_rx_csum,
};
static void __devinit gelic_wl_setup_netdev_ops(struct net_device *netdev)
diff --git a/drivers/net/pxa168_eth.c b/drivers/net/pxa168_eth.c
index 1b63c8aef121..89f7540d90f9 100644
--- a/drivers/net/pxa168_eth.c
+++ b/drivers/net/pxa168_eth.c
@@ -462,7 +462,7 @@ static u32 hash_function(unsigned char *mac_addr_orig)
* pep - ETHERNET .
* mac_addr - MAC address.
* skip - if 1, skip this address.Used in case of deleting an entry which is a
- * part of chain in the hash table.We cant just delete the entry since
+ * part of chain in the hash table.We can't just delete the entry since
* that will break the chain.We need to defragment the tables time to
* time.
* rd - 0 Discard packet upon match.
diff --git a/drivers/net/qla3xxx.c b/drivers/net/qla3xxx.c
index 348b4f1367c9..d495a6859fd9 100644
--- a/drivers/net/qla3xxx.c
+++ b/drivers/net/qla3xxx.c
@@ -1725,7 +1725,7 @@ static int ql_get_settings(struct net_device *ndev, struct ethtool_cmd *ecmd)
}
ecmd->advertising = ql_supported_modes(qdev);
ecmd->autoneg = ql_get_auto_cfg_status(qdev);
- ecmd->speed = ql_get_speed(qdev);
+ ethtool_cmd_speed_set(ecmd, ql_get_speed(qdev));
ecmd->duplex = ql_get_full_dup(qdev);
return 0;
}
@@ -3468,7 +3468,7 @@ static int ql_adapter_up(struct ql3_adapter *qdev)
{
struct net_device *ndev = qdev->ndev;
int err;
- unsigned long irq_flags = IRQF_SAMPLE_RANDOM | IRQF_SHARED;
+ unsigned long irq_flags = IRQF_SHARED;
unsigned long hw_flags;
if (ql_alloc_mem_resources(qdev)) {
diff --git a/drivers/net/qla3xxx.h b/drivers/net/qla3xxx.h
index 3362a661248c..73e234366a82 100644
--- a/drivers/net/qla3xxx.h
+++ b/drivers/net/qla3xxx.h
@@ -770,7 +770,7 @@ enum {
FM93C56A_WDS = 0x0,
FM93C56A_ERASE = 0x3,
FM93C56A_ERASE_ALL = 0x0,
-/* Command Extentions */
+/* Command Extensions */
FM93C56A_WEN_EXT = 0x3,
FM93C56A_WRITE_ALL_EXT = 0x1,
FM93C56A_WDS_EXT = 0x0,
diff --git a/drivers/net/qlcnic/qlcnic.h b/drivers/net/qlcnic/qlcnic.h
index dc44564ef6f9..480ef5cb6ef9 100644
--- a/drivers/net/qlcnic/qlcnic.h
+++ b/drivers/net/qlcnic/qlcnic.h
@@ -29,13 +29,15 @@
#include <linux/io.h>
#include <asm/byteorder.h>
+#include <linux/bitops.h>
+#include <linux/if_vlan.h>
#include "qlcnic_hdr.h"
#define _QLCNIC_LINUX_MAJOR 5
#define _QLCNIC_LINUX_MINOR 0
-#define _QLCNIC_LINUX_SUBVERSION 15
-#define QLCNIC_LINUX_VERSIONID "5.0.15"
+#define _QLCNIC_LINUX_SUBVERSION 18
+#define QLCNIC_LINUX_VERSIONID "5.0.18"
#define QLCNIC_DRV_IDC_VER 0x01
#define QLCNIC_DRIVER_VERSION ((_QLCNIC_LINUX_MAJOR << 16) |\
(_QLCNIC_LINUX_MINOR << 8) | (_QLCNIC_LINUX_SUBVERSION))
@@ -93,12 +95,11 @@
#define TX_IP_PKT 0x04
#define TX_TCP_LSO 0x05
#define TX_TCP_LSO6 0x06
-#define TX_IPSEC 0x07
-#define TX_IPSEC_CMD 0x0a
#define TX_TCPV6_PKT 0x0b
#define TX_UDPV6_PKT 0x0c
/* Tx defines */
+#define QLCNIC_MAX_FRAGS_PER_TX 14
#define MAX_TSO_HEADER_DESC 2
#define MGMT_CMD_DESC_RESV 4
#define TX_STOP_THRESH ((MAX_SKB_FRAGS >> 2) + MAX_TSO_HEADER_DESC \
@@ -117,7 +118,6 @@
#define PHAN_PEG_RCV_INITIALIZED 0xff01
#define NUM_RCV_DESC_RINGS 3
-#define NUM_STS_DESC_RINGS 4
#define RCV_RING_NORMAL 0
#define RCV_RING_JUMBO 1
@@ -200,7 +200,7 @@ struct rcv_desc {
__le16 reserved;
__le32 buffer_length; /* allocated buffer length (usually 2K) */
__le64 addr_buffer;
-};
+} __packed;
/* opcode field in status_desc */
#define QLCNIC_SYN_OFFLOAD 0x03
@@ -292,6 +292,7 @@ struct uni_data_desc{
/* Flash Defines and Structures */
#define QLCNIC_FLT_LOCATION 0x3F1000
#define QLCNIC_FW_IMAGE_REGION 0x74
+#define QLCNIC_BOOTLD_REGION 0X72
struct qlcnic_flt_header {
u16 version;
u16 len;
@@ -306,7 +307,7 @@ struct qlcnic_flt_entry {
u8 reserved1;
u32 size;
u32 start_addr;
- u32 end_add;
+ u32 end_addr;
};
/* Magic number to let user know flash is programmed */
@@ -365,12 +366,6 @@ struct qlcnic_skb_frag {
u64 length;
};
-struct qlcnic_recv_crb {
- u32 crb_rcv_producer[NUM_RCV_DESC_RINGS];
- u32 crb_sts_consumer[NUM_STS_DESC_RINGS];
- u32 sw_int_mask[NUM_STS_DESC_RINGS];
-};
-
/* Following defines are for the state of the buffers */
#define QLCNIC_BUFFER_FREE 0
#define QLCNIC_BUFFER_BUSY 1
@@ -387,10 +382,10 @@ struct qlcnic_cmd_buffer {
/* In rx_buffer, we do not need multiple fragments as is a single buffer */
struct qlcnic_rx_buffer {
- struct list_head list;
+ u16 ref_handle;
struct sk_buff *skb;
+ struct list_head list;
u64 dma;
- u16 ref_handle;
};
/* Board types */
@@ -398,6 +393,48 @@ struct qlcnic_rx_buffer {
#define QLCNIC_XGBE 0x02
/*
+ * Interrupt coalescing defaults. The defaults are for 1500 MTU. It is
+ * adjusted based on configured MTU.
+ */
+#define QLCNIC_DEFAULT_INTR_COALESCE_RX_TIME_US 3
+#define QLCNIC_DEFAULT_INTR_COALESCE_RX_PACKETS 256
+
+#define QLCNIC_INTR_DEFAULT 0x04
+#define QLCNIC_CONFIG_INTR_COALESCE 3
+
+struct qlcnic_nic_intr_coalesce {
+ u8 type;
+ u8 sts_ring_mask;
+ u16 rx_packets;
+ u16 rx_time_us;
+ u16 flag;
+ u32 timer_out;
+};
+
+struct qlcnic_dump_template_hdr {
+ __le32 type;
+ __le32 offset;
+ __le32 size;
+ __le32 cap_mask;
+ __le32 num_entries;
+ __le32 version;
+ __le32 timestamp;
+ __le32 checksum;
+ __le32 drv_cap_mask;
+ __le32 sys_info[3];
+ __le32 saved_state[16];
+ __le32 cap_sizes[8];
+ __le32 rsvd[0];
+};
+
+struct qlcnic_fw_dump {
+ u8 clr; /* flag to indicate if dump is cleared */
+ u32 size; /* total size of the dump */
+ void *data; /* dump data area */
+ struct qlcnic_dump_template_hdr *tmpl_hdr;
+};
+
+/*
* One hardware_context{} per adapter
* contains interrupt info as well shared hardware info.
*/
@@ -415,6 +452,9 @@ struct qlcnic_hardware_context {
u8 linkup;
u16 port_type;
u16 board_type;
+
+ struct qlcnic_nic_intr_coalesce coal;
+ struct qlcnic_fw_dump fw_dump;
};
struct qlcnic_adapter_stats {
@@ -442,50 +482,49 @@ struct qlcnic_adapter_stats {
* be one Rcv Descriptor for normal packets, one for jumbo and may be others.
*/
struct qlcnic_host_rds_ring {
- u32 producer;
+ void __iomem *crb_rcv_producer;
+ struct rcv_desc *desc_head;
+ struct qlcnic_rx_buffer *rx_buf_arr;
u32 num_desc;
+ u32 producer;
u32 dma_size;
u32 skb_size;
u32 flags;
- void __iomem *crb_rcv_producer;
- struct rcv_desc *desc_head;
- struct qlcnic_rx_buffer *rx_buf_arr;
struct list_head free_list;
spinlock_t lock;
dma_addr_t phys_addr;
-};
+} ____cacheline_internodealigned_in_smp;
struct qlcnic_host_sds_ring {
u32 consumer;
u32 num_desc;
void __iomem *crb_sts_consumer;
- void __iomem *crb_intr_mask;
struct status_desc *desc_head;
struct qlcnic_adapter *adapter;
struct napi_struct napi;
struct list_head free_list[NUM_RCV_DESC_RINGS];
+ void __iomem *crb_intr_mask;
int irq;
dma_addr_t phys_addr;
char name[IFNAMSIZ+4];
-};
+} ____cacheline_internodealigned_in_smp;
struct qlcnic_host_tx_ring {
u32 producer;
- __le32 *hw_consumer;
u32 sw_consumer;
- void __iomem *crb_cmd_producer;
u32 num_desc;
-
- struct netdev_queue *txq;
-
- struct qlcnic_cmd_buffer *cmd_buf_arr;
+ void __iomem *crb_cmd_producer;
struct cmd_desc_type0 *desc_head;
+ struct qlcnic_cmd_buffer *cmd_buf_arr;
+ __le32 *hw_consumer;
+
dma_addr_t phys_addr;
dma_addr_t hw_cons_phys_addr;
-};
+ struct netdev_queue *txq;
+} ____cacheline_internodealigned_in_smp;
/*
* Receive context. There is one such structure per instance of the
@@ -494,12 +533,12 @@ struct qlcnic_host_tx_ring {
* present elsewhere.
*/
struct qlcnic_recv_context {
+ struct qlcnic_host_rds_ring *rds_rings;
+ struct qlcnic_host_sds_ring *sds_rings;
u32 state;
u16 context_id;
u16 virt_port;
- struct qlcnic_host_rds_ring *rds_rings;
- struct qlcnic_host_sds_ring *sds_rings;
};
/* HW context creation */
@@ -538,9 +577,6 @@ struct qlcnic_recv_context {
#define QLCNIC_CDRP_CMD_DESTROY_RX_CTX 0x00000008
#define QLCNIC_CDRP_CMD_CREATE_TX_CTX 0x00000009
#define QLCNIC_CDRP_CMD_DESTROY_TX_CTX 0x0000000a
-#define QLCNIC_CDRP_CMD_SETUP_STATISTICS 0x0000000e
-#define QLCNIC_CDRP_CMD_GET_STATISTICS 0x0000000f
-#define QLCNIC_CDRP_CMD_DELETE_STATISTICS 0x00000010
#define QLCNIC_CDRP_CMD_SET_MTU 0x00000012
#define QLCNIC_CDRP_CMD_READ_PHY 0x00000013
#define QLCNIC_CDRP_CMD_WRITE_PHY 0x00000014
@@ -549,17 +585,11 @@ struct qlcnic_recv_context {
#define QLCNIC_CDRP_CMD_SET_FLOW_CTL 0x00000017
#define QLCNIC_CDRP_CMD_READ_MAX_MTU 0x00000018
#define QLCNIC_CDRP_CMD_READ_MAX_LRO 0x00000019
-#define QLCNIC_CDRP_CMD_CONFIGURE_TOE 0x0000001a
-#define QLCNIC_CDRP_CMD_FUNC_ATTRIB 0x0000001b
-#define QLCNIC_CDRP_CMD_READ_PEXQ_PARAMETERS 0x0000001c
-#define QLCNIC_CDRP_CMD_GET_LIC_CAPABILITIES 0x0000001d
-#define QLCNIC_CDRP_CMD_READ_MAX_LRO_PER_BOARD 0x0000001e
#define QLCNIC_CDRP_CMD_MAC_ADDRESS 0x0000001f
#define QLCNIC_CDRP_CMD_GET_PCI_INFO 0x00000020
#define QLCNIC_CDRP_CMD_GET_NIC_INFO 0x00000021
#define QLCNIC_CDRP_CMD_SET_NIC_INFO 0x00000022
-#define QLCNIC_CDRP_CMD_RESET_NPAR 0x00000023
#define QLCNIC_CDRP_CMD_GET_ESWITCH_CAPABILITY 0x00000024
#define QLCNIC_CDRP_CMD_TOGGLE_ESWITCH 0x00000025
#define QLCNIC_CDRP_CMD_GET_ESWITCH_STATUS 0x00000026
@@ -567,8 +597,12 @@ struct qlcnic_recv_context {
#define QLCNIC_CDRP_CMD_CONFIGURE_ESWITCH 0x00000028
#define QLCNIC_CDRP_CMD_GET_ESWITCH_PORT_CONFIG 0x00000029
#define QLCNIC_CDRP_CMD_GET_ESWITCH_STATS 0x0000002a
+#define QLCNIC_CDRP_CMD_CONFIG_PORT 0x0000002E
+#define QLCNIC_CDRP_CMD_TEMP_SIZE 0x0000002f
+#define QLCNIC_CDRP_CMD_GET_TEMP_HDR 0x00000030
#define QLCNIC_RCODE_SUCCESS 0
+#define QLCNIC_RCODE_NOT_SUPPORTED 9
#define QLCNIC_RCODE_TIMEOUT 17
#define QLCNIC_DESTROY_CTX_RESET 0
@@ -597,14 +631,14 @@ struct qlcnic_hostrq_sds_ring {
__le32 ring_size; /* Ring entries */
__le16 msi_index;
__le16 rsvd; /* Padding */
-};
+} __packed;
struct qlcnic_hostrq_rds_ring {
__le64 host_phys_addr; /* Ring base addr */
__le64 buff_size; /* Packet buffer size */
__le32 ring_size; /* Ring entries */
__le32 ring_kind; /* Class of ring */
-};
+} __packed;
struct qlcnic_hostrq_rx_ctx {
__le64 host_rsp_dma_addr; /* Response dma'd here */
@@ -625,17 +659,17 @@ struct qlcnic_hostrq_rx_ctx {
- N hostrq_rds_rings
- N hostrq_sds_rings */
char data[0];
-};
+} __packed;
struct qlcnic_cardrsp_rds_ring{
__le32 host_producer_crb; /* Crb to use */
__le32 rsvd1; /* Padding */
-};
+} __packed;
struct qlcnic_cardrsp_sds_ring {
__le32 host_consumer_crb; /* Crb to use */
__le32 interrupt_crb; /* Crb to use */
-};
+} __packed;
struct qlcnic_cardrsp_rx_ctx {
/* These ring offsets are relative to data[0] below */
@@ -654,7 +688,7 @@ struct qlcnic_cardrsp_rx_ctx {
- N cardrsp_rds_rings
- N cardrs_sds_rings */
char data[0];
-};
+} __packed;
#define SIZEOF_HOSTRQ_RX(HOSTRQ_RX, rds_rings, sds_rings) \
(sizeof(HOSTRQ_RX) + \
@@ -674,7 +708,7 @@ struct qlcnic_hostrq_cds_ring {
__le64 host_phys_addr; /* Ring base addr */
__le32 ring_size; /* Ring entries */
__le32 rsvd; /* Padding */
-};
+} __packed;
struct qlcnic_hostrq_tx_ctx {
__le64 host_rsp_dma_addr; /* Response dma'd here */
@@ -689,12 +723,12 @@ struct qlcnic_hostrq_tx_ctx {
__le16 rsvd3; /* Padding */
struct qlcnic_hostrq_cds_ring cds_ring; /* Desc of cds ring */
u8 reserved[128]; /* future expansion */
-};
+} __packed;
struct qlcnic_cardrsp_cds_ring {
__le32 host_producer_crb; /* Crb to use */
__le32 interrupt_crb; /* Crb to use */
-};
+} __packed;
struct qlcnic_cardrsp_tx_ctx {
__le32 host_ctx_state; /* Starting state */
@@ -703,7 +737,7 @@ struct qlcnic_cardrsp_tx_ctx {
u8 virt_port; /* Virtual/Logical id of port */
struct qlcnic_cardrsp_cds_ring cds_ring; /* Card cds settings */
u8 reserved[128]; /* future expansion */
-};
+} __packed;
#define SIZEOF_HOSTRQ_TX(HOSTRQ_TX) (sizeof(HOSTRQ_TX))
#define SIZEOF_CARDRSP_TX(CARDRSP_TX) (sizeof(CARDRSP_TX))
@@ -737,40 +771,6 @@ struct qlcnic_mac_list_s {
uint8_t mac_addr[ETH_ALEN+2];
};
-/*
- * Interrupt coalescing defaults. The defaults are for 1500 MTU. It is
- * adjusted based on configured MTU.
- */
-#define QLCNIC_DEFAULT_INTR_COALESCE_RX_TIME_US 3
-#define QLCNIC_DEFAULT_INTR_COALESCE_RX_PACKETS 256
-#define QLCNIC_DEFAULT_INTR_COALESCE_TX_PACKETS 64
-#define QLCNIC_DEFAULT_INTR_COALESCE_TX_TIME_US 4
-
-#define QLCNIC_INTR_DEFAULT 0x04
-
-union qlcnic_nic_intr_coalesce_data {
- struct {
- u16 rx_packets;
- u16 rx_time_us;
- u16 tx_packets;
- u16 tx_time_us;
- } data;
- u64 word;
-};
-
-struct qlcnic_nic_intr_coalesce {
- u16 stats_time_us;
- u16 rate_sample_time;
- u16 flags;
- u16 rsvd_1;
- u32 low_threshold;
- u32 high_threshold;
- union qlcnic_nic_intr_coalesce_data normal;
- union qlcnic_nic_intr_coalesce_data low;
- union qlcnic_nic_intr_coalesce_data high;
- union qlcnic_nic_intr_coalesce_data irq;
-};
-
#define QLCNIC_HOST_REQUEST 0x13
#define QLCNIC_REQUEST 0x14
@@ -782,50 +782,20 @@ struct qlcnic_nic_intr_coalesce {
/*
* Driver --> Firmware
*/
-#define QLCNIC_H2C_OPCODE_START 0
-#define QLCNIC_H2C_OPCODE_CONFIG_RSS 1
-#define QLCNIC_H2C_OPCODE_CONFIG_RSS_TBL 2
-#define QLCNIC_H2C_OPCODE_CONFIG_INTR_COALESCE 3
-#define QLCNIC_H2C_OPCODE_CONFIG_LED 4
-#define QLCNIC_H2C_OPCODE_CONFIG_PROMISCUOUS 5
-#define QLCNIC_H2C_OPCODE_CONFIG_L2_MAC 6
-#define QLCNIC_H2C_OPCODE_LRO_REQUEST 7
-#define QLCNIC_H2C_OPCODE_GET_SNMP_STATS 8
-#define QLCNIC_H2C_OPCODE_PROXY_START_REQUEST 9
-#define QLCNIC_H2C_OPCODE_PROXY_STOP_REQUEST 10
-#define QLCNIC_H2C_OPCODE_PROXY_SET_MTU 11
-#define QLCNIC_H2C_OPCODE_PROXY_SET_VPORT_MISS_MODE 12
-#define QLCNIC_H2C_OPCODE_GET_FINGER_PRINT_REQUEST 13
-#define QLCNIC_H2C_OPCODE_INSTALL_LICENSE_REQUEST 14
-#define QLCNIC_H2C_OPCODE_GET_LICENSE_CAPABILITY_REQUEST 15
-#define QLCNIC_H2C_OPCODE_GET_NET_STATS 16
-#define QLCNIC_H2C_OPCODE_PROXY_UPDATE_P2V 17
-#define QLCNIC_H2C_OPCODE_CONFIG_IPADDR 18
-#define QLCNIC_H2C_OPCODE_PROXY_STOP_DONE 20
-#define QLCNIC_H2C_OPCODE_GET_LINKEVENT 21
-#define QLCNIC_C2C_OPCODE 22
-#define QLCNIC_H2C_OPCODE_CONFIG_BRIDGING 23
-#define QLCNIC_H2C_OPCODE_CONFIG_HW_LRO 24
-#define QLCNIC_H2C_OPCODE_LAST 25
+#define QLCNIC_H2C_OPCODE_CONFIG_RSS 0x1
+#define QLCNIC_H2C_OPCODE_CONFIG_INTR_COALESCE 0x3
+#define QLCNIC_H2C_OPCODE_CONFIG_LED 0x4
+#define QLCNIC_H2C_OPCODE_LRO_REQUEST 0x7
+#define QLCNIC_H2C_OPCODE_SET_MAC_RECEIVE_MODE 0xc
+#define QLCNIC_H2C_OPCODE_CONFIG_IPADDR 0x12
+#define QLCNIC_H2C_OPCODE_GET_LINKEVENT 0x15
+#define QLCNIC_H2C_OPCODE_CONFIG_BRIDGING 0x17
+#define QLCNIC_H2C_OPCODE_CONFIG_HW_LRO 0x18
/*
* Firmware --> Driver
*/
-#define QLCNIC_C2H_OPCODE_START 128
-#define QLCNIC_C2H_OPCODE_CONFIG_RSS_RESPONSE 129
-#define QLCNIC_C2H_OPCODE_CONFIG_RSS_TBL_RESPONSE 130
-#define QLCNIC_C2H_OPCODE_CONFIG_MAC_RESPONSE 131
-#define QLCNIC_C2H_OPCODE_CONFIG_PROMISCUOUS_RESPONSE 132
-#define QLCNIC_C2H_OPCODE_CONFIG_L2_MAC_RESPONSE 133
-#define QLCNIC_C2H_OPCODE_LRO_DELETE_RESPONSE 134
-#define QLCNIC_C2H_OPCODE_LRO_ADD_FAILURE_RESPONSE 135
-#define QLCNIC_C2H_OPCODE_GET_SNMP_STATS 136
-#define QLCNIC_C2H_OPCODE_GET_FINGER_PRINT_REPLY 137
-#define QLCNIC_C2H_OPCODE_INSTALL_LICENSE_REPLY 138
-#define QLCNIC_C2H_OPCODE_GET_LICENSE_CAPABILITIES_REPLY 139
-#define QLCNIC_C2H_OPCODE_GET_NET_STATS_RESPONSE 140
#define QLCNIC_C2H_OPCODE_GET_LINKEVENT_RESPONSE 141
-#define QLCNIC_C2H_OPCODE_LAST 142
#define VPORT_MISS_MODE_DROP 0 /* drop all unmatched */
#define VPORT_MISS_MODE_ACCEPT_ALL 1 /* accept all packets */
@@ -894,7 +864,7 @@ struct qlcnic_nic_req {
__le64 qhdr;
__le64 req_hdr;
__le64 words[6];
-};
+} __packed;
struct qlcnic_mac_req {
u8 op;
@@ -905,7 +875,7 @@ struct qlcnic_mac_req {
struct qlcnic_vlan_req {
__le16 vlan_id;
__le16 rsvd[3];
-};
+} __packed;
struct qlcnic_ipaddr {
__be32 ipv4;
@@ -928,7 +898,8 @@ struct qlcnic_ipaddr {
#define QLCNIC_IS_MSI_FAMILY(adapter) \
((adapter)->flags & (QLCNIC_MSI_ENABLED | QLCNIC_MSIX_ENABLED))
-#define MSIX_ENTRIES_PER_ADAPTER NUM_STS_DESC_RINGS
+#define QLCNIC_DEF_NUM_STS_DESC_RINGS 4
+#define QLCNIC_MIN_NUM_RSS_RINGS 2
#define QLCNIC_MSIX_TBL_SPACE 8192
#define QLCNIC_PCI_REG_MSIX_TBL 0x44
#define QLCNIC_MSIX_TBL_PGSIZE 4096
@@ -941,6 +912,7 @@ struct qlcnic_ipaddr {
#define __QLCNIC_RESETTING 2
#define __QLCNIC_START_FW 4
#define __QLCNIC_AER 5
+#define __QLCNIC_DIAG_RES_ALLOC 6
#define QLCNIC_INTERRUPT_TEST 1
#define QLCNIC_LOOPBACK_TEST 2
@@ -964,14 +936,14 @@ struct qlcnic_filter_hash {
};
struct qlcnic_adapter {
- struct qlcnic_hardware_context ahw;
-
+ struct qlcnic_hardware_context *ahw;
+ struct qlcnic_recv_context *recv_ctx;
+ struct qlcnic_host_tx_ring *tx_ring;
struct net_device *netdev;
struct pci_dev *pdev;
- struct list_head mac_list;
- spinlock_t tx_clean_lock;
- spinlock_t mac_learn_lock;
+ unsigned long state;
+ u32 flags;
u16 num_txd;
u16 num_rxd;
@@ -982,14 +954,12 @@ struct qlcnic_adapter {
u8 max_rds_rings;
u8 max_sds_rings;
u8 msix_supported;
- u8 rx_csum;
u8 portnum;
u8 physical_port;
u8 reset_context;
u8 mc_enabled;
u8 max_mc_count;
- u8 rss_supported;
u8 fw_wait_cnt;
u8 fw_fail_cnt;
u8 tx_timeo_cnt;
@@ -1014,7 +984,6 @@ struct qlcnic_adapter {
u32 fw_hal_version;
u32 capabilities;
- u32 flags;
u32 irq;
u32 temp;
@@ -1032,31 +1001,29 @@ struct qlcnic_adapter {
u8 mac_addr[ETH_ALEN];
u64 dev_rst_time;
+ unsigned long vlans[BITS_TO_LONGS(VLAN_N_VID)];
- struct vlan_group *vlgrp;
struct qlcnic_npar_info *npars;
struct qlcnic_eswitch *eswitch;
struct qlcnic_nic_template *nic_ops;
struct qlcnic_adapter_stats stats;
-
- struct qlcnic_recv_context recv_ctx;
- struct qlcnic_host_tx_ring *tx_ring;
+ struct list_head mac_list;
void __iomem *tgt_mask_reg;
void __iomem *tgt_status_reg;
void __iomem *crb_int_state_reg;
void __iomem *isr_int_vec;
- struct msix_entry msix_entries[MSIX_ENTRIES_PER_ADAPTER];
+ struct msix_entry *msix_entries;
struct delayed_work fw_work;
- struct qlcnic_nic_intr_coalesce coal;
struct qlcnic_filter_hash fhash;
- unsigned long state;
+ spinlock_t tx_clean_lock;
+ spinlock_t mac_learn_lock;
__le32 file_prd_off; /*File fw product offset*/
u32 fw_version;
const struct firmware *fw;
@@ -1078,7 +1045,7 @@ struct qlcnic_info {
__le16 min_tx_bw;
__le16 max_tx_bw;
u8 reserved2[104];
-};
+} __packed;
struct qlcnic_pci_info {
__le16 id; /* pci function id */
@@ -1092,7 +1059,7 @@ struct qlcnic_pci_info {
u8 mac[ETH_ALEN];
u8 reserved2[106];
-};
+} __packed;
struct qlcnic_npar_info {
u16 pvid;
@@ -1209,15 +1176,160 @@ struct __qlcnic_esw_statistics {
__le64 local_frames;
__le64 numbytes;
__le64 rsvd[3];
-};
+} __packed;
struct qlcnic_esw_statistics {
struct __qlcnic_esw_statistics rx;
struct __qlcnic_esw_statistics tx;
};
-int qlcnic_fw_cmd_query_phy(struct qlcnic_adapter *adapter, u32 reg, u32 *val);
-int qlcnic_fw_cmd_set_phy(struct qlcnic_adapter *adapter, u32 reg, u32 val);
+struct qlcnic_common_entry_hdr {
+ __le32 type;
+ __le32 offset;
+ __le32 cap_size;
+ u8 mask;
+ u8 rsvd[2];
+ u8 flags;
+} __packed;
+
+struct __crb {
+ __le32 addr;
+ u8 stride;
+ u8 rsvd1[3];
+ __le32 data_size;
+ __le32 no_ops;
+ __le32 rsvd2[4];
+} __packed;
+
+struct __ctrl {
+ __le32 addr;
+ u8 stride;
+ u8 index_a;
+ __le16 timeout;
+ __le32 data_size;
+ __le32 no_ops;
+ u8 opcode;
+ u8 index_v;
+ u8 shl_val;
+ u8 shr_val;
+ __le32 val1;
+ __le32 val2;
+ __le32 val3;
+} __packed;
+
+struct __cache {
+ __le32 addr;
+ u8 stride;
+ u8 rsvd;
+ __le16 init_tag_val;
+ __le32 size;
+ __le32 no_ops;
+ __le32 ctrl_addr;
+ __le32 ctrl_val;
+ __le32 read_addr;
+ u8 read_addr_stride;
+ u8 read_addr_num;
+ u8 rsvd1[2];
+} __packed;
+
+struct __ocm {
+ u8 rsvd[8];
+ __le32 size;
+ __le32 no_ops;
+ u8 rsvd1[8];
+ __le32 read_addr;
+ __le32 read_addr_stride;
+} __packed;
+
+struct __mem {
+ u8 rsvd[24];
+ __le32 addr;
+ __le32 size;
+} __packed;
+
+struct __mux {
+ __le32 addr;
+ u8 rsvd[4];
+ __le32 size;
+ __le32 no_ops;
+ __le32 val;
+ __le32 val_stride;
+ __le32 read_addr;
+ u8 rsvd2[4];
+} __packed;
+
+struct __queue {
+ __le32 sel_addr;
+ __le16 stride;
+ u8 rsvd[2];
+ __le32 size;
+ __le32 no_ops;
+ u8 rsvd2[8];
+ __le32 read_addr;
+ u8 read_addr_stride;
+ u8 read_addr_cnt;
+ u8 rsvd3[2];
+} __packed;
+
+struct qlcnic_dump_entry {
+ struct qlcnic_common_entry_hdr hdr;
+ union {
+ struct __crb crb;
+ struct __cache cache;
+ struct __ocm ocm;
+ struct __mem mem;
+ struct __mux mux;
+ struct __queue que;
+ struct __ctrl ctrl;
+ } region;
+} __packed;
+
+enum op_codes {
+ QLCNIC_DUMP_NOP = 0,
+ QLCNIC_DUMP_READ_CRB = 1,
+ QLCNIC_DUMP_READ_MUX = 2,
+ QLCNIC_DUMP_QUEUE = 3,
+ QLCNIC_DUMP_BRD_CONFIG = 4,
+ QLCNIC_DUMP_READ_OCM = 6,
+ QLCNIC_DUMP_PEG_REG = 7,
+ QLCNIC_DUMP_L1_DTAG = 8,
+ QLCNIC_DUMP_L1_ITAG = 9,
+ QLCNIC_DUMP_L1_DATA = 11,
+ QLCNIC_DUMP_L1_INST = 12,
+ QLCNIC_DUMP_L2_DTAG = 21,
+ QLCNIC_DUMP_L2_ITAG = 22,
+ QLCNIC_DUMP_L2_DATA = 23,
+ QLCNIC_DUMP_L2_INST = 24,
+ QLCNIC_DUMP_READ_ROM = 71,
+ QLCNIC_DUMP_READ_MEM = 72,
+ QLCNIC_DUMP_READ_CTRL = 98,
+ QLCNIC_DUMP_TLHDR = 99,
+ QLCNIC_DUMP_RDEND = 255
+};
+
+#define QLCNIC_DUMP_WCRB BIT_0
+#define QLCNIC_DUMP_RWCRB BIT_1
+#define QLCNIC_DUMP_ANDCRB BIT_2
+#define QLCNIC_DUMP_ORCRB BIT_3
+#define QLCNIC_DUMP_POLLCRB BIT_4
+#define QLCNIC_DUMP_RD_SAVE BIT_5
+#define QLCNIC_DUMP_WRT_SAVED BIT_6
+#define QLCNIC_DUMP_MOD_SAVE_ST BIT_7
+#define QLCNIC_DUMP_SKIP BIT_7
+
+#define QLCNIC_DUMP_MASK_MIN 3
+#define QLCNIC_DUMP_MASK_DEF 0x0f
+#define QLCNIC_DUMP_MASK_MAX 0xff
+#define QLCNIC_FORCE_FW_DUMP_KEY 0xdeadfeed
+
+struct qlcnic_dump_operations {
+ enum op_codes opcode;
+ u32 (*handler)(struct qlcnic_adapter *,
+ struct qlcnic_dump_entry *, u32 *);
+};
+
+int qlcnic_fw_cmd_get_minidump_temp(struct qlcnic_adapter *adapter);
+int qlcnic_fw_cmd_set_port(struct qlcnic_adapter *adapter, u32 config);
u32 qlcnic_hw_read_wx_2M(struct qlcnic_adapter *adapter, ulong off);
int qlcnic_hw_write_wx_2M(struct qlcnic_adapter *, ulong off, u32 data);
@@ -1263,6 +1375,7 @@ int qlcnic_wol_supported(struct qlcnic_adapter *adapter);
int qlcnic_config_led(struct qlcnic_adapter *adapter, u32 state, u32 rate);
void qlcnic_prune_lb_filters(struct qlcnic_adapter *adapter);
void qlcnic_delete_lb_filters(struct qlcnic_adapter *adapter);
+int qlcnic_dump_fw(struct qlcnic_adapter *);
/* Functions from qlcnic_init.c */
int qlcnic_load_firmware(struct qlcnic_adapter *adapter);
@@ -1273,7 +1386,7 @@ int qlcnic_pinit_from_rom(struct qlcnic_adapter *adapter);
int qlcnic_setup_idc_param(struct qlcnic_adapter *adapter);
int qlcnic_check_flash_fw_ver(struct qlcnic_adapter *adapter);
-int qlcnic_rom_fast_read(struct qlcnic_adapter *adapter, int addr, int *valp);
+int qlcnic_rom_fast_read(struct qlcnic_adapter *adapter, u32 addr, u32 *valp);
int qlcnic_rom_fast_read_words(struct qlcnic_adapter *adapter, int addr,
u8 *bytes, size_t size);
int qlcnic_alloc_sw_resources(struct qlcnic_adapter *adapter);
@@ -1293,7 +1406,7 @@ void qlcnic_release_tx_buffers(struct qlcnic_adapter *adapter);
int qlcnic_check_fw_status(struct qlcnic_adapter *adapter);
void qlcnic_watchdog_task(struct work_struct *work);
-void qlcnic_post_rx_buffers(struct qlcnic_adapter *adapter, u32 ringid,
+void qlcnic_post_rx_buffers(struct qlcnic_adapter *adapter,
struct qlcnic_host_rds_ring *rds_ring);
int qlcnic_process_rcv_ring(struct qlcnic_host_sds_ring *sds_ring, int max);
void qlcnic_set_multi(struct net_device *netdev);
@@ -1307,6 +1420,8 @@ void qlcnic_advert_link_change(struct qlcnic_adapter *adapter, int linkup);
int qlcnic_fw_cmd_set_mtu(struct qlcnic_adapter *adapter, int mtu);
int qlcnic_change_mtu(struct net_device *netdev, int new_mtu);
+u32 qlcnic_fix_features(struct net_device *netdev, u32 features);
+int qlcnic_set_features(struct net_device *netdev, u32 features);
int qlcnic_config_hw_lro(struct qlcnic_adapter *adapter, int enable);
int qlcnic_config_bridged_mode(struct qlcnic_adapter *adapter, u32 enable);
int qlcnic_send_lro_cleanup(struct qlcnic_adapter *adapter);
@@ -1321,6 +1436,9 @@ u32 qlcnic_issue_cmd(struct qlcnic_adapter *adapter,
void qlcnic_diag_free_res(struct net_device *netdev, int max_sds_rings);
int qlcnic_diag_alloc_res(struct net_device *netdev, int test);
netdev_tx_t qlcnic_xmit_frame(struct sk_buff *skb, struct net_device *netdev);
+int qlcnic_validate_max_rss(struct net_device *netdev, u8 max_hw, u8 val);
+int qlcnic_set_max_rss(struct qlcnic_adapter *adapter, u8 data);
+void qlcnic_dev_request_reset(struct qlcnic_adapter *);
/* Management functions */
int qlcnic_get_mac_address(struct qlcnic_adapter *, u8*);
@@ -1378,8 +1496,7 @@ static const struct qlcnic_brdinfo qlcnic_boards[] = {
static inline u32 qlcnic_tx_avail(struct qlcnic_host_tx_ring *tx_ring)
{
- smp_mb();
- if (tx_ring->producer < tx_ring->sw_consumer)
+ if (likely(tx_ring->producer < tx_ring->sw_consumer))
return tx_ring->sw_consumer - tx_ring->producer;
else
return tx_ring->sw_consumer + tx_ring->num_desc -
diff --git a/drivers/net/qlcnic/qlcnic_ctx.c b/drivers/net/qlcnic/qlcnic_ctx.c
index 27631f23b3fd..bab041a5c758 100644
--- a/drivers/net/qlcnic/qlcnic_ctx.c
+++ b/drivers/net/qlcnic/qlcnic_ctx.c
@@ -64,14 +64,105 @@ qlcnic_issue_cmd(struct qlcnic_adapter *adapter,
return rcode;
}
+static uint32_t qlcnic_temp_checksum(uint32_t *temp_buffer, u16 temp_size)
+{
+ uint64_t sum = 0;
+ int count = temp_size / sizeof(uint32_t);
+ while (count-- > 0)
+ sum += *temp_buffer++;
+ while (sum >> 32)
+ sum = (sum & 0xFFFFFFFF) + (sum >> 32);
+ return ~sum;
+}
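
For context on qlcnic_temp_checksum() above: it is an end-around-carry sum over every 32-bit word of the template, including the checksum word the firmware stores in the header, so a valid template folds to 0xffffffff and the function returns 0. A small stand-alone model with an assumed two-word buffer (illustration only, not kernel code):

#include <stdint.h>
#include <assert.h>

/* User-space model of the fold used by qlcnic_temp_checksum(). */
static uint32_t fold_csum(const uint32_t *words, unsigned int count)
{
	uint64_t sum = 0;

	while (count--)
		sum += *words++;
	while (sum >> 32)			/* fold carries back in */
		sum = (sum & 0xffffffff) + (sum >> 32);
	return ~(uint32_t)sum;
}

int main(void)
{
	/* Assumed values: 0x00000001 + 0xfffffffe folds to 0xffffffff,
	 * so the complement is 0, i.e. "checksum valid". */
	uint32_t good[] = { 0x00000001, 0xfffffffe };

	assert(fold_csum(good, 2) == 0);
	return 0;
}
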
+
+int qlcnic_fw_cmd_get_minidump_temp(struct qlcnic_adapter *adapter)
+{
+ int err, i;
+ u16 temp_size;
+ void *tmp_addr;
+ u32 version, csum, *template, *tmp_buf;
+ struct qlcnic_hardware_context *ahw;
+ struct qlcnic_dump_template_hdr *tmpl_hdr, *tmp_tmpl;
+ dma_addr_t tmp_addr_t = 0;
+
+ ahw = adapter->ahw;
+ err = qlcnic_issue_cmd(adapter,
+ adapter->ahw->pci_func,
+ adapter->fw_hal_version,
+ 0,
+ 0,
+ 0,
+ QLCNIC_CDRP_CMD_TEMP_SIZE);
+ if (err != QLCNIC_RCODE_SUCCESS) {
+ err = QLCRD32(adapter, QLCNIC_ARG1_CRB_OFFSET);
+ dev_err(&adapter->pdev->dev,
+ "Failed to get template size %d\n", err);
+ err = -EIO;
+ return err;
+ }
+ version = QLCRD32(adapter, QLCNIC_ARG3_CRB_OFFSET);
+ temp_size = QLCRD32(adapter, QLCNIC_ARG2_CRB_OFFSET);
+ if (!temp_size)
+ return -EIO;
+
+ tmp_addr = dma_alloc_coherent(&adapter->pdev->dev, temp_size,
+ &tmp_addr_t, GFP_KERNEL);
+ if (!tmp_addr) {
+ dev_err(&adapter->pdev->dev,
+ "Can't get memory for FW dump template\n");
+ return -ENOMEM;
+ }
+ err = qlcnic_issue_cmd(adapter,
+ adapter->ahw->pci_func,
+ adapter->fw_hal_version,
+ LSD(tmp_addr_t),
+ MSD(tmp_addr_t),
+ temp_size,
+ QLCNIC_CDRP_CMD_GET_TEMP_HDR);
+
+ if (err != QLCNIC_RCODE_SUCCESS) {
+ dev_err(&adapter->pdev->dev,
+ "Failed to get mini dump template header %d\n", err);
+ err = -EIO;
+ goto error;
+ }
+ tmp_tmpl = (struct qlcnic_dump_template_hdr *) tmp_addr;
+ csum = qlcnic_temp_checksum((uint32_t *) tmp_addr, temp_size);
+ if (csum) {
+ dev_err(&adapter->pdev->dev,
+ "Template header checksum validation failed\n");
+ err = -EIO;
+ goto error;
+ }
+ ahw->fw_dump.tmpl_hdr = vzalloc(temp_size);
+ if (!ahw->fw_dump.tmpl_hdr) {
+ err = -EIO;
+ goto error;
+ }
+ tmp_buf = (u32 *) tmp_addr;
+ template = (u32 *) ahw->fw_dump.tmpl_hdr;
+ for (i = 0; i < temp_size/sizeof(u32); i++)
+ *template++ = __le32_to_cpu(*tmp_buf++);
+
+ tmpl_hdr = ahw->fw_dump.tmpl_hdr;
+ if (tmpl_hdr->cap_mask > QLCNIC_DUMP_MASK_DEF &&
+ tmpl_hdr->cap_mask <= QLCNIC_DUMP_MASK_MAX)
+ tmpl_hdr->drv_cap_mask = tmpl_hdr->cap_mask;
+ else
+ tmpl_hdr->drv_cap_mask = QLCNIC_DUMP_MASK_DEF;
+error:
+ dma_free_coherent(&adapter->pdev->dev, temp_size, tmp_addr, tmp_addr_t);
+ return err;
+}
+
int
qlcnic_fw_cmd_set_mtu(struct qlcnic_adapter *adapter, int mtu)
{
- struct qlcnic_recv_context *recv_ctx = &adapter->recv_ctx;
+ struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
if (recv_ctx->state == QLCNIC_HOST_CTX_STATE_ACTIVE) {
if (qlcnic_issue_cmd(adapter,
- adapter->ahw.pci_func,
+ adapter->ahw->pci_func,
adapter->fw_hal_version,
recv_ctx->context_id,
mtu,
@@ -102,12 +193,12 @@ qlcnic_fw_cmd_create_rx_ctx(struct qlcnic_adapter *adapter)
dma_addr_t hostrq_phys_addr, cardrsp_phys_addr;
u64 phys_addr;
- int i, nrds_rings, nsds_rings;
+ u8 i, nrds_rings, nsds_rings;
size_t rq_size, rsp_size;
u32 cap, reg, val, reg2;
int err;
- struct qlcnic_recv_context *recv_ctx = &adapter->recv_ctx;
+ struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
nrds_rings = adapter->max_rds_rings;
nsds_rings = adapter->max_sds_rings;
@@ -119,14 +210,14 @@ qlcnic_fw_cmd_create_rx_ctx(struct qlcnic_adapter *adapter)
SIZEOF_CARDRSP_RX(struct qlcnic_cardrsp_rx_ctx, nrds_rings,
nsds_rings);
- addr = pci_alloc_consistent(adapter->pdev,
- rq_size, &hostrq_phys_addr);
+ addr = dma_alloc_coherent(&adapter->pdev->dev, rq_size,
+ &hostrq_phys_addr, GFP_KERNEL);
if (addr == NULL)
return -ENOMEM;
prq = (struct qlcnic_hostrq_rx_ctx *)addr;
- addr = pci_alloc_consistent(adapter->pdev,
- rsp_size, &cardrsp_phys_addr);
+ addr = dma_alloc_coherent(&adapter->pdev->dev, rsp_size,
+ &cardrsp_phys_addr, GFP_KERNEL);
if (addr == NULL) {
err = -ENOMEM;
goto out_free_rq;
@@ -151,7 +242,7 @@ qlcnic_fw_cmd_create_rx_ctx(struct qlcnic_adapter *adapter)
prq->num_rds_rings = cpu_to_le16(nrds_rings);
prq->num_sds_rings = cpu_to_le16(nsds_rings);
- prq->rds_ring_offset = cpu_to_le32(0);
+ prq->rds_ring_offset = 0;
val = le32_to_cpu(prq->rds_ring_offset) +
(sizeof(struct qlcnic_hostrq_rds_ring) * nrds_rings);
@@ -187,7 +278,7 @@ qlcnic_fw_cmd_create_rx_ctx(struct qlcnic_adapter *adapter)
phys_addr = hostrq_phys_addr;
err = qlcnic_issue_cmd(adapter,
- adapter->ahw.pci_func,
+ adapter->ahw->pci_func,
adapter->fw_hal_version,
(u32)(phys_addr >> 32),
(u32)(phys_addr & 0xffffffff),
@@ -207,7 +298,7 @@ qlcnic_fw_cmd_create_rx_ctx(struct qlcnic_adapter *adapter)
rds_ring = &recv_ctx->rds_rings[i];
reg = le32_to_cpu(prsp_rds[i].host_producer_crb);
- rds_ring->crb_rcv_producer = adapter->ahw.pci_base0 + reg;
+ rds_ring->crb_rcv_producer = adapter->ahw->pci_base0 + reg;
}
prsp_sds = ((struct qlcnic_cardrsp_sds_ring *)
@@ -219,8 +310,8 @@ qlcnic_fw_cmd_create_rx_ctx(struct qlcnic_adapter *adapter)
reg = le32_to_cpu(prsp_sds[i].host_consumer_crb);
reg2 = le32_to_cpu(prsp_sds[i].interrupt_crb);
- sds_ring->crb_sts_consumer = adapter->ahw.pci_base0 + reg;
- sds_ring->crb_intr_mask = adapter->ahw.pci_base0 + reg2;
+ sds_ring->crb_sts_consumer = adapter->ahw->pci_base0 + reg;
+ sds_ring->crb_intr_mask = adapter->ahw->pci_base0 + reg2;
}
recv_ctx->state = le32_to_cpu(prsp->host_ctx_state);
@@ -228,19 +319,20 @@ qlcnic_fw_cmd_create_rx_ctx(struct qlcnic_adapter *adapter)
recv_ctx->virt_port = prsp->virt_port;
out_free_rsp:
- pci_free_consistent(adapter->pdev, rsp_size, prsp, cardrsp_phys_addr);
+ dma_free_coherent(&adapter->pdev->dev, rsp_size, prsp,
+ cardrsp_phys_addr);
out_free_rq:
- pci_free_consistent(adapter->pdev, rq_size, prq, hostrq_phys_addr);
+ dma_free_coherent(&adapter->pdev->dev, rq_size, prq, hostrq_phys_addr);
return err;
}
static void
qlcnic_fw_cmd_destroy_rx_ctx(struct qlcnic_adapter *adapter)
{
- struct qlcnic_recv_context *recv_ctx = &adapter->recv_ctx;
+ struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
if (qlcnic_issue_cmd(adapter,
- adapter->ahw.pci_func,
+ adapter->ahw->pci_func,
adapter->fw_hal_version,
recv_ctx->context_id,
QLCNIC_DESTROY_CTX_RESET,
@@ -274,14 +366,14 @@ qlcnic_fw_cmd_create_tx_ctx(struct qlcnic_adapter *adapter)
*(tx_ring->hw_consumer) = 0;
rq_size = SIZEOF_HOSTRQ_TX(struct qlcnic_hostrq_tx_ctx);
- rq_addr = pci_alloc_consistent(adapter->pdev,
- rq_size, &rq_phys_addr);
+ rq_addr = dma_alloc_coherent(&adapter->pdev->dev, rq_size,
+ &rq_phys_addr, GFP_KERNEL);
if (!rq_addr)
return -ENOMEM;
rsp_size = SIZEOF_CARDRSP_TX(struct qlcnic_cardrsp_tx_ctx);
- rsp_addr = pci_alloc_consistent(adapter->pdev,
- rsp_size, &rsp_phys_addr);
+ rsp_addr = dma_alloc_coherent(&adapter->pdev->dev, rsp_size,
+ &rsp_phys_addr, GFP_KERNEL);
if (!rsp_addr) {
err = -ENOMEM;
goto out_free_rq;
@@ -313,7 +405,7 @@ qlcnic_fw_cmd_create_tx_ctx(struct qlcnic_adapter *adapter)
phys_addr = rq_phys_addr;
err = qlcnic_issue_cmd(adapter,
- adapter->ahw.pci_func,
+ adapter->ahw->pci_func,
adapter->fw_hal_version,
(u32)(phys_addr >> 32),
((u32)phys_addr & 0xffffffff),
@@ -322,7 +414,7 @@ qlcnic_fw_cmd_create_tx_ctx(struct qlcnic_adapter *adapter)
if (err == QLCNIC_RCODE_SUCCESS) {
temp = le32_to_cpu(prsp->cds_ring.host_producer_crb);
- tx_ring->crb_cmd_producer = adapter->ahw.pci_base0 + temp;
+ tx_ring->crb_cmd_producer = adapter->ahw->pci_base0 + temp;
adapter->tx_context_id =
le16_to_cpu(prsp->context_id);
@@ -332,10 +424,11 @@ qlcnic_fw_cmd_create_tx_ctx(struct qlcnic_adapter *adapter)
err = -EIO;
}
- pci_free_consistent(adapter->pdev, rsp_size, rsp_addr, rsp_phys_addr);
+ dma_free_coherent(&adapter->pdev->dev, rsp_size, rsp_addr,
+ rsp_phys_addr);
out_free_rq:
- pci_free_consistent(adapter->pdev, rq_size, rq_addr, rq_phys_addr);
+ dma_free_coherent(&adapter->pdev->dev, rq_size, rq_addr, rq_phys_addr);
return err;
}
@@ -344,7 +437,7 @@ static void
qlcnic_fw_cmd_destroy_tx_ctx(struct qlcnic_adapter *adapter)
{
if (qlcnic_issue_cmd(adapter,
- adapter->ahw.pci_func,
+ adapter->ahw->pci_func,
adapter->fw_hal_version,
adapter->tx_context_id,
QLCNIC_DESTROY_CTX_RESET,
@@ -357,33 +450,15 @@ qlcnic_fw_cmd_destroy_tx_ctx(struct qlcnic_adapter *adapter)
}
int
-qlcnic_fw_cmd_query_phy(struct qlcnic_adapter *adapter, u32 reg, u32 *val)
-{
-
- if (qlcnic_issue_cmd(adapter,
- adapter->ahw.pci_func,
- adapter->fw_hal_version,
- reg,
- 0,
- 0,
- QLCNIC_CDRP_CMD_READ_PHY)) {
-
- return -EIO;
- }
-
- return QLCRD32(adapter, QLCNIC_ARG1_CRB_OFFSET);
-}
-
-int
-qlcnic_fw_cmd_set_phy(struct qlcnic_adapter *adapter, u32 reg, u32 val)
+qlcnic_fw_cmd_set_port(struct qlcnic_adapter *adapter, u32 config)
{
return qlcnic_issue_cmd(adapter,
- adapter->ahw.pci_func,
+ adapter->ahw->pci_func,
adapter->fw_hal_version,
- reg,
- val,
+ config,
+ 0,
0,
- QLCNIC_CDRP_CMD_WRITE_PHY);
+ QLCNIC_CDRP_CMD_CONFIG_PORT);
}
int qlcnic_alloc_hw_resources(struct qlcnic_adapter *adapter)
@@ -398,20 +473,19 @@ int qlcnic_alloc_hw_resources(struct qlcnic_adapter *adapter)
struct pci_dev *pdev = adapter->pdev;
- recv_ctx = &adapter->recv_ctx;
+ recv_ctx = adapter->recv_ctx;
tx_ring = adapter->tx_ring;
- tx_ring->hw_consumer = (__le32 *)pci_alloc_consistent(pdev, sizeof(u32),
- &tx_ring->hw_cons_phys_addr);
+ tx_ring->hw_consumer = (__le32 *) dma_alloc_coherent(&pdev->dev,
+ sizeof(u32), &tx_ring->hw_cons_phys_addr, GFP_KERNEL);
if (tx_ring->hw_consumer == NULL) {
dev_err(&pdev->dev, "failed to allocate tx consumer\n");
return -ENOMEM;
}
- *(tx_ring->hw_consumer) = 0;
/* cmd desc ring */
- addr = pci_alloc_consistent(pdev, TX_DESC_RINGSIZE(tx_ring),
- &tx_ring->phys_addr);
+ addr = dma_alloc_coherent(&pdev->dev, TX_DESC_RINGSIZE(tx_ring),
+ &tx_ring->phys_addr, GFP_KERNEL);
if (addr == NULL) {
dev_err(&pdev->dev, "failed to allocate tx desc ring\n");
@@ -423,9 +497,9 @@ int qlcnic_alloc_hw_resources(struct qlcnic_adapter *adapter)
for (ring = 0; ring < adapter->max_rds_rings; ring++) {
rds_ring = &recv_ctx->rds_rings[ring];
- addr = pci_alloc_consistent(adapter->pdev,
+ addr = dma_alloc_coherent(&adapter->pdev->dev,
RCV_DESC_RINGSIZE(rds_ring),
- &rds_ring->phys_addr);
+ &rds_ring->phys_addr, GFP_KERNEL);
if (addr == NULL) {
dev_err(&pdev->dev,
"failed to allocate rds ring [%d]\n", ring);
@@ -439,9 +513,9 @@ int qlcnic_alloc_hw_resources(struct qlcnic_adapter *adapter)
for (ring = 0; ring < adapter->max_sds_rings; ring++) {
sds_ring = &recv_ctx->sds_rings[ring];
- addr = pci_alloc_consistent(adapter->pdev,
+ addr = dma_alloc_coherent(&adapter->pdev->dev,
STATUS_DESC_RINGSIZE(sds_ring),
- &sds_ring->phys_addr);
+ &sds_ring->phys_addr, GFP_KERNEL);
if (addr == NULL) {
dev_err(&pdev->dev,
"failed to allocate sds ring [%d]\n", ring);
@@ -501,11 +575,11 @@ void qlcnic_free_hw_resources(struct qlcnic_adapter *adapter)
struct qlcnic_host_tx_ring *tx_ring;
int ring;
- recv_ctx = &adapter->recv_ctx;
+ recv_ctx = adapter->recv_ctx;
tx_ring = adapter->tx_ring;
if (tx_ring->hw_consumer != NULL) {
- pci_free_consistent(adapter->pdev,
+ dma_free_coherent(&adapter->pdev->dev,
sizeof(u32),
tx_ring->hw_consumer,
tx_ring->hw_cons_phys_addr);
@@ -513,7 +587,7 @@ void qlcnic_free_hw_resources(struct qlcnic_adapter *adapter)
}
if (tx_ring->desc_head != NULL) {
- pci_free_consistent(adapter->pdev,
+ dma_free_coherent(&adapter->pdev->dev,
TX_DESC_RINGSIZE(tx_ring),
tx_ring->desc_head, tx_ring->phys_addr);
tx_ring->desc_head = NULL;
@@ -523,7 +597,7 @@ void qlcnic_free_hw_resources(struct qlcnic_adapter *adapter)
rds_ring = &recv_ctx->rds_rings[ring];
if (rds_ring->desc_head != NULL) {
- pci_free_consistent(adapter->pdev,
+ dma_free_coherent(&adapter->pdev->dev,
RCV_DESC_RINGSIZE(rds_ring),
rds_ring->desc_head,
rds_ring->phys_addr);
@@ -535,7 +609,7 @@ void qlcnic_free_hw_resources(struct qlcnic_adapter *adapter)
sds_ring = &recv_ctx->sds_rings[ring];
if (sds_ring->desc_head != NULL) {
- pci_free_consistent(adapter->pdev,
+ dma_free_coherent(&adapter->pdev->dev,
STATUS_DESC_RINGSIZE(sds_ring),
sds_ring->desc_head,
sds_ring->phys_addr);
@@ -551,9 +625,9 @@ int qlcnic_get_mac_address(struct qlcnic_adapter *adapter, u8 *mac)
int err;
u32 arg1;
- arg1 = adapter->ahw.pci_func | BIT_8;
+ arg1 = adapter->ahw->pci_func | BIT_8;
err = qlcnic_issue_cmd(adapter,
- adapter->ahw.pci_func,
+ adapter->ahw->pci_func,
adapter->fw_hal_version,
arg1,
0,
@@ -582,15 +656,15 @@ int qlcnic_get_nic_info(struct qlcnic_adapter *adapter,
void *nic_info_addr;
size_t nic_size = sizeof(struct qlcnic_info);
- nic_info_addr = pci_alloc_consistent(adapter->pdev,
- nic_size, &nic_dma_t);
+ nic_info_addr = dma_alloc_coherent(&adapter->pdev->dev, nic_size,
+ &nic_dma_t, GFP_KERNEL);
if (!nic_info_addr)
return -ENOMEM;
memset(nic_info_addr, 0, nic_size);
nic_info = (struct qlcnic_info *) nic_info_addr;
err = qlcnic_issue_cmd(adapter,
- adapter->ahw.pci_func,
+ adapter->ahw->pci_func,
adapter->fw_hal_version,
MSD(nic_dma_t),
LSD(nic_dma_t),
@@ -623,7 +697,8 @@ int qlcnic_get_nic_info(struct qlcnic_adapter *adapter,
err = -EIO;
}
- pci_free_consistent(adapter->pdev, nic_size, nic_info_addr, nic_dma_t);
+ dma_free_coherent(&adapter->pdev->dev, nic_size, nic_info_addr,
+ nic_dma_t);
return err;
}
@@ -639,8 +714,8 @@ int qlcnic_set_nic_info(struct qlcnic_adapter *adapter, struct qlcnic_info *nic)
if (adapter->op_mode != QLCNIC_MGMT_FUNC)
return err;
- nic_info_addr = pci_alloc_consistent(adapter->pdev, nic_size,
- &nic_dma_t);
+ nic_info_addr = dma_alloc_coherent(&adapter->pdev->dev, nic_size,
+ &nic_dma_t, GFP_KERNEL);
if (!nic_info_addr)
return -ENOMEM;
@@ -659,7 +734,7 @@ int qlcnic_set_nic_info(struct qlcnic_adapter *adapter, struct qlcnic_info *nic)
nic_info->max_tx_bw = cpu_to_le16(nic->max_tx_bw);
err = qlcnic_issue_cmd(adapter,
- adapter->ahw.pci_func,
+ adapter->ahw->pci_func,
adapter->fw_hal_version,
MSD(nic_dma_t),
LSD(nic_dma_t),
@@ -672,7 +747,8 @@ int qlcnic_set_nic_info(struct qlcnic_adapter *adapter, struct qlcnic_info *nic)
err = -EIO;
}
- pci_free_consistent(adapter->pdev, nic_size, nic_info_addr, nic_dma_t);
+ dma_free_coherent(&adapter->pdev->dev, nic_size, nic_info_addr,
+ nic_dma_t);
return err;
}
@@ -687,15 +763,15 @@ int qlcnic_get_pci_info(struct qlcnic_adapter *adapter,
size_t npar_size = sizeof(struct qlcnic_pci_info);
size_t pci_size = npar_size * QLCNIC_MAX_PCI_FUNC;
- pci_info_addr = pci_alloc_consistent(adapter->pdev, pci_size,
- &pci_info_dma_t);
+ pci_info_addr = dma_alloc_coherent(&adapter->pdev->dev, pci_size,
+ &pci_info_dma_t, GFP_KERNEL);
if (!pci_info_addr)
return -ENOMEM;
memset(pci_info_addr, 0, pci_size);
npar = (struct qlcnic_pci_info *) pci_info_addr;
err = qlcnic_issue_cmd(adapter,
- adapter->ahw.pci_func,
+ adapter->ahw->pci_func,
adapter->fw_hal_version,
MSD(pci_info_dma_t),
LSD(pci_info_dma_t),
@@ -721,7 +797,7 @@ int qlcnic_get_pci_info(struct qlcnic_adapter *adapter,
err = -EIO;
}
- pci_free_consistent(adapter->pdev, pci_size, pci_info_addr,
+ dma_free_coherent(&adapter->pdev->dev, pci_size, pci_info_addr,
pci_info_dma_t);
return err;
}
@@ -741,7 +817,7 @@ int qlcnic_config_port_mirroring(struct qlcnic_adapter *adapter, u8 id,
arg1 |= pci_func << 8;
err = qlcnic_issue_cmd(adapter,
- adapter->ahw.pci_func,
+ adapter->ahw->pci_func,
adapter->fw_hal_version,
arg1,
0,
@@ -775,14 +851,14 @@ int qlcnic_get_port_stats(struct qlcnic_adapter *adapter, const u8 func,
return -ENOMEM;
if (adapter->op_mode != QLCNIC_MGMT_FUNC &&
- func != adapter->ahw.pci_func) {
+ func != adapter->ahw->pci_func) {
dev_err(&adapter->pdev->dev,
"Not privilege to query stats for func=%d", func);
return -EIO;
}
- stats_addr = pci_alloc_consistent(adapter->pdev, stats_size,
- &stats_dma_t);
+ stats_addr = dma_alloc_coherent(&adapter->pdev->dev, stats_size,
+ &stats_dma_t, GFP_KERNEL);
if (!stats_addr) {
dev_err(&adapter->pdev->dev, "Unable to allocate memory\n");
return -ENOMEM;
@@ -793,7 +869,7 @@ int qlcnic_get_port_stats(struct qlcnic_adapter *adapter, const u8 func,
arg1 |= rx_tx << 15 | stats_size << 16;
err = qlcnic_issue_cmd(adapter,
- adapter->ahw.pci_func,
+ adapter->ahw->pci_func,
adapter->fw_hal_version,
arg1,
MSD(stats_dma_t),
@@ -816,7 +892,7 @@ int qlcnic_get_port_stats(struct qlcnic_adapter *adapter, const u8 func,
esw_stats->numbytes = le64_to_cpu(stats->numbytes);
}
- pci_free_consistent(adapter->pdev, stats_size, stats_addr,
+ dma_free_coherent(&adapter->pdev->dev, stats_size, stats_addr,
stats_dma_t);
return err;
}
@@ -900,7 +976,7 @@ int qlcnic_clear_esw_stats(struct qlcnic_adapter *adapter, const u8 func_esw,
arg1 |= BIT_14 | rx_tx << 15;
return qlcnic_issue_cmd(adapter,
- adapter->ahw.pci_func,
+ adapter->ahw->pci_func,
adapter->fw_hal_version,
arg1,
0,
@@ -921,7 +997,7 @@ __qlcnic_get_eswitch_port_config(struct qlcnic_adapter *adapter,
u8 pci_func;
pci_func = (*arg1 >> 8);
err = qlcnic_issue_cmd(adapter,
- adapter->ahw.pci_func,
+ adapter->ahw->pci_func,
adapter->fw_hal_version,
*arg1,
0,
@@ -999,7 +1075,7 @@ int qlcnic_config_switch_port(struct qlcnic_adapter *adapter,
}
err = qlcnic_issue_cmd(adapter,
- adapter->ahw.pci_func,
+ adapter->ahw->pci_func,
adapter->fw_hal_version,
arg1,
arg2,
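
The qlcnic_fw_cmd_get_minidump_temp() path above validates the template it DMAs from the firmware with a one's-complement style checksum: every 32-bit word is summed into a 64-bit accumulator, the carries are folded back into the low word, and the complement is returned, so a well-formed buffer (presumably one whose checksum field holds the complement of the folded sum of the remaining words) checksums to zero and the "if (csum)" error path is skipped. A minimal user-space sketch of that property follows; the 4-word template and the producer step are illustrative assumptions, only the helper mirrors the driver code.

#include <stdint.h>
#include <stdio.h>

/* Same folding scheme as qlcnic_temp_checksum(): accumulate all 32-bit
 * words in 64 bits, fold the carries back into the low word, complement. */
static uint32_t temp_checksum(const uint32_t *buf, uint16_t size)
{
	uint64_t sum = 0;
	int count = size / sizeof(uint32_t);

	while (count-- > 0)
		sum += *buf++;
	while (sum >> 32)
		sum = (sum & 0xFFFFFFFF) + (sum >> 32);
	return (uint32_t)~sum;
}

int main(void)
{
	/* Hypothetical 4-word "template"; the last word is the checksum slot. */
	uint32_t tmpl[4] = { 0x12345678, 0x9abcdef0, 0x00000042, 0 };

	/* Producer side (assumed): store the complement of the folded sum. */
	tmpl[3] = temp_checksum(tmpl, sizeof(tmpl));

	/* Verifier side, as in the driver: a valid buffer checksums to 0. */
	printf("verify = 0x%08x\n", (unsigned)temp_checksum(tmpl, sizeof(tmpl)));
	return 0;
}
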
diff --git a/drivers/net/qlcnic/qlcnic_ethtool.c b/drivers/net/qlcnic/qlcnic_ethtool.c
index 45b2755d6cba..9efc690a289f 100644
--- a/drivers/net/qlcnic/qlcnic_ethtool.c
+++ b/drivers/net/qlcnic/qlcnic_ethtool.c
@@ -150,10 +150,10 @@ qlcnic_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
{
struct qlcnic_adapter *adapter = netdev_priv(dev);
int check_sfp_module = 0;
- u16 pcifn = adapter->ahw.pci_func;
+ u16 pcifn = adapter->ahw->pci_func;
/* read which mode */
- if (adapter->ahw.port_type == QLCNIC_GBE) {
+ if (adapter->ahw->port_type == QLCNIC_GBE) {
ecmd->supported = (SUPPORTED_10baseT_Half |
SUPPORTED_10baseT_Full |
SUPPORTED_100baseT_Half |
@@ -166,11 +166,11 @@ qlcnic_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
ADVERTISED_1000baseT_Half |
ADVERTISED_1000baseT_Full);
- ecmd->speed = adapter->link_speed;
+ ethtool_cmd_speed_set(ecmd, adapter->link_speed);
ecmd->duplex = adapter->link_duplex;
ecmd->autoneg = adapter->link_autoneg;
- } else if (adapter->ahw.port_type == QLCNIC_XGBE) {
+ } else if (adapter->ahw->port_type == QLCNIC_XGBE) {
u32 val;
val = QLCRD32(adapter, QLCNIC_PORT_MODE_ADDR);
@@ -183,15 +183,15 @@ qlcnic_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
}
if (netif_running(dev) && adapter->has_link_events) {
- ecmd->speed = adapter->link_speed;
+ ethtool_cmd_speed_set(ecmd, adapter->link_speed);
ecmd->autoneg = adapter->link_autoneg;
ecmd->duplex = adapter->link_duplex;
goto skip;
}
val = QLCRD32(adapter, P3P_LINK_SPEED_REG(pcifn));
- ecmd->speed = P3P_LINK_SPEED_MHZ *
- P3P_LINK_SPEED_VAL(pcifn, val);
+ ethtool_cmd_speed_set(ecmd, P3P_LINK_SPEED_MHZ *
+ P3P_LINK_SPEED_VAL(pcifn, val));
ecmd->duplex = DUPLEX_FULL;
ecmd->autoneg = AUTONEG_DISABLE;
} else
@@ -201,7 +201,7 @@ skip:
ecmd->phy_address = adapter->physical_port;
ecmd->transceiver = XCVR_EXTERNAL;
- switch (adapter->ahw.board_type) {
+ switch (adapter->ahw->board_type) {
case QLCNIC_BRDTYPE_P3P_REF_QG:
case QLCNIC_BRDTYPE_P3P_4_GB:
case QLCNIC_BRDTYPE_P3P_4_GB_MM:
@@ -238,7 +238,7 @@ skip:
ecmd->autoneg = AUTONEG_DISABLE;
break;
case QLCNIC_BRDTYPE_P3P_10G_TP:
- if (adapter->ahw.port_type == QLCNIC_XGBE) {
+ if (adapter->ahw->port_type == QLCNIC_XGBE) {
ecmd->autoneg = AUTONEG_DISABLE;
ecmd->supported |= (SUPPORTED_FIBRE | SUPPORTED_TP);
ecmd->advertising |=
@@ -256,7 +256,7 @@ skip:
break;
default:
dev_err(&adapter->pdev->dev, "Unsupported board model %d\n",
- adapter->ahw.board_type);
+ adapter->ahw->board_type);
return -EIO;
}
@@ -284,50 +284,44 @@ skip:
static int
qlcnic_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
{
+ u32 config = 0;
+ u32 ret = 0;
struct qlcnic_adapter *adapter = netdev_priv(dev);
- __u32 status;
+
+ if (adapter->ahw->port_type != QLCNIC_GBE)
+ return -EOPNOTSUPP;
/* read which mode */
- if (adapter->ahw.port_type == QLCNIC_GBE) {
- /* autonegotiation */
- if (qlcnic_fw_cmd_set_phy(adapter,
- QLCNIC_NIU_GB_MII_MGMT_ADDR_AUTONEG,
- ecmd->autoneg) != 0)
- return -EIO;
- else
- adapter->link_autoneg = ecmd->autoneg;
+ if (ecmd->duplex)
+ config |= 0x1;
- if (qlcnic_fw_cmd_query_phy(adapter,
- QLCNIC_NIU_GB_MII_MGMT_ADDR_PHY_STATUS,
- &status) != 0)
- return -EIO;
+ if (ecmd->autoneg)
+ config |= 0x2;
- switch (ecmd->speed) {
- case SPEED_10:
- qlcnic_set_phy_speed(status, 0);
- break;
- case SPEED_100:
- qlcnic_set_phy_speed(status, 1);
- break;
- case SPEED_1000:
- qlcnic_set_phy_speed(status, 2);
- break;
- }
+ switch (ethtool_cmd_speed(ecmd)) {
+ case SPEED_10:
+ config |= (0 << 8);
+ break;
+ case SPEED_100:
+ config |= (1 << 8);
+ break;
+ case SPEED_1000:
+ config |= (10 << 8);
+ break;
+ default:
+ return -EIO;
+ }
- if (ecmd->duplex == DUPLEX_HALF)
- qlcnic_clear_phy_duplex(status);
- if (ecmd->duplex == DUPLEX_FULL)
- qlcnic_set_phy_duplex(status);
- if (qlcnic_fw_cmd_set_phy(adapter,
- QLCNIC_NIU_GB_MII_MGMT_ADDR_PHY_STATUS,
- *((int *)&status)) != 0)
- return -EIO;
- else {
- adapter->link_speed = ecmd->speed;
- adapter->link_duplex = ecmd->duplex;
- }
- } else
+ ret = qlcnic_fw_cmd_set_port(adapter, config);
+
+ if (ret == QLCNIC_RCODE_NOT_SUPPORTED)
return -EOPNOTSUPP;
+ else if (ret)
+ return -EIO;
+
+ adapter->link_speed = ethtool_cmd_speed(ecmd);
+ adapter->link_duplex = ecmd->duplex;
+ adapter->link_autoneg = ecmd->autoneg;
if (!netif_running(dev))
return 0;
@@ -340,14 +334,14 @@ static void
qlcnic_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *p)
{
struct qlcnic_adapter *adapter = netdev_priv(dev);
- struct qlcnic_recv_context *recv_ctx = &adapter->recv_ctx;
+ struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
struct qlcnic_host_sds_ring *sds_ring;
u32 *regs_buff = p;
int ring, i = 0, j = 0;
memset(p, 0, qlcnic_get_regs_len(dev));
regs->version = (QLCNIC_ETHTOOL_REGS_VER << 24) |
- (adapter->ahw.revision_id << 16) | (adapter->pdev)->device;
+ (adapter->ahw->revision_id << 16) | (adapter->pdev)->device;
regs_buff[0] = (0xcafe0000 | (QLCNIC_DEV_INFO_SIZE & 0xffff));
regs_buff[1] = QLCNIC_MGMT_API_VERSION;
@@ -382,7 +376,7 @@ static u32 qlcnic_test_link(struct net_device *dev)
u32 val;
val = QLCRD32(adapter, CRB_XG_STATE_P3P);
- val = XG_LINK_STATE_P3P(adapter->ahw.pci_func, val);
+ val = XG_LINK_STATE_P3P(adapter->ahw->pci_func, val);
return (val == XG_LINK_UP_P3P) ? 0 : 1;
}
@@ -474,6 +468,39 @@ qlcnic_set_ringparam(struct net_device *dev,
return qlcnic_reset_context(adapter);
}
+static void qlcnic_get_channels(struct net_device *dev,
+ struct ethtool_channels *channel)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(dev);
+
+ channel->max_rx = rounddown_pow_of_two(min_t(int,
+ adapter->max_rx_ques, num_online_cpus()));
+ channel->max_tx = adapter->max_tx_ques;
+
+ channel->rx_count = adapter->max_sds_rings;
+ channel->tx_count = adapter->max_tx_ques;
+}
+
+static int qlcnic_set_channels(struct net_device *dev,
+ struct ethtool_channels *channel)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(dev);
+ int err;
+
+ if (channel->other_count || channel->combined_count ||
+ channel->tx_count != channel->max_tx)
+ return -EINVAL;
+
+ err = qlcnic_validate_max_rss(dev, channel->max_rx, channel->rx_count);
+ if (err)
+ return err;
+
+ err = qlcnic_set_max_rss(adapter, channel->rx_count);
+ netdev_info(dev, "allocated 0x%x sds rings\n",
+ adapter->max_sds_rings);
+ return err;
+}
+
static void
qlcnic_get_pauseparam(struct net_device *netdev,
struct ethtool_pauseparam *pause)
@@ -482,7 +509,7 @@ qlcnic_get_pauseparam(struct net_device *netdev,
int port = adapter->physical_port;
__u32 val;
- if (adapter->ahw.port_type == QLCNIC_GBE) {
+ if (adapter->ahw->port_type == QLCNIC_GBE) {
if ((port < 0) || (port > QLCNIC_NIU_MAX_GBE_PORTS))
return;
/* get flow control settings */
@@ -504,7 +531,7 @@ qlcnic_get_pauseparam(struct net_device *netdev,
pause->tx_pause = !(qlcnic_gb_get_gb3_mask(val));
break;
}
- } else if (adapter->ahw.port_type == QLCNIC_XGBE) {
+ } else if (adapter->ahw->port_type == QLCNIC_XGBE) {
if ((port < 0) || (port > QLCNIC_NIU_MAX_XG_PORTS))
return;
pause->rx_pause = 1;
@@ -515,7 +542,7 @@ qlcnic_get_pauseparam(struct net_device *netdev,
pause->tx_pause = !(qlcnic_xg_get_xg1_mask(val));
} else {
dev_err(&netdev->dev, "Unknown board type: %x\n",
- adapter->ahw.port_type);
+ adapter->ahw->port_type);
}
}
@@ -528,7 +555,7 @@ qlcnic_set_pauseparam(struct net_device *netdev,
__u32 val;
/* read mode */
- if (adapter->ahw.port_type == QLCNIC_GBE) {
+ if (adapter->ahw->port_type == QLCNIC_GBE) {
if ((port < 0) || (port > QLCNIC_NIU_MAX_GBE_PORTS))
return -EIO;
/* set flow control */
@@ -571,7 +598,7 @@ qlcnic_set_pauseparam(struct net_device *netdev,
break;
}
QLCWR32(adapter, QLCNIC_NIU_GB_PAUSE_CTL, val);
- } else if (adapter->ahw.port_type == QLCNIC_XGBE) {
+ } else if (adapter->ahw->port_type == QLCNIC_XGBE) {
if (!pause->rx_pause || pause->autoneg)
return -EOPNOTSUPP;
@@ -593,7 +620,7 @@ qlcnic_set_pauseparam(struct net_device *netdev,
QLCWR32(adapter, QLCNIC_NIU_XG_PAUSE_CTL, val);
} else {
dev_err(&netdev->dev, "Unknown board type: %x\n",
- adapter->ahw.port_type);
+ adapter->ahw->port_type);
}
return 0;
}
@@ -639,8 +666,8 @@ static int qlcnic_irq_test(struct net_device *netdev)
goto clear_it;
adapter->diag_cnt = 0;
- ret = qlcnic_issue_cmd(adapter, adapter->ahw.pci_func,
- adapter->fw_hal_version, adapter->portnum,
+ ret = qlcnic_issue_cmd(adapter, adapter->ahw->pci_func,
+ adapter->fw_hal_version, adapter->ahw->pci_func,
0, 0, 0x00000011);
if (ret)
goto done;
@@ -749,14 +776,14 @@ qlcnic_get_ethtool_stats(struct net_device *dev,
return;
memset(&port_stats, 0, sizeof(struct qlcnic_esw_statistics));
- ret = qlcnic_get_port_stats(adapter, adapter->ahw.pci_func,
+ ret = qlcnic_get_port_stats(adapter, adapter->ahw->pci_func,
QLCNIC_QUERY_RX_COUNTER, &port_stats.rx);
if (ret)
return;
qlcnic_fill_device_stats(&index, data, &port_stats.rx);
- ret = qlcnic_get_port_stats(adapter, adapter->ahw.pci_func,
+ ret = qlcnic_get_port_stats(adapter, adapter->ahw->pci_func,
QLCNIC_QUERY_TX_COUNTER, &port_stats.tx);
if (ret)
return;
@@ -764,115 +791,49 @@ qlcnic_get_ethtool_stats(struct net_device *dev,
qlcnic_fill_device_stats(&index, data, &port_stats.tx);
}
-static int qlcnic_set_tx_csum(struct net_device *dev, u32 data)
-{
- struct qlcnic_adapter *adapter = netdev_priv(dev);
-
- if ((adapter->flags & QLCNIC_ESWITCH_ENABLED))
- return -EOPNOTSUPP;
- if (data)
- dev->features |= (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM);
- else
- dev->features &= ~(NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM);
-
- return 0;
-
-}
-static u32 qlcnic_get_tx_csum(struct net_device *dev)
-{
- return dev->features & NETIF_F_IP_CSUM;
-}
-
-static u32 qlcnic_get_rx_csum(struct net_device *dev)
-{
- struct qlcnic_adapter *adapter = netdev_priv(dev);
- return adapter->rx_csum;
-}
-
-static int qlcnic_set_rx_csum(struct net_device *dev, u32 data)
-{
- struct qlcnic_adapter *adapter = netdev_priv(dev);
-
- if ((adapter->flags & QLCNIC_ESWITCH_ENABLED))
- return -EOPNOTSUPP;
- if (!!data) {
- adapter->rx_csum = !!data;
- return 0;
- }
-
- if (dev->features & NETIF_F_LRO) {
- if (qlcnic_config_hw_lro(adapter, QLCNIC_LRO_DISABLED))
- return -EIO;
-
- dev->features &= ~NETIF_F_LRO;
- qlcnic_send_lro_cleanup(adapter);
- dev_info(&adapter->pdev->dev,
- "disabling LRO as rx_csum is off\n");
- }
- adapter->rx_csum = !!data;
- return 0;
-}
-
-static u32 qlcnic_get_tso(struct net_device *dev)
-{
- return (dev->features & (NETIF_F_TSO | NETIF_F_TSO6)) != 0;
-}
-
-static int qlcnic_set_tso(struct net_device *dev, u32 data)
-{
- struct qlcnic_adapter *adapter = netdev_priv(dev);
- if (!(adapter->capabilities & QLCNIC_FW_CAPABILITY_TSO))
- return -EOPNOTSUPP;
- if (data)
- dev->features |= (NETIF_F_TSO | NETIF_F_TSO6);
- else
- dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO6);
-
- return 0;
-}
-
-static int qlcnic_blink_led(struct net_device *dev, u32 val)
+static int qlcnic_set_led(struct net_device *dev,
+ enum ethtool_phys_id_state state)
{
struct qlcnic_adapter *adapter = netdev_priv(dev);
int max_sds_rings = adapter->max_sds_rings;
- int dev_down = 0;
- int ret;
-
- if (!test_bit(__QLCNIC_DEV_UP, &adapter->state)) {
- dev_down = 1;
- if (test_and_set_bit(__QLCNIC_RESETTING, &adapter->state))
- return -EIO;
- ret = qlcnic_diag_alloc_res(dev, QLCNIC_LED_TEST);
- if (ret) {
- clear_bit(__QLCNIC_RESETTING, &adapter->state);
- return ret;
+ switch (state) {
+ case ETHTOOL_ID_ACTIVE:
+ if (!test_bit(__QLCNIC_DEV_UP, &adapter->state)) {
+ if (test_and_set_bit(__QLCNIC_RESETTING, &adapter->state))
+ return -EIO;
+
+ if (qlcnic_diag_alloc_res(dev, QLCNIC_LED_TEST)) {
+ clear_bit(__QLCNIC_RESETTING, &adapter->state);
+ return -EIO;
+ }
+ set_bit(__QLCNIC_DIAG_RES_ALLOC, &adapter->state);
}
- }
- ret = adapter->nic_ops->config_led(adapter, 1, 0xf);
- if (ret) {
+ if (adapter->nic_ops->config_led(adapter, 1, 0xf) == 0)
+ return 0;
+
dev_err(&adapter->pdev->dev,
"Failed to set LED blink state.\n");
- goto done;
- }
+ break;
- msleep_interruptible(val * 1000);
+ case ETHTOOL_ID_INACTIVE:
+ if (adapter->nic_ops->config_led(adapter, 0, 0xf))
+ dev_err(&adapter->pdev->dev,
+ "Failed to reset LED blink state.\n");
- ret = adapter->nic_ops->config_led(adapter, 0, 0xf);
- if (ret) {
- dev_err(&adapter->pdev->dev,
- "Failed to reset LED blink state.\n");
- goto done;
+ break;
+
+ default:
+ return -EINVAL;
}
-done:
- if (dev_down) {
+ if (test_and_clear_bit(__QLCNIC_DIAG_RES_ALLOC, &adapter->state)) {
qlcnic_diag_free_res(dev, max_sds_rings);
clear_bit(__QLCNIC_RESETTING, &adapter->state);
}
- return ret;
+ return -EIO;
}
static void
@@ -936,8 +897,8 @@ static int qlcnic_set_intr_coalesce(struct net_device *netdev,
*/
if (ethcoal->rx_coalesce_usecs > 0xffff ||
ethcoal->rx_max_coalesced_frames > 0xffff ||
- ethcoal->tx_coalesce_usecs > 0xffff ||
- ethcoal->tx_max_coalesced_frames > 0xffff ||
+ ethcoal->tx_coalesce_usecs ||
+ ethcoal->tx_max_coalesced_frames ||
ethcoal->rx_coalesce_usecs_irq ||
ethcoal->rx_max_coalesced_frames_irq ||
ethcoal->tx_coalesce_usecs_irq ||
@@ -959,21 +920,17 @@ static int qlcnic_set_intr_coalesce(struct net_device *netdev,
if (!ethcoal->rx_coalesce_usecs ||
!ethcoal->rx_max_coalesced_frames) {
- adapter->coal.flags = QLCNIC_INTR_DEFAULT;
- adapter->coal.normal.data.rx_time_us =
+ adapter->ahw->coal.flag = QLCNIC_INTR_DEFAULT;
+ adapter->ahw->coal.rx_time_us =
QLCNIC_DEFAULT_INTR_COALESCE_RX_TIME_US;
- adapter->coal.normal.data.rx_packets =
+ adapter->ahw->coal.rx_packets =
QLCNIC_DEFAULT_INTR_COALESCE_RX_PACKETS;
} else {
- adapter->coal.flags = 0;
- adapter->coal.normal.data.rx_time_us =
- ethcoal->rx_coalesce_usecs;
- adapter->coal.normal.data.rx_packets =
- ethcoal->rx_max_coalesced_frames;
+ adapter->ahw->coal.flag = 0;
+ adapter->ahw->coal.rx_time_us = ethcoal->rx_coalesce_usecs;
+ adapter->ahw->coal.rx_packets =
+ ethcoal->rx_max_coalesced_frames;
}
- adapter->coal.normal.data.tx_time_us = ethcoal->tx_coalesce_usecs;
- adapter->coal.normal.data.tx_packets =
- ethcoal->tx_max_coalesced_frames;
qlcnic_config_intr_coalesce(adapter);
@@ -988,66 +945,102 @@ static int qlcnic_get_intr_coalesce(struct net_device *netdev,
if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
return -EINVAL;
- ethcoal->rx_coalesce_usecs = adapter->coal.normal.data.rx_time_us;
- ethcoal->tx_coalesce_usecs = adapter->coal.normal.data.tx_time_us;
- ethcoal->rx_max_coalesced_frames =
- adapter->coal.normal.data.rx_packets;
- ethcoal->tx_max_coalesced_frames =
- adapter->coal.normal.data.tx_packets;
+ ethcoal->rx_coalesce_usecs = adapter->ahw->coal.rx_time_us;
+ ethcoal->rx_max_coalesced_frames = adapter->ahw->coal.rx_packets;
return 0;
}
-static int qlcnic_set_flags(struct net_device *netdev, u32 data)
+static u32 qlcnic_get_msglevel(struct net_device *netdev)
{
struct qlcnic_adapter *adapter = netdev_priv(netdev);
- int hw_lro;
-
- if (ethtool_invalid_flags(netdev, data, ETH_FLAG_LRO))
- return -EINVAL;
-
- if (!(adapter->capabilities & QLCNIC_FW_CAPABILITY_HW_LRO))
- return -EINVAL;
-
- if (!adapter->rx_csum) {
- dev_info(&adapter->pdev->dev, "rx csum is off, "
- "cannot toggle lro\n");
- return -EINVAL;
- }
- if ((data & ETH_FLAG_LRO) && (netdev->features & NETIF_F_LRO))
- return 0;
-
- if (data & ETH_FLAG_LRO) {
- hw_lro = QLCNIC_LRO_ENABLED;
- netdev->features |= NETIF_F_LRO;
- } else {
- hw_lro = 0;
- netdev->features &= ~NETIF_F_LRO;
- }
+ return adapter->msg_enable;
+}
- if (qlcnic_config_hw_lro(adapter, hw_lro))
- return -EIO;
+static void qlcnic_set_msglevel(struct net_device *netdev, u32 msglvl)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(netdev);
- if ((hw_lro == 0) && qlcnic_send_lro_cleanup(adapter))
- return -EIO;
+ adapter->msg_enable = msglvl;
+}
+static int
+qlcnic_get_dump_flag(struct net_device *netdev, struct ethtool_dump *dump)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(netdev);
+ struct qlcnic_fw_dump *fw_dump = &adapter->ahw->fw_dump;
+ dump->len = fw_dump->tmpl_hdr->size + fw_dump->size;
+ dump->flag = fw_dump->tmpl_hdr->drv_cap_mask;
+ dump->version = adapter->fw_version;
return 0;
}
-static u32 qlcnic_get_msglevel(struct net_device *netdev)
+static int
+qlcnic_get_dump_data(struct net_device *netdev, struct ethtool_dump *dump,
+ void *buffer)
{
+ int i, copy_sz;
+ u32 *hdr_ptr, *data;
struct qlcnic_adapter *adapter = netdev_priv(netdev);
+ struct qlcnic_fw_dump *fw_dump = &adapter->ahw->fw_dump;
- return adapter->msg_enable;
+ if (qlcnic_api_lock(adapter))
+ return -EIO;
+ if (!fw_dump->clr) {
+ netdev_info(netdev, "Dump not available\n");
+ qlcnic_api_unlock(adapter);
+ return -EINVAL;
+ }
+ /* Copy template header first */
+ copy_sz = fw_dump->tmpl_hdr->size;
+ hdr_ptr = (u32 *) fw_dump->tmpl_hdr;
+ data = (u32 *) buffer;
+ for (i = 0; i < copy_sz/sizeof(u32); i++)
+ *data++ = cpu_to_le32(*hdr_ptr++);
+
+ /* Copy captured dump data */
+ memcpy(buffer + copy_sz, fw_dump->data, fw_dump->size);
+ dump->len = copy_sz + fw_dump->size;
+ dump->flag = fw_dump->tmpl_hdr->drv_cap_mask;
+
+ /* Free dump area once data has been captured */
+ vfree(fw_dump->data);
+ fw_dump->data = NULL;
+ fw_dump->clr = 0;
+ qlcnic_api_unlock(adapter);
+
+ return 0;
}
-static void qlcnic_set_msglevel(struct net_device *netdev, u32 msglvl)
+static int
+qlcnic_set_dump(struct net_device *netdev, struct ethtool_dump *val)
{
+ int ret = 0;
struct qlcnic_adapter *adapter = netdev_priv(netdev);
+ struct qlcnic_fw_dump *fw_dump = &adapter->ahw->fw_dump;
- adapter->msg_enable = msglvl;
+ if (val->flag == QLCNIC_FORCE_FW_DUMP_KEY) {
+ netdev_info(netdev, "Forcing a FW dump\n");
+ qlcnic_dev_request_reset(adapter);
+ } else {
+ if (val->flag > QLCNIC_DUMP_MASK_MAX ||
+ val->flag < QLCNIC_DUMP_MASK_MIN) {
+ netdev_info(netdev,
+ "Invalid dump level: 0x%x\n", val->flag);
+ ret = -EINVAL;
+ goto out;
+ }
+ if (qlcnic_api_lock(adapter))
+ return -EIO;
+ fw_dump->tmpl_hdr->drv_cap_mask = val->flag & 0xff;
+ qlcnic_api_unlock(adapter);
+ netdev_info(netdev, "Driver mask changed to: 0x%x\n",
+ fw_dump->tmpl_hdr->drv_cap_mask);
+ }
+out:
+ return ret;
}
const struct ethtool_ops qlcnic_ethtool_ops = {
@@ -1061,26 +1054,22 @@ const struct ethtool_ops qlcnic_ethtool_ops = {
.get_eeprom = qlcnic_get_eeprom,
.get_ringparam = qlcnic_get_ringparam,
.set_ringparam = qlcnic_set_ringparam,
+ .get_channels = qlcnic_get_channels,
+ .set_channels = qlcnic_set_channels,
.get_pauseparam = qlcnic_get_pauseparam,
.set_pauseparam = qlcnic_set_pauseparam,
- .get_tx_csum = qlcnic_get_tx_csum,
- .set_tx_csum = qlcnic_set_tx_csum,
- .set_sg = ethtool_op_set_sg,
- .get_tso = qlcnic_get_tso,
- .set_tso = qlcnic_set_tso,
.get_wol = qlcnic_get_wol,
.set_wol = qlcnic_set_wol,
.self_test = qlcnic_diag_test,
.get_strings = qlcnic_get_strings,
.get_ethtool_stats = qlcnic_get_ethtool_stats,
.get_sset_count = qlcnic_get_sset_count,
- .get_rx_csum = qlcnic_get_rx_csum,
- .set_rx_csum = qlcnic_set_rx_csum,
.get_coalesce = qlcnic_get_intr_coalesce,
.set_coalesce = qlcnic_set_intr_coalesce,
- .get_flags = ethtool_op_get_flags,
- .set_flags = qlcnic_set_flags,
- .phys_id = qlcnic_blink_led,
+ .set_phys_id = qlcnic_set_led,
.set_msglevel = qlcnic_set_msglevel,
.get_msglevel = qlcnic_get_msglevel,
+ .get_dump_flag = qlcnic_get_dump_flag,
+ .get_dump_data = qlcnic_get_dump_data,
+ .set_dump = qlcnic_set_dump,
};
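
The new .get_dump_flag/.get_dump_data/.set_dump callbacks registered above are reached from user space through the SIOCETHTOOL ioctl and struct ethtool_dump (ETHTOOL_GET_DUMP_FLAG, ETHTOOL_GET_DUMP_DATA, ETHTOOL_SET_DUMP). A rough retrieval sketch follows, assuming headers that already export those definitions; the interface name and the minimal error handling are illustrative only.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

int main(void)
{
	struct ifreq ifr;
	struct ethtool_dump flag = { .cmd = ETHTOOL_GET_DUMP_FLAG };
	struct ethtool_dump *dump;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);	/* illustrative name */

	/* First query the dump flag and the length of the captured dump. */
	ifr.ifr_data = (void *)&flag;
	if (fd < 0 || ioctl(fd, SIOCETHTOOL, &ifr) < 0) {
		perror("ETHTOOL_GET_DUMP_FLAG");
		return 1;
	}

	/* Then fetch the dump itself into a buffer of that length. */
	dump = calloc(1, sizeof(*dump) + flag.len);
	dump->cmd = ETHTOOL_GET_DUMP_DATA;
	dump->len = flag.len;
	ifr.ifr_data = (void *)dump;
	if (ioctl(fd, SIOCETHTOOL, &ifr) < 0) {
		perror("ETHTOOL_GET_DUMP_DATA");
		return 1;
	}
	printf("dump flag 0x%x, %u bytes captured\n", flag.flag, dump->len);

	free(dump);
	close(fd);
	return 0;
}
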
diff --git a/drivers/net/qlcnic/qlcnic_hdr.h b/drivers/net/qlcnic/qlcnic_hdr.h
index 726ef555b6bc..d14506f764e0 100644
--- a/drivers/net/qlcnic/qlcnic_hdr.h
+++ b/drivers/net/qlcnic/qlcnic_hdr.h
@@ -492,10 +492,10 @@ enum {
#define TEST_AGT_CTRL (0x00)
-#define TA_CTL_START 1
-#define TA_CTL_ENABLE 2
-#define TA_CTL_WRITE 4
-#define TA_CTL_BUSY 8
+#define TA_CTL_START BIT_0
+#define TA_CTL_ENABLE BIT_1
+#define TA_CTL_WRITE BIT_2
+#define TA_CTL_BUSY BIT_3
/*
* Register offsets for MN
@@ -765,6 +765,38 @@ struct qlcnic_legacy_intr_set {
#define QLCNIC_MAX_PCI_FUNC 8
#define QLCNIC_MAX_VLAN_FILTERS 64
+/* FW dump defines */
+#define MIU_TEST_CTR 0x41000090
+#define MIU_TEST_ADDR_LO 0x41000094
+#define MIU_TEST_ADDR_HI 0x41000098
+#define FLASH_ROM_WINDOW 0x42110030
+#define FLASH_ROM_DATA 0x42150000
+
+static const u32 MIU_TEST_READ_DATA[] = {
+ 0x410000A8, 0x410000AC, 0x410000B8, 0x410000BC, };
+
+#define QLCNIC_FW_DUMP_REG1 0x00130060
+#define QLCNIC_FW_DUMP_REG2 0x001e0000
+#define QLCNIC_FLASH_SEM2_LK 0x0013C010
+#define QLCNIC_FLASH_SEM2_ULK 0x0013C014
+#define QLCNIC_FLASH_LOCK_ID 0x001B2100
+
+#define QLCNIC_RD_DUMP_REG(addr, bar0, data) do { \
+ writel((addr & 0xFFFF0000), (void *) (bar0 + \
+ QLCNIC_FW_DUMP_REG1)); \
+ readl((void *) (bar0 + QLCNIC_FW_DUMP_REG1)); \
+ *data = readl((void *) (bar0 + QLCNIC_FW_DUMP_REG2 + \
+ LSW(addr))); \
+} while (0)
+
+#define QLCNIC_WR_DUMP_REG(addr, bar0, data) do { \
+ writel((addr & 0xFFFF0000), (void *) (bar0 + \
+ QLCNIC_FW_DUMP_REG1)); \
+ readl((void *) (bar0 + QLCNIC_FW_DUMP_REG1)); \
+ writel(data, (void *) (bar0 + QLCNIC_FW_DUMP_REG2 + LSW(addr)));\
+ readl((void *) (bar0 + QLCNIC_FW_DUMP_REG2 + LSW(addr))); \
+} while (0)
+
/* PCI function operational mode */
enum {
QLCNIC_MGMT_FUNC = 0,
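
The QLCNIC_RD_DUMP_REG()/QLCNIC_WR_DUMP_REG() helpers above follow a window scheme: the upper 16 bits of the target address are programmed into the window register at QLCNIC_FW_DUMP_REG1 (and read back to flush the posted write), after which the low 16 bits index a fixed 64 KB aperture at QLCNIC_FW_DUMP_REG2. The sketch below models the same idea in user space with plain arrays standing in for the BAR; the window count and the example address are made up for illustration and are not device values.

#include <stdint.h>
#include <stdio.h>

#define WINDOW_WORDS	(0x10000 / 4)	/* 64 KB aperture, in 32-bit words */
#define NUM_WINDOWS	4		/* illustrative only */

/* "Device" register space: several 64 KB windows, only one of which is
 * visible through the fixed aperture at any time. */
static uint32_t dev_regs[NUM_WINDOWS][WINDOW_WORDS];
static uint32_t window_sel;		/* stands in for QLCNIC_FW_DUMP_REG1 */

static void windowed_write(uint32_t addr, uint32_t data)
{
	window_sel = addr & 0xFFFF0000;	/* select the window */
	dev_regs[window_sel >> 16][(addr & 0xFFFF) / 4] = data;
}

static uint32_t windowed_read(uint32_t addr)
{
	window_sel = addr & 0xFFFF0000;
	return dev_regs[window_sel >> 16][(addr & 0xFFFF) / 4];
}

int main(void)
{
	windowed_write(0x00020010, 0xdeadbeef);	/* window 2, offset 0x10 */
	printf("0x%08x\n", (unsigned)windowed_read(0x00020010));
	return 0;
}
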
diff --git a/drivers/net/qlcnic/qlcnic_hw.c b/drivers/net/qlcnic/qlcnic_hw.c
index 616940f0a8d0..e9656616f2a2 100644
--- a/drivers/net/qlcnic/qlcnic_hw.c
+++ b/drivers/net/qlcnic/qlcnic_hw.c
@@ -9,6 +9,7 @@
#include <linux/slab.h>
#include <net/ip.h>
+#include <linux/bitops.h>
#define MASK(n) ((1ULL<<(n))-1)
#define OCM_WIN_P3P(addr) (addr & 0xffc0000)
@@ -457,7 +458,7 @@ int qlcnic_nic_set_promisc(struct qlcnic_adapter *adapter, u32 mode)
req.qhdr = cpu_to_le64(QLCNIC_HOST_REQUEST << 23);
- word = QLCNIC_H2C_OPCODE_PROXY_SET_VPORT_MISS_MODE |
+ word = QLCNIC_H2C_OPCODE_SET_MAC_RECEIVE_MODE |
((u64)adapter->portnum << 16);
req.req_hdr = cpu_to_le64(word);
@@ -532,33 +533,31 @@ void qlcnic_delete_lb_filters(struct qlcnic_adapter *adapter)
}
}
-#define QLCNIC_CONFIG_INTR_COALESCE 3
-
/*
* Send the interrupt coalescing parameter set by ethtool to the card.
*/
int qlcnic_config_intr_coalesce(struct qlcnic_adapter *adapter)
{
struct qlcnic_nic_req req;
- u64 word[6];
- int rv, i;
+ int rv;
memset(&req, 0, sizeof(struct qlcnic_nic_req));
req.qhdr = cpu_to_le64(QLCNIC_HOST_REQUEST << 23);
- word[0] = QLCNIC_CONFIG_INTR_COALESCE | ((u64)adapter->portnum << 16);
- req.req_hdr = cpu_to_le64(word[0]);
-
- memcpy(&word[0], &adapter->coal, sizeof(adapter->coal));
- for (i = 0; i < 6; i++)
- req.words[i] = cpu_to_le64(word[i]);
+ req.req_hdr = cpu_to_le64(QLCNIC_CONFIG_INTR_COALESCE |
+ ((u64) adapter->portnum << 16));
+ req.words[0] = cpu_to_le64(((u64) adapter->ahw->coal.flag) << 32);
+ req.words[2] = cpu_to_le64(adapter->ahw->coal.rx_packets |
+ ((u64) adapter->ahw->coal.rx_time_us) << 16);
+ req.words[5] = cpu_to_le64(adapter->ahw->coal.timer_out |
+ ((u64) adapter->ahw->coal.type) << 32 |
+ ((u64) adapter->ahw->coal.sts_ring_mask) << 40);
rv = qlcnic_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1);
if (rv != 0)
dev_err(&adapter->netdev->dev,
"Could not send interrupt coalescing parameters\n");
-
return rv;
}
@@ -568,6 +567,9 @@ int qlcnic_config_hw_lro(struct qlcnic_adapter *adapter, int enable)
u64 word;
int rv;
+ if (!test_bit(__QLCNIC_FW_ATTACHED, &adapter->state))
+ return 0;
+
memset(&req, 0, sizeof(struct qlcnic_nic_req));
req.qhdr = cpu_to_le64(QLCNIC_HOST_REQUEST << 23);
@@ -713,6 +715,9 @@ int qlcnic_send_lro_cleanup(struct qlcnic_adapter *adapter)
u64 word;
int rv;
+ if (!test_bit(__QLCNIC_FW_ATTACHED, &adapter->state))
+ return 0;
+
memset(&req, 0, sizeof(struct qlcnic_nic_req));
req.qhdr = cpu_to_le64(QLCNIC_HOST_REQUEST << 23);
@@ -754,6 +759,43 @@ int qlcnic_change_mtu(struct net_device *netdev, int mtu)
return rc;
}
+
+u32 qlcnic_fix_features(struct net_device *netdev, u32 features)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(netdev);
+
+ if ((adapter->flags & QLCNIC_ESWITCH_ENABLED)) {
+ u32 changed = features ^ netdev->features;
+ features ^= changed & (NETIF_F_ALL_CSUM | NETIF_F_RXCSUM);
+ }
+
+ if (!(features & NETIF_F_RXCSUM))
+ features &= ~NETIF_F_LRO;
+
+ return features;
+}
+
+
+int qlcnic_set_features(struct net_device *netdev, u32 features)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(netdev);
+ u32 changed = netdev->features ^ features;
+ int hw_lro = (features & NETIF_F_LRO) ? QLCNIC_LRO_ENABLED : 0;
+
+ if (!(changed & NETIF_F_LRO))
+ return 0;
+
+ netdev->features = features ^ NETIF_F_LRO;
+
+ if (qlcnic_config_hw_lro(adapter, hw_lro))
+ return -EIO;
+
+ if ((hw_lro == 0) && qlcnic_send_lro_cleanup(adapter))
+ return -EIO;
+
+ return 0;
+}
+
/*
* Changes the CRB window to the specified window.
*/
@@ -780,7 +822,7 @@ qlcnic_pci_get_crb_addr_2M(struct qlcnic_adapter *adapter,
m = &crb_128M_2M_map[CRB_BLK(off)].sub_block[CRB_SUBBLK(off)];
if (m->valid && (m->start_128M <= off) && (m->end_128M > off)) {
- *addr = adapter->ahw.pci_base0 + m->start_2M +
+ *addr = adapter->ahw->pci_base0 + m->start_2M +
(off - m->start_128M);
return 0;
}
@@ -788,7 +830,7 @@ qlcnic_pci_get_crb_addr_2M(struct qlcnic_adapter *adapter,
/*
* Not in direct map, use crb window
*/
- *addr = adapter->ahw.pci_base0 + CRB_INDIRECT_2M + (off & MASK(16));
+ *addr = adapter->ahw->pci_base0 + CRB_INDIRECT_2M + (off & MASK(16));
return 1;
}
@@ -801,7 +843,7 @@ static int
qlcnic_pci_set_crbwindow_2M(struct qlcnic_adapter *adapter, ulong off)
{
u32 window;
- void __iomem *addr = adapter->ahw.pci_base0 + CRB_WINDOW_2M;
+ void __iomem *addr = adapter->ahw->pci_base0 + CRB_WINDOW_2M;
off -= QLCNIC_PCI_CRBSPACE;
@@ -838,13 +880,13 @@ qlcnic_hw_write_wx_2M(struct qlcnic_adapter *adapter, ulong off, u32 data)
if (rv > 0) {
/* indirect access */
- write_lock_irqsave(&adapter->ahw.crb_lock, flags);
+ write_lock_irqsave(&adapter->ahw->crb_lock, flags);
crb_win_lock(adapter);
rv = qlcnic_pci_set_crbwindow_2M(adapter, off);
if (!rv)
writel(data, addr);
crb_win_unlock(adapter);
- write_unlock_irqrestore(&adapter->ahw.crb_lock, flags);
+ write_unlock_irqrestore(&adapter->ahw->crb_lock, flags);
return rv;
}
@@ -869,12 +911,12 @@ qlcnic_hw_read_wx_2M(struct qlcnic_adapter *adapter, ulong off)
if (rv > 0) {
/* indirect access */
- write_lock_irqsave(&adapter->ahw.crb_lock, flags);
+ write_lock_irqsave(&adapter->ahw->crb_lock, flags);
crb_win_lock(adapter);
if (!qlcnic_pci_set_crbwindow_2M(adapter, off))
data = readl(addr);
crb_win_unlock(adapter);
- write_unlock_irqrestore(&adapter->ahw.crb_lock, flags);
+ write_unlock_irqrestore(&adapter->ahw->crb_lock, flags);
return data;
}
@@ -904,9 +946,9 @@ qlcnic_pci_set_window_2M(struct qlcnic_adapter *adapter,
window = OCM_WIN_P3P(addr);
- writel(window, adapter->ahw.ocm_win_crb);
+ writel(window, adapter->ahw->ocm_win_crb);
/* read back to flush */
- readl(adapter->ahw.ocm_win_crb);
+ readl(adapter->ahw->ocm_win_crb);
*start = QLCNIC_PCI_OCM0_2M + GET_MEM_OFFS_2M(addr);
return 0;
@@ -920,13 +962,13 @@ qlcnic_pci_mem_access_direct(struct qlcnic_adapter *adapter, u64 off,
int ret;
u32 start;
- mutex_lock(&adapter->ahw.mem_lock);
+ mutex_lock(&adapter->ahw->mem_lock);
ret = qlcnic_pci_set_window_2M(adapter, off, &start);
if (ret != 0)
goto unlock;
- addr = adapter->ahw.pci_base0 + start;
+ addr = adapter->ahw->pci_base0 + start;
if (op == 0) /* read */
*data = readq(addr);
@@ -934,7 +976,7 @@ qlcnic_pci_mem_access_direct(struct qlcnic_adapter *adapter, u64 off,
writeq(*data, addr);
unlock:
- mutex_unlock(&adapter->ahw.mem_lock);
+ mutex_unlock(&adapter->ahw->mem_lock);
return ret;
}
@@ -942,23 +984,23 @@ unlock:
void
qlcnic_pci_camqm_read_2M(struct qlcnic_adapter *adapter, u64 off, u64 *data)
{
- void __iomem *addr = adapter->ahw.pci_base0 +
+ void __iomem *addr = adapter->ahw->pci_base0 +
QLCNIC_PCI_CAMQM_2M_BASE + (off - QLCNIC_PCI_CAMQM);
- mutex_lock(&adapter->ahw.mem_lock);
+ mutex_lock(&adapter->ahw->mem_lock);
*data = readq(addr);
- mutex_unlock(&adapter->ahw.mem_lock);
+ mutex_unlock(&adapter->ahw->mem_lock);
}
void
qlcnic_pci_camqm_write_2M(struct qlcnic_adapter *adapter, u64 off, u64 data)
{
- void __iomem *addr = adapter->ahw.pci_base0 +
+ void __iomem *addr = adapter->ahw->pci_base0 +
QLCNIC_PCI_CAMQM_2M_BASE + (off - QLCNIC_PCI_CAMQM);
- mutex_lock(&adapter->ahw.mem_lock);
+ mutex_lock(&adapter->ahw->mem_lock);
writeq(data, addr);
- mutex_unlock(&adapter->ahw.mem_lock);
+ mutex_unlock(&adapter->ahw->mem_lock);
}
#define MAX_CTL_CHECK 1000
@@ -997,7 +1039,7 @@ qlcnic_pci_mem_write_2M(struct qlcnic_adapter *adapter,
correct:
off8 = off & ~0xf;
- mutex_lock(&adapter->ahw.mem_lock);
+ mutex_lock(&adapter->ahw->mem_lock);
writel(off8, (mem_crb + MIU_TEST_AGT_ADDR_LO));
writel(0, (mem_crb + MIU_TEST_AGT_ADDR_HI));
@@ -1049,7 +1091,7 @@ correct:
ret = 0;
done:
- mutex_unlock(&adapter->ahw.mem_lock);
+ mutex_unlock(&adapter->ahw->mem_lock);
return ret;
}
@@ -1091,7 +1133,7 @@ qlcnic_pci_mem_read_2M(struct qlcnic_adapter *adapter,
correct:
off8 = off & ~0xf;
- mutex_lock(&adapter->ahw.mem_lock);
+ mutex_lock(&adapter->ahw->mem_lock);
writel(off8, (mem_crb + MIU_TEST_AGT_ADDR_LO));
writel(0, (mem_crb + MIU_TEST_AGT_ADDR_HI));
@@ -1121,7 +1163,7 @@ correct:
ret = 0;
}
- mutex_unlock(&adapter->ahw.mem_lock);
+ mutex_unlock(&adapter->ahw->mem_lock);
return ret;
}
@@ -1145,7 +1187,7 @@ int qlcnic_get_board_info(struct qlcnic_adapter *adapter)
if (qlcnic_rom_fast_read(adapter, offset, &board_type))
return -EIO;
- adapter->ahw.board_type = board_type;
+ adapter->ahw->board_type = board_type;
if (board_type == QLCNIC_BRDTYPE_P3P_4_GB_MM) {
u32 gpio = QLCRD32(adapter, QLCNIC_ROMUSB_GLB_PAD_GPIO_I);
@@ -1164,20 +1206,20 @@ int qlcnic_get_board_info(struct qlcnic_adapter *adapter)
case QLCNIC_BRDTYPE_P3P_10G_SFP_QT:
case QLCNIC_BRDTYPE_P3P_10G_XFP:
case QLCNIC_BRDTYPE_P3P_10000_BASE_T:
- adapter->ahw.port_type = QLCNIC_XGBE;
+ adapter->ahw->port_type = QLCNIC_XGBE;
break;
case QLCNIC_BRDTYPE_P3P_REF_QG:
case QLCNIC_BRDTYPE_P3P_4_GB:
case QLCNIC_BRDTYPE_P3P_4_GB_MM:
- adapter->ahw.port_type = QLCNIC_GBE;
+ adapter->ahw->port_type = QLCNIC_GBE;
break;
case QLCNIC_BRDTYPE_P3P_10G_TP:
- adapter->ahw.port_type = (adapter->portnum < 2) ?
+ adapter->ahw->port_type = (adapter->portnum < 2) ?
QLCNIC_XGBE : QLCNIC_GBE;
break;
default:
dev_err(&pdev->dev, "unknown board type %x\n", board_type);
- adapter->ahw.port_type = QLCNIC_XGBE;
+ adapter->ahw->port_type = QLCNIC_XGBE;
break;
}
@@ -1220,3 +1262,461 @@ int qlcnic_config_led(struct qlcnic_adapter *adapter, u32 state, u32 rate)
return rv;
}
+
+/* FW dump related functions */
+static u32
+qlcnic_dump_crb(struct qlcnic_adapter *adapter, struct qlcnic_dump_entry *entry,
+ u32 *buffer)
+{
+ int i;
+ u32 addr, data;
+ struct __crb *crb = &entry->region.crb;
+ void __iomem *base = adapter->ahw->pci_base0;
+
+ addr = crb->addr;
+
+ for (i = 0; i < crb->no_ops; i++) {
+ QLCNIC_RD_DUMP_REG(addr, base, &data);
+ *buffer++ = cpu_to_le32(addr);
+ *buffer++ = cpu_to_le32(data);
+ addr += crb->stride;
+ }
+ return crb->no_ops * 2 * sizeof(u32);
+}
+
+static u32
+qlcnic_dump_ctrl(struct qlcnic_adapter *adapter,
+ struct qlcnic_dump_entry *entry, u32 *buffer)
+{
+ int i, k, timeout = 0;
+ void __iomem *base = adapter->ahw->pci_base0;
+ u32 addr, data;
+ u8 opcode, no_ops;
+ struct __ctrl *ctr = &entry->region.ctrl;
+ struct qlcnic_dump_template_hdr *t_hdr = adapter->ahw->fw_dump.tmpl_hdr;
+
+ addr = ctr->addr;
+ no_ops = ctr->no_ops;
+
+ for (i = 0; i < no_ops; i++) {
+ k = 0;
+ opcode = 0;
+ for (k = 0; k < 8; k++) {
+ if (!(ctr->opcode & (1 << k)))
+ continue;
+ switch (1 << k) {
+ case QLCNIC_DUMP_WCRB:
+ QLCNIC_WR_DUMP_REG(addr, base, ctr->val1);
+ break;
+ case QLCNIC_DUMP_RWCRB:
+ QLCNIC_RD_DUMP_REG(addr, base, &data);
+ QLCNIC_WR_DUMP_REG(addr, base, data);
+ break;
+ case QLCNIC_DUMP_ANDCRB:
+ QLCNIC_RD_DUMP_REG(addr, base, &data);
+ QLCNIC_WR_DUMP_REG(addr, base,
+ (data & ctr->val2));
+ break;
+ case QLCNIC_DUMP_ORCRB:
+ QLCNIC_RD_DUMP_REG(addr, base, &data);
+ QLCNIC_WR_DUMP_REG(addr, base,
+ (data | ctr->val3));
+ break;
+ case QLCNIC_DUMP_POLLCRB:
+ while (timeout <= ctr->timeout) {
+ QLCNIC_RD_DUMP_REG(addr, base, &data);
+ if ((data & ctr->val2) == ctr->val1)
+ break;
+ msleep(1);
+ timeout++;
+ }
+ if (timeout > ctr->timeout) {
+ dev_info(&adapter->pdev->dev,
+ "Timed out, aborting poll CRB\n");
+ return -EINVAL;
+ }
+ break;
+ case QLCNIC_DUMP_RD_SAVE:
+ if (ctr->index_a)
+ addr = t_hdr->saved_state[ctr->index_a];
+ QLCNIC_RD_DUMP_REG(addr, base, &data);
+ t_hdr->saved_state[ctr->index_v] = data;
+ break;
+ case QLCNIC_DUMP_WRT_SAVED:
+ if (ctr->index_v)
+ data = t_hdr->saved_state[ctr->index_v];
+ else
+ data = ctr->val1;
+ if (ctr->index_a)
+ addr = t_hdr->saved_state[ctr->index_a];
+ QLCNIC_WR_DUMP_REG(addr, base, data);
+ break;
+ case QLCNIC_DUMP_MOD_SAVE_ST:
+ data = t_hdr->saved_state[ctr->index_v];
+ data <<= ctr->shl_val;
+ data >>= ctr->shr_val;
+ if (ctr->val2)
+ data &= ctr->val2;
+ data |= ctr->val3;
+ data += ctr->val1;
+ t_hdr->saved_state[ctr->index_v] = data;
+ break;
+ default:
+ dev_info(&adapter->pdev->dev,
+ "Unknown opcode\n");
+ break;
+ }
+ }
+ addr += ctr->stride;
+ }
+ return 0;
+}
+
+static u32
+qlcnic_dump_mux(struct qlcnic_adapter *adapter, struct qlcnic_dump_entry *entry,
+ u32 *buffer)
+{
+ int loop;
+ u32 val, data = 0;
+ struct __mux *mux = &entry->region.mux;
+ void __iomem *base = adapter->ahw->pci_base0;
+
+ val = mux->val;
+ for (loop = 0; loop < mux->no_ops; loop++) {
+ QLCNIC_WR_DUMP_REG(mux->addr, base, val);
+ QLCNIC_RD_DUMP_REG(mux->read_addr, base, &data);
+ *buffer++ = cpu_to_le32(val);
+ *buffer++ = cpu_to_le32(data);
+ val += mux->val_stride;
+ }
+ return 2 * mux->no_ops * sizeof(u32);
+}
+
+static u32
+qlcnic_dump_que(struct qlcnic_adapter *adapter, struct qlcnic_dump_entry *entry,
+ u32 *buffer)
+{
+ int i, loop;
+ u32 cnt, addr, data, que_id = 0;
+ void __iomem *base = adapter->ahw->pci_base0;
+ struct __queue *que = &entry->region.que;
+
+ addr = que->read_addr;
+ cnt = que->read_addr_cnt;
+
+ for (loop = 0; loop < que->no_ops; loop++) {
+ QLCNIC_WR_DUMP_REG(que->sel_addr, base, que_id);
+ for (i = 0; i < cnt; i++) {
+ QLCNIC_RD_DUMP_REG(addr, base, &data);
+ *buffer++ = cpu_to_le32(data);
+ addr += que->read_addr_stride;
+ }
+ que_id += que->stride;
+ }
+ return que->no_ops * cnt * sizeof(u32);
+}
+
+static u32
+qlcnic_dump_ocm(struct qlcnic_adapter *adapter, struct qlcnic_dump_entry *entry,
+ u32 *buffer)
+{
+ int i;
+ u32 data;
+ void __iomem *addr;
+ struct __ocm *ocm = &entry->region.ocm;
+
+ addr = adapter->ahw->pci_base0 + ocm->read_addr;
+ for (i = 0; i < ocm->no_ops; i++) {
+ data = readl(addr);
+ *buffer++ = cpu_to_le32(data);
+ addr += ocm->read_addr_stride;
+ }
+ return ocm->no_ops * sizeof(u32);
+}
+
+static u32
+qlcnic_read_rom(struct qlcnic_adapter *adapter, struct qlcnic_dump_entry *entry,
+ u32 *buffer)
+{
+ int i, count = 0;
+ u32 fl_addr, size, val, lck_val, addr;
+ struct __mem *rom = &entry->region.mem;
+ void __iomem *base = adapter->ahw->pci_base0;
+
+ fl_addr = rom->addr;
+ size = rom->size/4;
+lock_try:
+ lck_val = readl(base + QLCNIC_FLASH_SEM2_LK);
+ if (!lck_val && count < MAX_CTL_CHECK) {
+ msleep(10);
+ count++;
+ goto lock_try;
+ }
+ writel(adapter->ahw->pci_func, (base + QLCNIC_FLASH_LOCK_ID));
+ for (i = 0; i < size; i++) {
+ addr = fl_addr & 0xFFFF0000;
+ QLCNIC_WR_DUMP_REG(FLASH_ROM_WINDOW, base, addr);
+ addr = LSW(fl_addr) + FLASH_ROM_DATA;
+ QLCNIC_RD_DUMP_REG(addr, base, &val);
+ fl_addr += 4;
+ *buffer++ = cpu_to_le32(val);
+ }
+ readl(base + QLCNIC_FLASH_SEM2_ULK);
+ return rom->size;
+}
+
+static u32
+qlcnic_dump_l1_cache(struct qlcnic_adapter *adapter,
+ struct qlcnic_dump_entry *entry, u32 *buffer)
+{
+ int i;
+ u32 cnt, val, data, addr;
+ void __iomem *base = adapter->ahw->pci_base0;
+ struct __cache *l1 = &entry->region.cache;
+
+ val = l1->init_tag_val;
+
+ for (i = 0; i < l1->no_ops; i++) {
+ QLCNIC_WR_DUMP_REG(l1->addr, base, val);
+ QLCNIC_WR_DUMP_REG(l1->ctrl_addr, base, LSW(l1->ctrl_val));
+ addr = l1->read_addr;
+ cnt = l1->read_addr_num;
+ while (cnt) {
+ QLCNIC_RD_DUMP_REG(addr, base, &data);
+ *buffer++ = cpu_to_le32(data);
+ addr += l1->read_addr_stride;
+ cnt--;
+ }
+ val += l1->stride;
+ }
+ return l1->no_ops * l1->read_addr_num * sizeof(u32);
+}
+
+static u32
+qlcnic_dump_l2_cache(struct qlcnic_adapter *adapter,
+ struct qlcnic_dump_entry *entry, u32 *buffer)
+{
+ int i;
+ u32 cnt, val, data, addr;
+ u8 poll_mask, poll_to, time_out = 0;
+ void __iomem *base = adapter->ahw->pci_base0;
+ struct __cache *l2 = &entry->region.cache;
+
+ val = l2->init_tag_val;
+ poll_mask = LSB(MSW(l2->ctrl_val));
+ poll_to = MSB(MSW(l2->ctrl_val));
+
+ for (i = 0; i < l2->no_ops; i++) {
+ QLCNIC_WR_DUMP_REG(l2->addr, base, val);
+ do {
+ QLCNIC_WR_DUMP_REG(l2->ctrl_addr, base,
+ LSW(l2->ctrl_val));
+ QLCNIC_RD_DUMP_REG(l2->ctrl_addr, base, &data);
+ if (!(data & poll_mask))
+ break;
+ msleep(1);
+ time_out++;
+ } while (time_out <= poll_to);
+ if (time_out > poll_to)
+ return -EINVAL;
+
+ addr = l2->read_addr;
+ cnt = l2->read_addr_num;
+ while (cnt) {
+ QLCNIC_RD_DUMP_REG(addr, base, &data);
+ *buffer++ = cpu_to_le32(data);
+ addr += l2->read_addr_stride;
+ cnt--;
+ }
+ val += l2->stride;
+ }
+ return l2->no_ops * l2->read_addr_num * sizeof(u32);
+}
+
+static u32
+qlcnic_read_memory(struct qlcnic_adapter *adapter,
+ struct qlcnic_dump_entry *entry, u32 *buffer)
+{
+ u32 addr, data, test, ret = 0;
+ int i, reg_read;
+ struct __mem *mem = &entry->region.mem;
+ void __iomem *base = adapter->ahw->pci_base0;
+
+ reg_read = mem->size;
+ addr = mem->addr;
+ /* the address must be 16-byte aligned and the size a multiple of 16 */
+ if ((addr & 0xf) || (reg_read%16)) {
+ dev_info(&adapter->pdev->dev,
+ "Unaligned memory addr:0x%x size:0x%x\n",
+ addr, reg_read);
+ return -EINVAL;
+ }
+
+ mutex_lock(&adapter->ahw->mem_lock);
+
+ while (reg_read != 0) {
+ QLCNIC_WR_DUMP_REG(MIU_TEST_ADDR_LO, base, addr);
+ QLCNIC_WR_DUMP_REG(MIU_TEST_ADDR_HI, base, 0);
+ QLCNIC_WR_DUMP_REG(MIU_TEST_CTR, base,
+ TA_CTL_ENABLE | TA_CTL_START);
+
+ for (i = 0; i < MAX_CTL_CHECK; i++) {
+ QLCNIC_RD_DUMP_REG(MIU_TEST_CTR, base, &test);
+ if (!(test & TA_CTL_BUSY))
+ break;
+ }
+ if (i == MAX_CTL_CHECK) {
+ if (printk_ratelimit()) {
+ dev_err(&adapter->pdev->dev,
+ "failed to read through agent\n");
+ ret = -EINVAL;
+ goto out;
+ }
+ }
+ for (i = 0; i < 4; i++) {
+ QLCNIC_RD_DUMP_REG(MIU_TEST_READ_DATA[i], base, &data);
+ *buffer++ = cpu_to_le32(data);
+ }
+ addr += 16;
+ reg_read -= 16;
+ ret += 16;
+ }
+out:
+ mutex_unlock(&adapter->ahw->mem_lock);
+ return mem->size;
+}
+
+static u32
+qlcnic_dump_nop(struct qlcnic_adapter *adapter,
+ struct qlcnic_dump_entry *entry, u32 *buffer)
+{
+ entry->hdr.flags |= QLCNIC_DUMP_SKIP;
+ return 0;
+}
+
+struct qlcnic_dump_operations fw_dump_ops[] = {
+ { QLCNIC_DUMP_NOP, qlcnic_dump_nop },
+ { QLCNIC_DUMP_READ_CRB, qlcnic_dump_crb },
+ { QLCNIC_DUMP_READ_MUX, qlcnic_dump_mux },
+ { QLCNIC_DUMP_QUEUE, qlcnic_dump_que },
+ { QLCNIC_DUMP_BRD_CONFIG, qlcnic_read_rom },
+ { QLCNIC_DUMP_READ_OCM, qlcnic_dump_ocm },
+ { QLCNIC_DUMP_PEG_REG, qlcnic_dump_ctrl },
+ { QLCNIC_DUMP_L1_DTAG, qlcnic_dump_l1_cache },
+ { QLCNIC_DUMP_L1_ITAG, qlcnic_dump_l1_cache },
+ { QLCNIC_DUMP_L1_DATA, qlcnic_dump_l1_cache },
+ { QLCNIC_DUMP_L1_INST, qlcnic_dump_l1_cache },
+ { QLCNIC_DUMP_L2_DTAG, qlcnic_dump_l2_cache },
+ { QLCNIC_DUMP_L2_ITAG, qlcnic_dump_l2_cache },
+ { QLCNIC_DUMP_L2_DATA, qlcnic_dump_l2_cache },
+ { QLCNIC_DUMP_L2_INST, qlcnic_dump_l2_cache },
+ { QLCNIC_DUMP_READ_ROM, qlcnic_read_rom },
+ { QLCNIC_DUMP_READ_MEM, qlcnic_read_memory },
+ { QLCNIC_DUMP_READ_CTRL, qlcnic_dump_ctrl },
+ { QLCNIC_DUMP_TLHDR, qlcnic_dump_nop },
+ { QLCNIC_DUMP_RDEND, qlcnic_dump_nop },
+};
+
+/* Walk the template and collect dump for each entry in the dump template */
+static int
+qlcnic_valid_dump_entry(struct device *dev, struct qlcnic_dump_entry *entry,
+ u32 size)
+{
+ int ret = 1;
+ if (size != entry->hdr.cap_size) {
+ dev_info(dev,
+ "Invalidate dump, Type:%d\tMask:%d\tSize:%dCap_size:%d\n",
+ entry->hdr.type, entry->hdr.mask, size, entry->hdr.cap_size);
+ dev_info(dev, "Aborting further dump capture\n");
+ ret = 0;
+ }
+ return ret;
+}
+
+int qlcnic_dump_fw(struct qlcnic_adapter *adapter)
+{
+ u32 *buffer;
+ char mesg[64];
+ char *msg[] = {mesg, NULL};
+ int i, k, ops_cnt, ops_index, dump_size = 0;
+ u32 entry_offset, dump, no_entries, buf_offset = 0;
+ struct qlcnic_dump_entry *entry;
+ struct qlcnic_fw_dump *fw_dump = &adapter->ahw->fw_dump;
+ struct qlcnic_dump_template_hdr *tmpl_hdr = fw_dump->tmpl_hdr;
+
+ if (fw_dump->clr) {
+ dev_info(&adapter->pdev->dev,
+ "Previous dump not cleared, not capturing dump\n");
+ return -EIO;
+ }
+ /* Calculate the size for dump data area only */
+ for (i = 2, k = 1; (i & QLCNIC_DUMP_MASK_MAX); i <<= 1, k++)
+ if (i & tmpl_hdr->drv_cap_mask)
+ dump_size += tmpl_hdr->cap_sizes[k];
+ if (!dump_size)
+ return -EIO;
+
+ fw_dump->data = vzalloc(dump_size);
+ if (!fw_dump->data) {
+ dev_info(&adapter->pdev->dev,
+ "Unable to allocate (%d KB) for fw dump\n",
+ dump_size/1024);
+ return -ENOMEM;
+ }
+ buffer = fw_dump->data;
+ fw_dump->size = dump_size;
+ no_entries = tmpl_hdr->num_entries;
+ ops_cnt = ARRAY_SIZE(fw_dump_ops);
+ entry_offset = tmpl_hdr->offset;
+ tmpl_hdr->sys_info[0] = QLCNIC_DRIVER_VERSION;
+ tmpl_hdr->sys_info[1] = adapter->fw_version;
+
+ for (i = 0; i < no_entries; i++) {
+ entry = (struct qlcnic_dump_entry *) ((void *) tmpl_hdr +
+ entry_offset);
+ if (!(entry->hdr.mask & tmpl_hdr->drv_cap_mask)) {
+ entry->hdr.flags |= QLCNIC_DUMP_SKIP;
+ entry_offset += entry->hdr.offset;
+ continue;
+ }
+ /* Find the handler for this entry */
+ ops_index = 0;
+ while (ops_index < ops_cnt) {
+ if (entry->hdr.type == fw_dump_ops[ops_index].opcode)
+ break;
+ ops_index++;
+ }
+ if (ops_index == ops_cnt) {
+ dev_info(&adapter->pdev->dev,
+ "Invalid entry type %d, exiting dump\n",
+ entry->hdr.type);
+ goto error;
+ }
+ /* Collect dump for this entry */
+ dump = fw_dump_ops[ops_index].handler(adapter, entry, buffer);
+ if (dump && !qlcnic_valid_dump_entry(&adapter->pdev->dev, entry,
+ dump))
+ entry->hdr.flags |= QLCNIC_DUMP_SKIP;
+ buf_offset += entry->hdr.cap_size;
+ entry_offset += entry->hdr.offset;
+ buffer = fw_dump->data + buf_offset;
+ }
+ if (dump_size != buf_offset) {
+ dev_info(&adapter->pdev->dev,
+ "Captured(%d) and expected size(%d) do not match\n",
+ buf_offset, dump_size);
+ goto error;
+ } else {
+ fw_dump->clr = 1;
+ snprintf(mesg, sizeof(mesg), "FW dump for device: %d\n",
+ adapter->pdev->devfn);
+ dev_info(&adapter->pdev->dev, "Dump data, %d bytes captured\n",
+ fw_dump->size);
+ /* Send a udev event to notify availability of FW dump */
+ kobject_uevent_env(&adapter->pdev->dev.kobj, KOBJ_CHANGE, msg);
+ return 0;
+ }
+error:
+ vfree(fw_dump->data);
+ return -EINVAL;
+}
diff --git a/drivers/net/qlcnic/qlcnic_init.c b/drivers/net/qlcnic/qlcnic_init.c
index a7f1d5b7e811..5b8bbcf904d5 100644
--- a/drivers/net/qlcnic/qlcnic_init.c
+++ b/drivers/net/qlcnic/qlcnic_init.c
@@ -94,7 +94,7 @@ void qlcnic_release_rx_buffers(struct qlcnic_adapter *adapter)
struct qlcnic_rx_buffer *rx_buf;
int i, ring;
- recv_ctx = &adapter->recv_ctx;
+ recv_ctx = adapter->recv_ctx;
for (ring = 0; ring < adapter->max_rds_rings; ring++) {
rds_ring = &recv_ctx->rds_rings[ring];
for (i = 0; i < rds_ring->num_desc; ++i) {
@@ -119,7 +119,7 @@ void qlcnic_reset_rx_buffers_list(struct qlcnic_adapter *adapter)
struct qlcnic_rx_buffer *rx_buf;
int i, ring;
- recv_ctx = &adapter->recv_ctx;
+ recv_ctx = adapter->recv_ctx;
for (ring = 0; ring < adapter->max_rds_rings; ring++) {
rds_ring = &recv_ctx->rds_rings[ring];
@@ -173,7 +173,7 @@ void qlcnic_free_sw_resources(struct qlcnic_adapter *adapter)
struct qlcnic_host_tx_ring *tx_ring;
int ring;
- recv_ctx = &adapter->recv_ctx;
+ recv_ctx = adapter->recv_ctx;
if (recv_ctx->rds_rings == NULL)
goto skip_rds;
@@ -226,7 +226,7 @@ int qlcnic_alloc_sw_resources(struct qlcnic_adapter *adapter)
}
tx_ring->cmd_buf_arr = cmd_buf_arr;
- recv_ctx = &adapter->recv_ctx;
+ recv_ctx = adapter->recv_ctx;
size = adapter->max_rds_rings * sizeof(struct qlcnic_host_rds_ring);
rds_ring = kzalloc(size, GFP_KERNEL);
@@ -345,7 +345,7 @@ static int qlcnic_wait_rom_done(struct qlcnic_adapter *adapter)
}
static int do_rom_fast_read(struct qlcnic_adapter *adapter,
- int addr, int *valp)
+ u32 addr, u32 *valp)
{
QLCWR32(adapter, QLCNIC_ROMUSB_ROM_ADDRESS, addr);
QLCWR32(adapter, QLCNIC_ROMUSB_ROM_DUMMY_BYTE_CNT, 0);
@@ -398,7 +398,7 @@ qlcnic_rom_fast_read_words(struct qlcnic_adapter *adapter, int addr,
return ret;
}
-int qlcnic_rom_fast_read(struct qlcnic_adapter *adapter, int addr, int *valp)
+int qlcnic_rom_fast_read(struct qlcnic_adapter *adapter, u32 addr, u32 *valp)
{
int ret;
@@ -864,7 +864,7 @@ nomn:
for (i = 0; i < entries; i++) {
__le32 flags, file_chiprev, offs;
- u8 chiprev = adapter->ahw.revision_id;
+ u8 chiprev = adapter->ahw->revision_id;
u32 flagbit;
offs = cpu_to_le32(ptab_descr->findex) +
@@ -1130,9 +1130,20 @@ qlcnic_load_firmware(struct qlcnic_adapter *adapter)
} else {
u64 data;
u32 hi, lo;
-
- size = (QLCNIC_IMAGE_START - QLCNIC_BOOTLD_START) / 8;
- flashaddr = QLCNIC_BOOTLD_START;
+ int ret;
+ struct qlcnic_flt_entry bootld_entry;
+
+ ret = qlcnic_get_flt_entry(adapter, QLCNIC_BOOTLD_REGION,
+ &bootld_entry);
+ if (!ret) {
+ size = bootld_entry.size / 8;
+ flashaddr = bootld_entry.start_addr;
+ } else {
+ size = (QLCNIC_IMAGE_START - QLCNIC_BOOTLD_START) / 8;
+ flashaddr = QLCNIC_BOOTLD_START;
+ dev_info(&pdev->dev,
+ "using legacy method to get flash fw region");
+ }
for (i = 0; i < size; i++) {
if (qlcnic_rom_fast_read(adapter,
@@ -1379,8 +1390,8 @@ static struct sk_buff *qlcnic_process_rxbuf(struct qlcnic_adapter *adapter,
skb = buffer->skb;
- if (likely(adapter->rx_csum && (cksum == STATUS_CKSUM_OK ||
- cksum == STATUS_CKSUM_LOOP))) {
+ if (likely((adapter->netdev->features & NETIF_F_RXCSUM) &&
+ (cksum == STATUS_CKSUM_OK || cksum == STATUS_CKSUM_LOOP))) {
adapter->stats.csummed++;
skb->ip_summed = CHECKSUM_UNNECESSARY;
} else {
@@ -1394,7 +1405,7 @@ static struct sk_buff *qlcnic_process_rxbuf(struct qlcnic_adapter *adapter,
return skb;
}
-static int
+static inline int
qlcnic_check_rx_tagging(struct qlcnic_adapter *adapter, struct sk_buff *skb,
u16 *vlan_tag)
{
@@ -1425,7 +1436,7 @@ qlcnic_process_rcv(struct qlcnic_adapter *adapter,
int ring, u64 sts_data0)
{
struct net_device *netdev = adapter->netdev;
- struct qlcnic_recv_context *recv_ctx = &adapter->recv_ctx;
+ struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
struct qlcnic_rx_buffer *buffer;
struct sk_buff *skb;
struct qlcnic_host_rds_ring *rds_ring;
@@ -1467,10 +1478,10 @@ qlcnic_process_rcv(struct qlcnic_adapter *adapter,
skb->protocol = eth_type_trans(skb, netdev);
- if ((vid != 0xffff) && adapter->vlgrp)
- vlan_gro_receive(&sds_ring->napi, adapter->vlgrp, vid, skb);
- else
- napi_gro_receive(&sds_ring->napi, skb);
+ if (vid != 0xffff)
+ __vlan_hwaccel_put_tag(skb, vid);
+
+ napi_gro_receive(&sds_ring->napi, skb);
adapter->stats.rx_pkts++;
adapter->stats.rxbytes += length;
@@ -1488,7 +1499,7 @@ qlcnic_process_lro(struct qlcnic_adapter *adapter,
int ring, u64 sts_data0, u64 sts_data1)
{
struct net_device *netdev = adapter->netdev;
- struct qlcnic_recv_context *recv_ctx = &adapter->recv_ctx;
+ struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
struct qlcnic_rx_buffer *buffer;
struct sk_buff *skb;
struct qlcnic_host_rds_ring *rds_ring;
@@ -1552,10 +1563,9 @@ qlcnic_process_lro(struct qlcnic_adapter *adapter,
length = skb->len;
- if ((vid != 0xffff) && adapter->vlgrp)
- vlan_hwaccel_receive_skb(skb, adapter->vlgrp, vid);
- else
- netif_receive_skb(skb);
+ if (vid != 0xffff)
+ __vlan_hwaccel_put_tag(skb, vid);
+ netif_receive_skb(skb);
adapter->stats.lro_pkts++;
adapter->stats.lrobytes += length;
@@ -1625,7 +1635,7 @@ skip:
for (ring = 0; ring < adapter->max_rds_rings; ring++) {
struct qlcnic_host_rds_ring *rds_ring =
- &adapter->recv_ctx.rds_rings[ring];
+ &adapter->recv_ctx->rds_rings[ring];
if (!list_empty(&sds_ring->free_list[ring])) {
list_for_each(cur, &sds_ring->free_list[ring]) {
@@ -1651,12 +1661,13 @@ skip:
}
void
-qlcnic_post_rx_buffers(struct qlcnic_adapter *adapter, u32 ringid,
+qlcnic_post_rx_buffers(struct qlcnic_adapter *adapter,
struct qlcnic_host_rds_ring *rds_ring)
{
struct rcv_desc *pdesc;
struct qlcnic_rx_buffer *buffer;
- int producer, count = 0;
+ int count = 0;
+ u32 producer;
struct list_head *head;
producer = rds_ring->producer;
@@ -1696,7 +1707,8 @@ qlcnic_post_rx_buffers_nodb(struct qlcnic_adapter *adapter,
{
struct rcv_desc *pdesc;
struct qlcnic_rx_buffer *buffer;
- int producer, count = 0;
+ int count = 0;
+ uint32_t producer;
struct list_head *head;
if (!spin_trylock(&rds_ring->lock))
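
In the receive path changed above, the vlgrp test disappears: the tag is always attached with __vlan_hwaccel_put_tag() and the skb is handed to the stack, which copes whether or not a VLAN device is configured. A minimal sketch of that delivery step, assuming the two-argument __vlan_hwaccel_put_tag() and napi_gro_receive() of this kernel generation (kernel-context fragment, not a literal excerpt from the driver):

/* vid == 0xffff is the driver's "no tag" marker */
static void rx_deliver(struct napi_struct *napi, struct sk_buff *skb, u16 vid)
{
	if (vid != 0xffff)
		__vlan_hwaccel_put_tag(skb, vid);   /* record the tag on the skb */
	napi_gro_receive(napi, skb);                /* stack handles tagged or untagged */
}
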
diff --git a/drivers/net/qlcnic/qlcnic_main.c b/drivers/net/qlcnic/qlcnic_main.c
index cd88c7e1bfa9..3ab7d2c7baf2 100644
--- a/drivers/net/qlcnic/qlcnic_main.c
+++ b/drivers/net/qlcnic/qlcnic_main.c
@@ -13,12 +13,12 @@
#include <linux/swab.h>
#include <linux/dma-mapping.h>
-#include <linux/if_vlan.h>
#include <net/ip.h>
#include <linux/ipv6.h>
#include <linux/inetdevice.h>
#include <linux/sysfs.h>
#include <linux/aer.h>
+#include <linux/log2.h>
MODULE_DESCRIPTION("QLogic 1/10 GbE Converged/Intelligent Ethernet Driver");
MODULE_LICENSE("GPL");
@@ -98,6 +98,9 @@ static int qlcnicvf_config_bridged_mode(struct qlcnic_adapter *, u32);
static int qlcnicvf_start_firmware(struct qlcnic_adapter *);
static void qlcnic_set_netdev_features(struct qlcnic_adapter *,
struct qlcnic_esw_func_cfg *);
+static void qlcnic_vlan_rx_add(struct net_device *, u16);
+static void qlcnic_vlan_rx_del(struct net_device *, u16);
+
/* PCI Device ID Table */
#define ENTRY(device) \
{PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, (device)), \
@@ -113,7 +116,7 @@ static DEFINE_PCI_DEVICE_TABLE(qlcnic_pci_tbl) = {
MODULE_DEVICE_TABLE(pci, qlcnic_pci_tbl);
-void
+inline void
qlcnic_update_cmd_producer(struct qlcnic_adapter *adapter,
struct qlcnic_host_tx_ring *tx_ring)
{
@@ -169,7 +172,7 @@ qlcnic_napi_add(struct qlcnic_adapter *adapter, struct net_device *netdev)
{
int ring;
struct qlcnic_host_sds_ring *sds_ring;
- struct qlcnic_recv_context *recv_ctx = &adapter->recv_ctx;
+ struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
if (qlcnic_alloc_sds_rings(recv_ctx, adapter->max_sds_rings))
return -ENOMEM;
@@ -193,14 +196,14 @@ qlcnic_napi_del(struct qlcnic_adapter *adapter)
{
int ring;
struct qlcnic_host_sds_ring *sds_ring;
- struct qlcnic_recv_context *recv_ctx = &adapter->recv_ctx;
+ struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
for (ring = 0; ring < adapter->max_sds_rings; ring++) {
sds_ring = &recv_ctx->sds_rings[ring];
netif_napi_del(&sds_ring->napi);
}
- qlcnic_free_sds_rings(&adapter->recv_ctx);
+ qlcnic_free_sds_rings(adapter->recv_ctx);
}
static void
@@ -208,7 +211,7 @@ qlcnic_napi_enable(struct qlcnic_adapter *adapter)
{
int ring;
struct qlcnic_host_sds_ring *sds_ring;
- struct qlcnic_recv_context *recv_ctx = &adapter->recv_ctx;
+ struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
return;
@@ -225,7 +228,7 @@ qlcnic_napi_disable(struct qlcnic_adapter *adapter)
{
int ring;
struct qlcnic_host_sds_ring *sds_ring;
- struct qlcnic_recv_context *recv_ctx = &adapter->recv_ctx;
+ struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
return;
@@ -317,13 +320,6 @@ static int qlcnic_set_mac(struct net_device *netdev, void *p)
return 0;
}
-static void qlcnic_vlan_rx_register(struct net_device *netdev,
- struct vlan_group *grp)
-{
- struct qlcnic_adapter *adapter = netdev_priv(netdev);
- adapter->vlgrp = grp;
-}
-
static const struct net_device_ops qlcnic_netdev_ops = {
.ndo_open = qlcnic_open,
.ndo_stop = qlcnic_close,
@@ -333,8 +329,11 @@ static const struct net_device_ops qlcnic_netdev_ops = {
.ndo_set_multicast_list = qlcnic_set_multi,
.ndo_set_mac_address = qlcnic_set_mac,
.ndo_change_mtu = qlcnic_change_mtu,
+ .ndo_fix_features = qlcnic_fix_features,
+ .ndo_set_features = qlcnic_set_features,
.ndo_tx_timeout = qlcnic_tx_timeout,
- .ndo_vlan_rx_register = qlcnic_vlan_rx_register,
+ .ndo_vlan_rx_add_vid = qlcnic_vlan_rx_add,
+ .ndo_vlan_rx_kill_vid = qlcnic_vlan_rx_del,
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = qlcnic_poll_controller,
#endif
@@ -352,72 +351,87 @@ static struct qlcnic_nic_template qlcnic_vf_ops = {
.start_firmware = qlcnicvf_start_firmware
};
-static void
-qlcnic_setup_intr(struct qlcnic_adapter *adapter)
+static int qlcnic_enable_msix(struct qlcnic_adapter *adapter, u32 num_msix)
{
- const struct qlcnic_legacy_intr_set *legacy_intrp;
struct pci_dev *pdev = adapter->pdev;
- int err, num_msix;
-
- if (adapter->rss_supported) {
- num_msix = (num_online_cpus() >= MSIX_ENTRIES_PER_ADAPTER) ?
- MSIX_ENTRIES_PER_ADAPTER : 2;
- } else
- num_msix = 1;
+ int err = -1;
adapter->max_sds_rings = 1;
-
adapter->flags &= ~(QLCNIC_MSI_ENABLED | QLCNIC_MSIX_ENABLED);
-
- legacy_intrp = &legacy_intr[adapter->ahw.pci_func];
-
- adapter->int_vec_bit = legacy_intrp->int_vec_bit;
- adapter->tgt_status_reg = qlcnic_get_ioaddr(adapter,
- legacy_intrp->tgt_status_reg);
- adapter->tgt_mask_reg = qlcnic_get_ioaddr(adapter,
- legacy_intrp->tgt_mask_reg);
- adapter->isr_int_vec = qlcnic_get_ioaddr(adapter, ISR_INT_VECTOR);
-
- adapter->crb_int_state_reg = qlcnic_get_ioaddr(adapter,
- ISR_INT_STATE_REG);
-
qlcnic_set_msix_bit(pdev, 0);
if (adapter->msix_supported) {
-
+ enable_msix:
qlcnic_init_msix_entries(adapter, num_msix);
err = pci_enable_msix(pdev, adapter->msix_entries, num_msix);
if (err == 0) {
adapter->flags |= QLCNIC_MSIX_ENABLED;
qlcnic_set_msix_bit(pdev, 1);
- if (adapter->rss_supported)
- adapter->max_sds_rings = num_msix;
+ adapter->max_sds_rings = num_msix;
dev_info(&pdev->dev, "using msi-x interrupts\n");
- return;
+ return err;
}
+ if (err > 0) {
+ num_msix = rounddown_pow_of_two(err);
+ if (num_msix)
+ goto enable_msix;
+ }
+ }
+ return err;
+}
- if (err > 0)
- pci_disable_msix(pdev);
- /* fall through for msi */
- }
+static void qlcnic_enable_msi_legacy(struct qlcnic_adapter *adapter)
+{
+ const struct qlcnic_legacy_intr_set *legacy_intrp;
+ struct pci_dev *pdev = adapter->pdev;
if (use_msi && !pci_enable_msi(pdev)) {
adapter->flags |= QLCNIC_MSI_ENABLED;
adapter->tgt_status_reg = qlcnic_get_ioaddr(adapter,
- msi_tgt_status[adapter->ahw.pci_func]);
+ msi_tgt_status[adapter->ahw->pci_func]);
dev_info(&pdev->dev, "using msi interrupts\n");
adapter->msix_entries[0].vector = pdev->irq;
return;
}
+ legacy_intrp = &legacy_intr[adapter->ahw->pci_func];
+
+ adapter->int_vec_bit = legacy_intrp->int_vec_bit;
+ adapter->tgt_status_reg = qlcnic_get_ioaddr(adapter,
+ legacy_intrp->tgt_status_reg);
+ adapter->tgt_mask_reg = qlcnic_get_ioaddr(adapter,
+ legacy_intrp->tgt_mask_reg);
+ adapter->isr_int_vec = qlcnic_get_ioaddr(adapter, ISR_INT_VECTOR);
+
+ adapter->crb_int_state_reg = qlcnic_get_ioaddr(adapter,
+ ISR_INT_STATE_REG);
dev_info(&pdev->dev, "using legacy interrupts\n");
adapter->msix_entries[0].vector = pdev->irq;
}
static void
+qlcnic_setup_intr(struct qlcnic_adapter *adapter)
+{
+ int num_msix;
+
+ if (adapter->msix_supported) {
+ num_msix = (num_online_cpus() >=
+ QLCNIC_DEF_NUM_STS_DESC_RINGS) ?
+ QLCNIC_DEF_NUM_STS_DESC_RINGS :
+ QLCNIC_MIN_NUM_RSS_RINGS;
+ } else
+ num_msix = 1;
+
+ if (!qlcnic_enable_msix(adapter, num_msix))
+ return;
+
+ qlcnic_enable_msi_legacy(adapter);
+}
+
+static void
qlcnic_teardown_intr(struct qlcnic_adapter *adapter)
{
if (adapter->flags & QLCNIC_MSIX_ENABLED)
@@ -429,8 +443,8 @@ qlcnic_teardown_intr(struct qlcnic_adapter *adapter)
static void
qlcnic_cleanup_pci_map(struct qlcnic_adapter *adapter)
{
- if (adapter->ahw.pci_base0 != NULL)
- iounmap(adapter->ahw.pci_base0);
+ if (adapter->ahw->pci_base0 != NULL)
+ iounmap(adapter->ahw->pci_base0);
}
static int
@@ -464,8 +478,10 @@ qlcnic_init_pci_info(struct qlcnic_adapter *adapter)
for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++) {
pfn = pci_info[i].id;
- if (pfn > QLCNIC_MAX_PCI_FUNC)
- return QL_STATUS_INVALID_PARAM;
+ if (pfn > QLCNIC_MAX_PCI_FUNC) {
+ ret = QL_STATUS_INVALID_PARAM;
+ goto err_eswitch;
+ }
adapter->npars[pfn].active = (u8)pci_info[i].active;
adapter->npars[pfn].type = (u8)pci_info[i].type;
adapter->npars[pfn].phy_port = (u8)pci_info[i].default_port;
@@ -498,7 +514,7 @@ qlcnic_set_function_modes(struct qlcnic_adapter *adapter)
u32 ref_count;
int i, ret = 1;
u32 data = QLCNIC_MGMT_FUNC;
- void __iomem *priv_op = adapter->ahw.pci_base0 + QLCNIC_DRV_OP_MODE;
+ void __iomem *priv_op = adapter->ahw->pci_base0 + QLCNIC_DRV_OP_MODE;
/* If other drivers are not in use set their privilege level */
ref_count = QLCRD32(adapter, QLCNIC_CRB_DRV_ACTIVE);
@@ -510,16 +526,16 @@ qlcnic_set_function_modes(struct qlcnic_adapter *adapter)
for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++) {
id = i;
if (adapter->npars[i].type != QLCNIC_TYPE_NIC ||
- id == adapter->ahw.pci_func)
+ id == adapter->ahw->pci_func)
continue;
data |= (qlcnic_config_npars &
QLC_DEV_SET_DRV(0xf, id));
}
} else {
data = readl(priv_op);
- data = (data & ~QLC_DEV_SET_DRV(0xf, adapter->ahw.pci_func)) |
+ data = (data & ~QLC_DEV_SET_DRV(0xf, adapter->ahw->pci_func)) |
(QLC_DEV_SET_DRV(QLCNIC_MGMT_FUNC,
- adapter->ahw.pci_func));
+ adapter->ahw->pci_func));
}
writel(data, priv_op);
qlcnic_api_unlock(adapter);
@@ -537,22 +553,23 @@ qlcnic_check_vf(struct qlcnic_adapter *adapter)
u32 op_mode, priv_level;
/* Determine FW API version */
- adapter->fw_hal_version = readl(adapter->ahw.pci_base0 + QLCNIC_FW_API);
+ adapter->fw_hal_version = readl(adapter->ahw->pci_base0 +
+ QLCNIC_FW_API);
/* Find PCI function number */
pci_read_config_dword(adapter->pdev, QLCNIC_MSIX_TABLE_OFFSET, &func);
- msix_base_addr = adapter->ahw.pci_base0 + QLCNIC_MSIX_BASE;
+ msix_base_addr = adapter->ahw->pci_base0 + QLCNIC_MSIX_BASE;
msix_base = readl(msix_base_addr);
func = (func - msix_base)/QLCNIC_MSIX_TBL_PGSIZE;
- adapter->ahw.pci_func = func;
+ adapter->ahw->pci_func = func;
/* Determine function privilege level */
- priv_op = adapter->ahw.pci_base0 + QLCNIC_DRV_OP_MODE;
+ priv_op = adapter->ahw->pci_base0 + QLCNIC_DRV_OP_MODE;
op_mode = readl(priv_op);
if (op_mode == QLC_DEV_DRV_DEFAULT)
priv_level = QLCNIC_MGMT_FUNC;
else
- priv_level = QLC_DEV_GET_DRV(op_mode, adapter->ahw.pci_func);
+ priv_level = QLC_DEV_GET_DRV(op_mode, adapter->ahw->pci_func);
if (priv_level == QLCNIC_NON_PRIV_FUNC) {
adapter->op_mode = QLCNIC_NON_PRIV_FUNC;
@@ -591,13 +608,14 @@ qlcnic_setup_pci_map(struct qlcnic_adapter *adapter)
dev_info(&pdev->dev, "%dMB memory map\n", (int)(mem_len>>20));
- adapter->ahw.pci_base0 = mem_ptr0;
- adapter->ahw.pci_len0 = pci_len0;
+ adapter->ahw->pci_base0 = mem_ptr0;
+ adapter->ahw->pci_len0 = pci_len0;
qlcnic_check_vf(adapter);
- adapter->ahw.ocm_win_crb = qlcnic_get_ioaddr(adapter,
- QLCNIC_PCIX_PS_REG(PCIX_OCM_WINDOW_REG(adapter->ahw.pci_func)));
+ adapter->ahw->ocm_win_crb = qlcnic_get_ioaddr(adapter,
+ QLCNIC_PCIX_PS_REG(PCIX_OCM_WINDOW_REG(
+ adapter->ahw->pci_func)));
return 0;
}
@@ -639,7 +657,7 @@ qlcnic_check_options(struct qlcnic_adapter *adapter)
dev_info(&pdev->dev, "firmware v%d.%d.%d\n",
fw_major, fw_minor, fw_build);
- if (adapter->ahw.port_type == QLCNIC_XGBE) {
+ if (adapter->ahw->port_type == QLCNIC_XGBE) {
if (adapter->flags & QLCNIC_ESWITCH_ENABLED) {
adapter->num_rxd = DEFAULT_RCV_DESCRIPTORS_VF;
adapter->max_rxd = MAX_RCV_DESCRIPTORS_VF;
@@ -651,7 +669,7 @@ qlcnic_check_options(struct qlcnic_adapter *adapter)
adapter->num_jumbo_rxd = MAX_JUMBO_RCV_DESCRIPTORS_10G;
adapter->max_jumbo_rxd = MAX_JUMBO_RCV_DESCRIPTORS_10G;
- } else if (adapter->ahw.port_type == QLCNIC_GBE) {
+ } else if (adapter->ahw->port_type == QLCNIC_GBE) {
adapter->num_rxd = DEFAULT_RCV_DESCRIPTORS_1G;
adapter->num_jumbo_rxd = MAX_JUMBO_RCV_DESCRIPTORS_1G;
adapter->max_jumbo_rxd = MAX_JUMBO_RCV_DESCRIPTORS_1G;
@@ -659,7 +677,6 @@ qlcnic_check_options(struct qlcnic_adapter *adapter)
}
adapter->msix_supported = !!use_msi_x;
- adapter->rss_supported = !!use_msi_x;
adapter->num_txd = MAX_CMD_DESCRIPTORS;
@@ -672,7 +689,7 @@ qlcnic_initialize_nic(struct qlcnic_adapter *adapter)
int err;
struct qlcnic_info nic_info;
- err = qlcnic_get_nic_info(adapter, &nic_info, adapter->ahw.pci_func);
+ err = qlcnic_get_nic_info(adapter, &nic_info, adapter->ahw->pci_func);
if (err)
return err;
@@ -708,6 +725,22 @@ qlcnic_set_vlan_config(struct qlcnic_adapter *adapter,
}
static void
+qlcnic_vlan_rx_add(struct net_device *netdev, u16 vid)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(netdev);
+ set_bit(vid, adapter->vlans);
+}
+
+static void
+qlcnic_vlan_rx_del(struct net_device *netdev, u16 vid)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(netdev);
+
+ qlcnic_restore_indev_addr(netdev, NETDEV_DOWN);
+ clear_bit(vid, adapter->vlans);
+}
+
+static void
qlcnic_set_eswitch_port_features(struct qlcnic_adapter *adapter,
struct qlcnic_esw_func_cfg *esw_cfg)
{
@@ -734,7 +767,7 @@ qlcnic_set_eswitch_port_config(struct qlcnic_adapter *adapter)
if (!(adapter->flags & QLCNIC_ESWITCH_ENABLED))
return 0;
- esw_cfg.pci_func = adapter->ahw.pci_func;
+ esw_cfg.pci_func = adapter->ahw->pci_func;
if (qlcnic_get_eswitch_port_config(adapter, &esw_cfg))
return -EIO;
qlcnic_set_vlan_config(adapter, &esw_cfg);
@@ -750,28 +783,27 @@ qlcnic_set_netdev_features(struct qlcnic_adapter *adapter,
struct net_device *netdev = adapter->netdev;
unsigned long features, vlan_features;
- features = (NETIF_F_SG | NETIF_F_IP_CSUM |
+ features = (NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM |
NETIF_F_IPV6_CSUM | NETIF_F_GRO);
vlan_features = (NETIF_F_SG | NETIF_F_IP_CSUM |
- NETIF_F_IPV6_CSUM);
+ NETIF_F_IPV6_CSUM | NETIF_F_HW_VLAN_FILTER);
if (adapter->capabilities & QLCNIC_FW_CAPABILITY_TSO) {
features |= (NETIF_F_TSO | NETIF_F_TSO6);
vlan_features |= (NETIF_F_TSO | NETIF_F_TSO6);
}
- if (adapter->capabilities & QLCNIC_FW_CAPABILITY_HW_LRO)
+
+ if (netdev->features & NETIF_F_LRO)
features |= NETIF_F_LRO;
if (esw_cfg->offload_flags & BIT_0) {
netdev->features |= features;
- adapter->rx_csum = 1;
if (!(esw_cfg->offload_flags & BIT_1))
netdev->features &= ~NETIF_F_TSO;
if (!(esw_cfg->offload_flags & BIT_2))
netdev->features &= ~NETIF_F_TSO6;
} else {
netdev->features &= ~features;
- adapter->rx_csum = 0;
}
netdev->vlan_features = (features & vlan_features);
@@ -791,14 +823,14 @@ qlcnic_check_eswitch_mode(struct qlcnic_adapter *adapter)
if (adapter->flags & QLCNIC_ADAPTER_INITIALIZED)
return 0;
- priv_op = adapter->ahw.pci_base0 + QLCNIC_DRV_OP_MODE;
+ priv_op = adapter->ahw->pci_base0 + QLCNIC_DRV_OP_MODE;
op_mode = readl(priv_op);
- priv_level = QLC_DEV_GET_DRV(op_mode, adapter->ahw.pci_func);
+ priv_level = QLC_DEV_GET_DRV(op_mode, adapter->ahw->pci_func);
if (op_mode == QLC_DEV_DRV_DEFAULT)
priv_level = QLCNIC_MGMT_FUNC;
else
- priv_level = QLC_DEV_GET_DRV(op_mode, adapter->ahw.pci_func);
+ priv_level = QLC_DEV_GET_DRV(op_mode, adapter->ahw->pci_func);
if (adapter->flags & QLCNIC_ESWITCH_ENABLED) {
if (priv_level == QLCNIC_MGMT_FUNC) {
@@ -1038,7 +1070,7 @@ qlcnic_request_irq(struct qlcnic_adapter *adapter)
unsigned long flags = 0;
struct net_device *netdev = adapter->netdev;
- struct qlcnic_recv_context *recv_ctx = &adapter->recv_ctx;
+ struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
if (adapter->diag_test == QLCNIC_INTERRUPT_TEST) {
handler = qlcnic_tmp_intr;
@@ -1075,7 +1107,7 @@ qlcnic_free_irq(struct qlcnic_adapter *adapter)
int ring;
struct qlcnic_host_sds_ring *sds_ring;
- struct qlcnic_recv_context *recv_ctx = &adapter->recv_ctx;
+ struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
for (ring = 0; ring < adapter->max_sds_rings; ring++) {
sds_ring = &recv_ctx->sds_rings[ring];
@@ -1083,20 +1115,6 @@ qlcnic_free_irq(struct qlcnic_adapter *adapter)
}
}
-static void
-qlcnic_init_coalesce_defaults(struct qlcnic_adapter *adapter)
-{
- adapter->coal.flags = QLCNIC_INTR_DEFAULT;
- adapter->coal.normal.data.rx_time_us =
- QLCNIC_DEFAULT_INTR_COALESCE_RX_TIME_US;
- adapter->coal.normal.data.rx_packets =
- QLCNIC_DEFAULT_INTR_COALESCE_RX_PACKETS;
- adapter->coal.normal.data.tx_time_us =
- QLCNIC_DEFAULT_INTR_COALESCE_TX_TIME_US;
- adapter->coal.normal.data.tx_packets =
- QLCNIC_DEFAULT_INTR_COALESCE_TX_PACKETS;
-}
-
static int
__qlcnic_up(struct qlcnic_adapter *adapter, struct net_device *netdev)
{
@@ -1115,14 +1133,14 @@ __qlcnic_up(struct qlcnic_adapter *adapter, struct net_device *netdev)
return -EIO;
for (ring = 0; ring < adapter->max_rds_rings; ring++) {
- rds_ring = &adapter->recv_ctx.rds_rings[ring];
- qlcnic_post_rx_buffers(adapter, ring, rds_ring);
+ rds_ring = &adapter->recv_ctx->rds_rings[ring];
+ qlcnic_post_rx_buffers(adapter, rds_ring);
}
qlcnic_set_multi(netdev);
qlcnic_fw_cmd_set_mtu(adapter, netdev->mtu);
- adapter->ahw.linkup = 0;
+ adapter->ahw->linkup = 0;
if (adapter->max_sds_rings > 1)
qlcnic_config_rss(adapter, 1);
@@ -1230,8 +1248,6 @@ qlcnic_attach(struct qlcnic_adapter *adapter)
goto err_out_free_hw;
}
- qlcnic_init_coalesce_defaults(adapter);
-
qlcnic_create_sysfs_entries(adapter);
adapter->is_up = QLCNIC_ADAPTER_UP_MAGIC;
@@ -1272,7 +1288,7 @@ void qlcnic_diag_free_res(struct net_device *netdev, int max_sds_rings)
clear_bit(__QLCNIC_DEV_UP, &adapter->state);
if (adapter->diag_test == QLCNIC_INTERRUPT_TEST) {
for (ring = 0; ring < adapter->max_sds_rings; ring++) {
- sds_ring = &adapter->recv_ctx.sds_rings[ring];
+ sds_ring = &adapter->recv_ctx->sds_rings[ring];
qlcnic_disable_int(sds_ring);
}
}
@@ -1293,6 +1309,48 @@ out:
netif_device_attach(netdev);
}
+static int qlcnic_alloc_adapter_resources(struct qlcnic_adapter *adapter)
+{
+ int err = 0;
+ adapter->ahw = kzalloc(sizeof(struct qlcnic_hardware_context),
+ GFP_KERNEL);
+ if (!adapter->ahw) {
+ dev_err(&adapter->pdev->dev,
+ "Failed to allocate hardware context for adapter\n");
+ err = -ENOMEM;
+ goto err_out;
+ }
+ adapter->recv_ctx = kzalloc(sizeof(struct qlcnic_recv_context),
+ GFP_KERNEL);
+ if (!adapter->recv_ctx) {
+ dev_err(&adapter->pdev->dev,
+ "Failed to allocate recv ctx resources for adapter\n");
+ kfree(adapter->ahw);
+ adapter->ahw = NULL;
+ err = -ENOMEM;
+ goto err_out;
+ }
+ /* Initialize interrupt coalesce parameters */
+ adapter->ahw->coal.flag = QLCNIC_INTR_DEFAULT;
+ adapter->ahw->coal.rx_time_us = QLCNIC_DEFAULT_INTR_COALESCE_RX_TIME_US;
+ adapter->ahw->coal.rx_packets = QLCNIC_DEFAULT_INTR_COALESCE_RX_PACKETS;
+err_out:
+ return err;
+}
+
+static void qlcnic_free_adapter_resources(struct qlcnic_adapter *adapter)
+{
+ kfree(adapter->recv_ctx);
+ adapter->recv_ctx = NULL;
+
+ if (adapter->ahw->fw_dump.tmpl_hdr) {
+ vfree(adapter->ahw->fw_dump.tmpl_hdr);
+ adapter->ahw->fw_dump.tmpl_hdr = NULL;
+ }
+ kfree(adapter->ahw);
+ adapter->ahw = NULL;
+}
+
int qlcnic_diag_alloc_res(struct net_device *netdev, int test)
{
struct qlcnic_adapter *adapter = netdev_priv(netdev);
@@ -1325,13 +1383,13 @@ int qlcnic_diag_alloc_res(struct net_device *netdev, int test)
}
for (ring = 0; ring < adapter->max_rds_rings; ring++) {
- rds_ring = &adapter->recv_ctx.rds_rings[ring];
- qlcnic_post_rx_buffers(adapter, ring, rds_ring);
+ rds_ring = &adapter->recv_ctx->rds_rings[ring];
+ qlcnic_post_rx_buffers(adapter, rds_ring);
}
if (adapter->diag_test == QLCNIC_INTERRUPT_TEST) {
for (ring = 0; ring < adapter->max_sds_rings; ring++) {
- sds_ring = &adapter->recv_ctx.sds_rings[ring];
+ sds_ring = &adapter->recv_ctx->sds_rings[ring];
qlcnic_enable_int(sds_ring);
}
}
@@ -1399,7 +1457,6 @@ qlcnic_setup_netdev(struct qlcnic_adapter *adapter,
int err;
struct pci_dev *pdev = adapter->pdev;
- adapter->rx_csum = 1;
adapter->mc_enabled = 0;
adapter->max_mc_count = 38;
@@ -1410,26 +1467,24 @@ qlcnic_setup_netdev(struct qlcnic_adapter *adapter,
SET_ETHTOOL_OPS(netdev, &qlcnic_ethtool_ops);
- netdev->features |= (NETIF_F_SG | NETIF_F_IP_CSUM |
- NETIF_F_IPV6_CSUM | NETIF_F_GRO | NETIF_F_HW_VLAN_RX);
- netdev->vlan_features |= (NETIF_F_SG | NETIF_F_IP_CSUM |
- NETIF_F_IPV6_CSUM);
+ netdev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM |
+ NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM;
- if (adapter->capabilities & QLCNIC_FW_CAPABILITY_TSO) {
- netdev->features |= (NETIF_F_TSO | NETIF_F_TSO6);
- netdev->vlan_features |= (NETIF_F_TSO | NETIF_F_TSO6);
- }
+ if (adapter->capabilities & QLCNIC_FW_CAPABILITY_TSO)
+ netdev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6;
+ if (pci_using_dac)
+ netdev->hw_features |= NETIF_F_HIGHDMA;
- if (pci_using_dac) {
- netdev->features |= NETIF_F_HIGHDMA;
- netdev->vlan_features |= NETIF_F_HIGHDMA;
- }
+ netdev->vlan_features = netdev->hw_features;
if (adapter->capabilities & QLCNIC_FW_CAPABILITY_FVLANTX)
- netdev->features |= (NETIF_F_HW_VLAN_TX);
-
+ netdev->hw_features |= NETIF_F_HW_VLAN_TX;
if (adapter->capabilities & QLCNIC_FW_CAPABILITY_HW_LRO)
- netdev->features |= NETIF_F_LRO;
+ netdev->hw_features |= NETIF_F_LRO;
+
+ netdev->features |= netdev->hw_features |
+ NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER;
+
netdev->irq = adapter->msix_entries[0].vector;
netif_carrier_off(netdev);
@@ -1459,6 +1514,19 @@ static int qlcnic_set_dma_mask(struct pci_dev *pdev, u8 *pci_using_dac)
return 0;
}
+static int
+qlcnic_alloc_msix_entries(struct qlcnic_adapter *adapter, u16 count)
+{
+ adapter->msix_entries = kcalloc(count, sizeof(struct msix_entry),
+ GFP_KERNEL);
+
+ if (adapter->msix_entries)
+ return 0;
+
+ dev_err(&adapter->pdev->dev, "failed allocating msix_entries\n");
+ return -ENOMEM;
+}
+
static int __devinit
qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
@@ -1501,23 +1569,30 @@ qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
adapter = netdev_priv(netdev);
adapter->netdev = netdev;
adapter->pdev = pdev;
- adapter->dev_rst_time = jiffies;
+ if (qlcnic_alloc_adapter_resources(adapter))
+ goto err_out_free_netdev;
+
+ adapter->dev_rst_time = jiffies;
revision_id = pdev->revision;
- adapter->ahw.revision_id = revision_id;
+ adapter->ahw->revision_id = revision_id;
- rwlock_init(&adapter->ahw.crb_lock);
- mutex_init(&adapter->ahw.mem_lock);
+ rwlock_init(&adapter->ahw->crb_lock);
+ mutex_init(&adapter->ahw->mem_lock);
spin_lock_init(&adapter->tx_clean_lock);
INIT_LIST_HEAD(&adapter->mac_list);
err = qlcnic_setup_pci_map(adapter);
if (err)
- goto err_out_free_netdev;
+ goto err_out_free_hw;
/* This will be reset for mezz cards */
- adapter->portnum = adapter->ahw.pci_func;
+ adapter->portnum = adapter->ahw->pci_func;
+
+ /* Get FW dump template and store it */
+ if (adapter->op_mode != QLCNIC_NON_PRIV_FUNC)
+ qlcnic_fw_cmd_get_minidump_temp(adapter);
err = qlcnic_get_board_info(adapter);
if (err) {
@@ -1545,11 +1620,15 @@ qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
pr_info("%s: %s Board Chip rev 0x%x\n",
module_name(THIS_MODULE),
- brd_name, adapter->ahw.revision_id);
+ brd_name, adapter->ahw->revision_id);
}
qlcnic_clear_stats(adapter);
+ err = qlcnic_alloc_msix_entries(adapter, adapter->max_rx_ques);
+ if (err)
+ goto err_out_decr_ref;
+
qlcnic_setup_intr(adapter);
err = qlcnic_setup_netdev(adapter, netdev, pci_using_dac);
@@ -1560,7 +1639,7 @@ qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
qlcnic_schedule_work(adapter, qlcnic_fw_poll_work, FW_POLL_DELAY);
- switch (adapter->ahw.port_type) {
+ switch (adapter->ahw->port_type) {
case QLCNIC_GBE:
dev_info(&adapter->pdev->dev, "%s: GbE port initialized\n",
adapter->netdev->name);
@@ -1578,6 +1657,7 @@ qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
err_out_disable_msi:
qlcnic_teardown_intr(adapter);
+ kfree(adapter->msix_entries);
err_out_decr_ref:
qlcnic_clr_all_drv_state(adapter, 0);
@@ -1585,6 +1665,9 @@ err_out_decr_ref:
err_out_iounmap:
qlcnic_cleanup_pci_map(adapter);
+err_out_free_hw:
+ qlcnic_free_adapter_resources(adapter);
+
err_out_free_netdev:
free_netdev(netdev);
@@ -1626,6 +1709,7 @@ static void __devexit qlcnic_remove(struct pci_dev *pdev)
qlcnic_free_lb_filters_mem(adapter);
qlcnic_teardown_intr(adapter);
+ kfree(adapter->msix_entries);
qlcnic_remove_diag_entries(adapter);
@@ -1638,6 +1722,7 @@ static void __devexit qlcnic_remove(struct pci_dev *pdev)
pci_disable_device(pdev);
pci_set_drvdata(pdev, NULL);
+ qlcnic_free_adapter_resources(adapter);
free_netdev(netdev);
}
static int __qlcnic_shutdown(struct pci_dev *pdev)
@@ -1819,6 +1904,7 @@ static void qlcnic_change_filter(struct qlcnic_adapter *adapter,
vlan_req->vlan_id = vlan_id;
tx_ring->producer = get_next_index(producer, tx_ring->num_desc);
+ smp_mb();
}
#define QLCNIC_MAC_HASH(MAC)\
@@ -1879,58 +1965,122 @@ qlcnic_send_filter(struct qlcnic_adapter *adapter,
spin_unlock(&adapter->mac_learn_lock);
}
-static void
-qlcnic_tso_check(struct net_device *netdev,
- struct qlcnic_host_tx_ring *tx_ring,
+static int
+qlcnic_tx_pkt(struct qlcnic_adapter *adapter,
struct cmd_desc_type0 *first_desc,
struct sk_buff *skb)
{
- u8 opcode = TX_ETHER_PKT;
- __be16 protocol = skb->protocol;
- u16 flags = 0;
- int copied, offset, copy_len, hdr_len = 0, tso = 0;
+ u8 opcode = 0, hdr_len = 0;
+ u16 flags = 0, vlan_tci = 0;
+ int copied, offset, copy_len;
struct cmd_desc_type0 *hwdesc;
struct vlan_ethhdr *vh;
- struct qlcnic_adapter *adapter = netdev_priv(netdev);
+ struct qlcnic_host_tx_ring *tx_ring = adapter->tx_ring;
+ u16 protocol = ntohs(skb->protocol);
u32 producer = tx_ring->producer;
- __le16 vlan_oob = first_desc->flags_opcode &
- cpu_to_le16(FLAGS_VLAN_OOB);
+
+ if (protocol == ETH_P_8021Q) {
+ vh = (struct vlan_ethhdr *)skb->data;
+ flags = FLAGS_VLAN_TAGGED;
+ vlan_tci = vh->h_vlan_TCI;
+ } else if (vlan_tx_tag_present(skb)) {
+ flags = FLAGS_VLAN_OOB;
+ vlan_tci = vlan_tx_tag_get(skb);
+ }
+ if (unlikely(adapter->pvid)) {
+ if (vlan_tci && !(adapter->flags & QLCNIC_TAGGING_ENABLED))
+ return -EIO;
+ if (vlan_tci && (adapter->flags & QLCNIC_TAGGING_ENABLED))
+ goto set_flags;
+
+ flags = FLAGS_VLAN_OOB;
+ vlan_tci = adapter->pvid;
+ }
+set_flags:
+ qlcnic_set_tx_vlan_tci(first_desc, vlan_tci);
+ qlcnic_set_tx_flags_opcode(first_desc, flags, opcode);
if (*(skb->data) & BIT_0) {
flags |= BIT_0;
memcpy(&first_desc->eth_addr, skb->data, ETH_ALEN);
}
-
- if ((netdev->features & (NETIF_F_TSO | NETIF_F_TSO6)) &&
+ opcode = TX_ETHER_PKT;
+ if ((adapter->netdev->features & (NETIF_F_TSO | NETIF_F_TSO6)) &&
skb_shinfo(skb)->gso_size > 0) {
hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
first_desc->mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
first_desc->total_hdr_length = hdr_len;
- if (vlan_oob) {
+
+ opcode = (protocol == ETH_P_IPV6) ? TX_TCP_LSO6 : TX_TCP_LSO;
+
+ /* For LSO, we need to copy the MAC/IP/TCP headers into
+ * the descriptor ring */
+ copied = 0;
+ offset = 2;
+
+ if (flags & FLAGS_VLAN_OOB) {
first_desc->total_hdr_length += VLAN_HLEN;
first_desc->tcp_hdr_offset = VLAN_HLEN;
first_desc->ip_hdr_offset = VLAN_HLEN;
/* Only in case of TSO on vlan device */
flags |= FLAGS_VLAN_TAGGED;
+
+ /* Create a TSO vlan header template for firmware */
+
+ hwdesc = &tx_ring->desc_head[producer];
+ tx_ring->cmd_buf_arr[producer].skb = NULL;
+
+ copy_len = min((int)sizeof(struct cmd_desc_type0) -
+ offset, hdr_len + VLAN_HLEN);
+
+ vh = (struct vlan_ethhdr *)((char *) hwdesc + 2);
+ skb_copy_from_linear_data(skb, vh, 12);
+ vh->h_vlan_proto = htons(ETH_P_8021Q);
+ vh->h_vlan_TCI = htons(vlan_tci);
+
+ skb_copy_from_linear_data_offset(skb, 12,
+ (char *)vh + 16, copy_len - 16);
+
+ copied = copy_len - VLAN_HLEN;
+ offset = 0;
+
+ producer = get_next_index(producer, tx_ring->num_desc);
}
- opcode = (protocol == cpu_to_be16(ETH_P_IPV6)) ?
- TX_TCP_LSO6 : TX_TCP_LSO;
- tso = 1;
+ while (copied < hdr_len) {
+
+ copy_len = min((int)sizeof(struct cmd_desc_type0) -
+ offset, (hdr_len - copied));
+
+ hwdesc = &tx_ring->desc_head[producer];
+ tx_ring->cmd_buf_arr[producer].skb = NULL;
+
+ skb_copy_from_linear_data_offset(skb, copied,
+ (char *) hwdesc + offset, copy_len);
+
+ copied += copy_len;
+ offset = 0;
+
+ producer = get_next_index(producer, tx_ring->num_desc);
+ }
+
+ tx_ring->producer = producer;
+ smp_mb();
+ adapter->stats.lso_frames++;
} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
u8 l4proto;
- if (protocol == cpu_to_be16(ETH_P_IP)) {
+ if (protocol == ETH_P_IP) {
l4proto = ip_hdr(skb)->protocol;
if (l4proto == IPPROTO_TCP)
opcode = TX_TCP_PKT;
else if (l4proto == IPPROTO_UDP)
opcode = TX_UDP_PKT;
- } else if (protocol == cpu_to_be16(ETH_P_IPV6)) {
+ } else if (protocol == ETH_P_IPV6) {
l4proto = ipv6_hdr(skb)->nexthdr;
if (l4proto == IPPROTO_TCP)
@@ -1939,63 +2089,11 @@ qlcnic_tso_check(struct net_device *netdev,
opcode = TX_UDPV6_PKT;
}
}
-
first_desc->tcp_hdr_offset += skb_transport_offset(skb);
first_desc->ip_hdr_offset += skb_network_offset(skb);
qlcnic_set_tx_flags_opcode(first_desc, flags, opcode);
- if (!tso)
- return;
-
- /* For LSO, we need to copy the MAC/IP/TCP headers into
- * the descriptor ring
- */
- copied = 0;
- offset = 2;
-
- if (vlan_oob) {
- /* Create a TSO vlan header template for firmware */
-
- hwdesc = &tx_ring->desc_head[producer];
- tx_ring->cmd_buf_arr[producer].skb = NULL;
-
- copy_len = min((int)sizeof(struct cmd_desc_type0) - offset,
- hdr_len + VLAN_HLEN);
-
- vh = (struct vlan_ethhdr *)((char *)hwdesc + 2);
- skb_copy_from_linear_data(skb, vh, 12);
- vh->h_vlan_proto = htons(ETH_P_8021Q);
- vh->h_vlan_TCI = (__be16)swab16((u16)first_desc->vlan_TCI);
-
- skb_copy_from_linear_data_offset(skb, 12,
- (char *)vh + 16, copy_len - 16);
-
- copied = copy_len - VLAN_HLEN;
- offset = 0;
-
- producer = get_next_index(producer, tx_ring->num_desc);
- }
-
- while (copied < hdr_len) {
-
- copy_len = min((int)sizeof(struct cmd_desc_type0) - offset,
- (hdr_len - copied));
-
- hwdesc = &tx_ring->desc_head[producer];
- tx_ring->cmd_buf_arr[producer].skb = NULL;
-
- skb_copy_from_linear_data_offset(skb, copied,
- (char *)hwdesc + offset, copy_len);
-
- copied += copy_len;
- offset = 0;
-
- producer = get_next_index(producer, tx_ring->num_desc);
- }
-
- tx_ring->producer = producer;
- barrier();
- adapter->stats.lso_frames++;
+ return 0;
}
static int
@@ -2046,39 +2144,21 @@ out_err:
return -ENOMEM;
}
-static int
-qlcnic_check_tx_tagging(struct qlcnic_adapter *adapter,
- struct sk_buff *skb,
- struct cmd_desc_type0 *first_desc)
+static void
+qlcnic_unmap_buffers(struct pci_dev *pdev, struct sk_buff *skb,
+ struct qlcnic_cmd_buffer *pbuf)
{
- u8 opcode = 0;
- u16 flags = 0;
- __be16 protocol = skb->protocol;
- struct vlan_ethhdr *vh;
+ struct qlcnic_skb_frag *nf = &pbuf->frag_array[0];
+ int nr_frags = skb_shinfo(skb)->nr_frags;
+ int i;
- if (protocol == cpu_to_be16(ETH_P_8021Q)) {
- vh = (struct vlan_ethhdr *)skb->data;
- protocol = vh->h_vlan_encapsulated_proto;
- flags = FLAGS_VLAN_TAGGED;
- qlcnic_set_tx_vlan_tci(first_desc, ntohs(vh->h_vlan_TCI));
- } else if (vlan_tx_tag_present(skb)) {
- flags = FLAGS_VLAN_OOB;
- qlcnic_set_tx_vlan_tci(first_desc, vlan_tx_tag_get(skb));
+ for (i = 0; i < nr_frags; i++) {
+ nf = &pbuf->frag_array[i+1];
+ pci_unmap_page(pdev, nf->dma, nf->length, PCI_DMA_TODEVICE);
}
- if (unlikely(adapter->pvid)) {
- if (first_desc->vlan_TCI &&
- !(adapter->flags & QLCNIC_TAGGING_ENABLED))
- return -EIO;
- if (first_desc->vlan_TCI &&
- (adapter->flags & QLCNIC_TAGGING_ENABLED))
- goto set_flags;
- flags = FLAGS_VLAN_OOB;
- qlcnic_set_tx_vlan_tci(first_desc, adapter->pvid);
- }
-set_flags:
- qlcnic_set_tx_flags_opcode(first_desc, flags, opcode);
- return 0;
+ nf = &pbuf->frag_array[0];
+ pci_unmap_single(pdev, nf->dma, skb_headlen(skb), PCI_DMA_TODEVICE);
}
static inline void
@@ -2099,10 +2179,11 @@ qlcnic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
struct cmd_desc_type0 *hwdesc, *first_desc;
struct pci_dev *pdev;
struct ethhdr *phdr;
+ int delta = 0;
int i, k;
u32 producer;
- int frag_count, no_of_desc;
+ int frag_count;
u32 num_txd = tx_ring->num_desc;
if (!test_bit(__QLCNIC_DEV_UP, &adapter->state)) {
@@ -2118,13 +2199,22 @@ qlcnic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
}
frag_count = skb_shinfo(skb)->nr_frags + 1;
+ /* 14 frags supported for normal packet and
+ * 32 frags supported for TSO packet
+ */
+ if (!skb_is_gso(skb) && frag_count > QLCNIC_MAX_FRAGS_PER_TX) {
- /* 4 fragments per cmd des */
- no_of_desc = (frag_count + 3) >> 2;
+ for (i = 0; i < (frag_count - QLCNIC_MAX_FRAGS_PER_TX); i++)
+ delta += skb_shinfo(skb)->frags[i].size;
+
+ if (!__pskb_pull_tail(skb, delta))
+ goto drop_packet;
+
+ frag_count = 1 + skb_shinfo(skb)->nr_frags;
+ }
if (unlikely(qlcnic_tx_avail(tx_ring) <= TX_STOP_THRESH)) {
netif_stop_queue(netdev);
- smp_mb();
if (qlcnic_tx_avail(tx_ring) > TX_STOP_THRESH)
netif_start_queue(netdev);
else {
@@ -2141,9 +2231,6 @@ qlcnic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
first_desc = hwdesc = &tx_ring->desc_head[producer];
qlcnic_clear_cmddesc((u64 *)hwdesc);
- if (qlcnic_check_tx_tagging(adapter, skb, first_desc))
- goto drop_packet;
-
if (qlcnic_map_tx_skb(pdev, skb, pbuf)) {
adapter->stats.tx_dma_map_error++;
goto drop_packet;
@@ -2187,8 +2274,10 @@ qlcnic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
}
tx_ring->producer = get_next_index(producer, num_txd);
+ smp_mb();
- qlcnic_tso_check(netdev, tx_ring, first_desc, skb);
+ if (unlikely(qlcnic_tx_pkt(adapter, first_desc, skb)))
+ goto unwind_buff;
if (qlcnic_mac_learn)
qlcnic_send_filter(adapter, tx_ring, first_desc, skb);
@@ -2200,6 +2289,8 @@ qlcnic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
return NETDEV_TX_OK;
+unwind_buff:
+ qlcnic_unmap_buffers(pdev, skb, pbuf);
drop_packet:
adapter->stats.txdropped++;
dev_kfree_skb_any(skb);
@@ -2246,16 +2337,16 @@ void qlcnic_advert_link_change(struct qlcnic_adapter *adapter, int linkup)
{
struct net_device *netdev = adapter->netdev;
- if (adapter->ahw.linkup && !linkup) {
+ if (adapter->ahw->linkup && !linkup) {
netdev_info(netdev, "NIC Link is down\n");
- adapter->ahw.linkup = 0;
+ adapter->ahw->linkup = 0;
if (netif_running(netdev)) {
netif_carrier_off(netdev);
netif_stop_queue(netdev);
}
- } else if (!adapter->ahw.linkup && linkup) {
+ } else if (!adapter->ahw->linkup && linkup) {
netdev_info(netdev, "NIC Link is up\n");
- adapter->ahw.linkup = 1;
+ adapter->ahw->linkup = 1;
if (netif_running(netdev)) {
netif_carrier_on(netdev);
netif_wake_queue(netdev);
@@ -2491,7 +2582,7 @@ static void qlcnic_poll_controller(struct net_device *netdev)
int ring;
struct qlcnic_host_sds_ring *sds_ring;
struct qlcnic_adapter *adapter = netdev_priv(netdev);
- struct qlcnic_recv_context *recv_ctx = &adapter->recv_ctx;
+ struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
disable_irq(adapter->irq);
for (ring = 0; ring < adapter->max_sds_rings; ring++) {
@@ -2742,6 +2833,8 @@ skip_ack_check:
set_bit(__QLCNIC_START_FW, &adapter->state);
QLCDB(adapter, DRV, "Restarting fw\n");
qlcnic_idc_debug_info(adapter, 0);
+ QLCDB(adapter, DRV, "Take FW dump\n");
+ qlcnic_dump_fw(adapter);
}
qlcnic_api_unlock(adapter);
@@ -2840,7 +2933,7 @@ qlcnic_set_npar_non_operational(struct qlcnic_adapter *adapter)
}
/*Transit to RESET state from READY state only */
-static void
+void
qlcnic_dev_request_reset(struct qlcnic_adapter *adapter)
{
u32 state;
@@ -3252,6 +3345,56 @@ static struct device_attribute dev_attr_diag_mode = {
.store = qlcnic_store_diag_mode,
};
+int qlcnic_validate_max_rss(struct net_device *netdev, u8 max_hw, u8 val)
+{
+ if (!use_msi_x && !use_msi) {
+ netdev_info(netdev, "no msix or msi support, hence no rss\n");
+ return -EINVAL;
+ }
+
+ if ((val > max_hw) || (val < 2) || !is_power_of_2(val)) {
+ netdev_info(netdev, "rss_ring valid range [2 - %x] in "
+ " powers of 2\n", max_hw);
+ return -EINVAL;
+ }
+ return 0;
+
+}
+
+int qlcnic_set_max_rss(struct qlcnic_adapter *adapter, u8 data)
+{
+ struct net_device *netdev = adapter->netdev;
+ int err = 0;
+
+ if (test_and_set_bit(__QLCNIC_RESETTING, &adapter->state))
+ return -EBUSY;
+
+ netif_device_detach(netdev);
+ if (netif_running(netdev))
+ __qlcnic_down(adapter, netdev);
+ qlcnic_detach(adapter);
+ qlcnic_teardown_intr(adapter);
+
+ if (qlcnic_enable_msix(adapter, data)) {
+ netdev_info(netdev, "failed setting max_rss; rss disabled\n");
+ qlcnic_enable_msi_legacy(adapter);
+ }
+
+ if (netif_running(netdev)) {
+ err = qlcnic_attach(adapter);
+ if (err)
+ goto done;
+ err = __qlcnic_up(adapter, netdev);
+ if (err)
+ goto done;
+ qlcnic_restore_indev_addr(netdev, NETDEV_UP);
+ }
+ done:
+ netif_device_attach(netdev);
+ clear_bit(__QLCNIC_RESETTING, &adapter->state);
+ return err;
+}
+
static int
qlcnic_sysfs_validate_crb(struct qlcnic_adapter *adapter,
loff_t offset, size_t size)
@@ -3382,7 +3525,6 @@ qlcnic_sysfs_write_mem(struct file *filp, struct kobject *kobj,
return size;
}
-
static struct bin_attribute bin_attr_crb = {
.attr = {.name = "crb", .mode = (S_IRUGO | S_IWUSR)},
.size = 0,
@@ -3501,7 +3643,7 @@ validate_esw_config(struct qlcnic_adapter *adapter,
u8 pci_func;
int i;
- op_mode = readl(adapter->ahw.pci_base0 + QLCNIC_DRV_OP_MODE);
+ op_mode = readl(adapter->ahw->pci_base0 + QLCNIC_DRV_OP_MODE);
for (i = 0; i < count; i++) {
pci_func = esw_cfg[i].pci_func;
@@ -3567,13 +3709,13 @@ qlcnic_sysfs_write_esw_config(struct file *file, struct kobject *kobj,
if (qlcnic_config_switch_port(adapter, &esw_cfg[i]))
return QL_STATUS_INVALID_PARAM;
- if (adapter->ahw.pci_func != esw_cfg[i].pci_func)
+ if (adapter->ahw->pci_func != esw_cfg[i].pci_func)
continue;
op_mode = esw_cfg[i].op_mode;
qlcnic_get_eswitch_port_config(adapter, &esw_cfg[i]);
esw_cfg[i].op_mode = op_mode;
- esw_cfg[i].pci_func = adapter->ahw.pci_func;
+ esw_cfg[i].pci_func = adapter->ahw->pci_func;
switch (esw_cfg[i].op_mode) {
case QLCNIC_PORT_DEFAULTS:
@@ -3954,14 +4096,14 @@ qlcnic_create_diag_entries(struct qlcnic_adapter *adapter)
dev_info(dev, "failed to create crb sysfs entry\n");
if (device_create_bin_file(dev, &bin_attr_mem))
dev_info(dev, "failed to create mem sysfs entry\n");
+ if (device_create_bin_file(dev, &bin_attr_pci_config))
+ dev_info(dev, "failed to create pci config sysfs entry");
if (!(adapter->flags & QLCNIC_ESWITCH_ENABLED))
return;
if (device_create_bin_file(dev, &bin_attr_esw_config))
dev_info(dev, "failed to create esw config sysfs entry");
if (adapter->op_mode != QLCNIC_MGMT_FUNC)
return;
- if (device_create_bin_file(dev, &bin_attr_pci_config))
- dev_info(dev, "failed to create pci config sysfs entry");
if (device_create_bin_file(dev, &bin_attr_npar_config))
dev_info(dev, "failed to create npar config sysfs entry");
if (device_create_bin_file(dev, &bin_attr_pm_config))
@@ -3982,12 +4124,12 @@ qlcnic_remove_diag_entries(struct qlcnic_adapter *adapter)
device_remove_file(dev, &dev_attr_diag_mode);
device_remove_bin_file(dev, &bin_attr_crb);
device_remove_bin_file(dev, &bin_attr_mem);
+ device_remove_bin_file(dev, &bin_attr_pci_config);
if (!(adapter->flags & QLCNIC_ESWITCH_ENABLED))
return;
device_remove_bin_file(dev, &bin_attr_esw_config);
if (adapter->op_mode != QLCNIC_MGMT_FUNC)
return;
- device_remove_bin_file(dev, &bin_attr_pci_config);
device_remove_bin_file(dev, &bin_attr_npar_config);
device_remove_bin_file(dev, &bin_attr_pm_config);
device_remove_bin_file(dev, &bin_attr_esw_stats);
@@ -4034,14 +4176,10 @@ qlcnic_restore_indev_addr(struct net_device *netdev, unsigned long event)
qlcnic_config_indev_addr(adapter, netdev, event);
- if (!adapter->vlgrp)
- return;
-
- for (vid = 0; vid < VLAN_N_VID; vid++) {
- dev = vlan_group_get_device(adapter->vlgrp, vid);
+ for_each_set_bit(vid, adapter->vlans, VLAN_N_VID) {
+ dev = vlan_find_dev(netdev, vid);
if (!dev)
continue;
-
qlcnic_config_indev_addr(adapter, dev, event);
}
}
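
qlcnic_main.c now tracks VLANs in a plain bitmap instead of a vlan_group: the new ndo_vlan_rx_add_vid/ndo_vlan_rx_kill_vid callbacks set and clear one bit per id, and the restore path walks only the set bits with for_each_set_bit(). The same bookkeeping in self-contained form, assuming a 4096-entry id space like VLAN_N_VID (illustrative userspace analogue, not driver code):

#include <stdbool.h>
#include <stdint.h>

#define N_VID     4096                           /* same size as VLAN_N_VID */
#define LONG_BITS (8 * sizeof(unsigned long))

static unsigned long vlans[N_VID / LONG_BITS];   /* one bit per VLAN id */

static void vlan_add(uint16_t vid)               /* ndo_vlan_rx_add_vid analogue */
{
	vlans[vid / LONG_BITS] |= 1UL << (vid % LONG_BITS);
}

static void vlan_del(uint16_t vid)               /* ndo_vlan_rx_kill_vid analogue */
{
	vlans[vid / LONG_BITS] &= ~(1UL << (vid % LONG_BITS));
}

static bool vlan_present(uint16_t vid)
{
	return vlans[vid / LONG_BITS] & (1UL << (vid % LONG_BITS));
}

/* restore visits only the configured ids, like for_each_set_bit() above */
static void vlan_restore(void (*apply)(uint16_t vid))
{
	for (uint16_t vid = 0; vid < N_VID; vid++)
		if (vlan_present(vid))
			apply(vid);
}
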
diff --git a/drivers/net/qlge/qlge.h b/drivers/net/qlge/qlge.h
index 4757c59a07a2..d32850715f5c 100644
--- a/drivers/net/qlge/qlge.h
+++ b/drivers/net/qlge/qlge.h
@@ -2134,7 +2134,7 @@ struct ql_adapter {
struct delayed_work mpi_idc_work;
struct delayed_work mpi_core_to_log;
struct completion ide_completion;
- struct nic_operations *nic_ops;
+ const struct nic_operations *nic_ops;
u16 device_id;
struct timer_list timer;
atomic_t lb_count;
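
The qlge.h hunk above const-qualifies the per-chip ops pointer so the method table can live in read-only data and cannot be patched at runtime. A minimal sketch of that pattern with illustrative names (not the driver's real helpers):

struct nic_operations {
	int (*get_flash)(void);
	int (*port_initialize)(void);
};

static int get_8012_flash(void)  { return 0; }
static int init_8012_port(void)  { return 0; }

/* const table: it lands in .rodata and any write through it is rejected */
static const struct nic_operations qla8012_ops = {
	.get_flash       = get_8012_flash,
	.port_initialize = init_8012_port,
};

struct adapter {
	const struct nic_operations *nic_ops;    /* pointer-to-const, as in the hunk */
};

int main(void)
{
	struct adapter qdev = { .nic_ops = &qla8012_ops };

	return qdev.nic_ops->get_flash() | qdev.nic_ops->port_initialize();
}
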
diff --git a/drivers/net/qlge/qlge_ethtool.c b/drivers/net/qlge/qlge_ethtool.c
index 8149cc9de4ca..19b00fa0eaf0 100644
--- a/drivers/net/qlge/qlge_ethtool.c
+++ b/drivers/net/qlge/qlge_ethtool.c
@@ -356,7 +356,7 @@ static int ql_get_settings(struct net_device *ndev,
ecmd->port = PORT_FIBRE;
}
- ecmd->speed = SPEED_10000;
+ ethtool_cmd_speed_set(ecmd, SPEED_10000);
ecmd->duplex = DUPLEX_FULL;
return 0;
@@ -412,31 +412,31 @@ static int ql_set_wol(struct net_device *ndev, struct ethtool_wolinfo *wol)
return 0;
}
-static int ql_phys_id(struct net_device *ndev, u32 data)
+static int ql_set_phys_id(struct net_device *ndev,
+ enum ethtool_phys_id_state state)
+
{
struct ql_adapter *qdev = netdev_priv(ndev);
- u32 led_reg, i;
- int status;
-
- /* Save the current LED settings */
- status = ql_mb_get_led_cfg(qdev);
- if (status)
- return status;
- led_reg = qdev->led_config;
- /* Start blinking the led */
- if (!data || data > 300)
- data = 300;
+ switch (state) {
+ case ETHTOOL_ID_ACTIVE:
+ /* Save the current LED settings */
+ if (ql_mb_get_led_cfg(qdev))
+ return -EIO;
- for (i = 0; i < (data * 10); i++)
+ /* Start blinking */
ql_mb_set_led_cfg(qdev, QL_LED_BLINK);
+ return 0;
- /* Restore LED settings */
- status = ql_mb_set_led_cfg(qdev, led_reg);
- if (status)
- return status;
+ case ETHTOOL_ID_INACTIVE:
+ /* Restore LED settings */
+ if (ql_mb_set_led_cfg(qdev, qdev->led_config))
+ return -EIO;
+ return 0;
- return 0;
+ default:
+ return -EINVAL;
+ }
}
static int ql_start_loopback(struct ql_adapter *qdev)
@@ -655,32 +655,6 @@ static int ql_set_pauseparam(struct net_device *netdev,
return status;
}
-static u32 ql_get_rx_csum(struct net_device *netdev)
-{
- struct ql_adapter *qdev = netdev_priv(netdev);
- return qdev->rx_csum;
-}
-
-static int ql_set_rx_csum(struct net_device *netdev, uint32_t data)
-{
- struct ql_adapter *qdev = netdev_priv(netdev);
- qdev->rx_csum = data;
- return 0;
-}
-
-static int ql_set_tso(struct net_device *ndev, uint32_t data)
-{
-
- if (data) {
- ndev->features |= NETIF_F_TSO;
- ndev->features |= NETIF_F_TSO6;
- } else {
- ndev->features &= ~NETIF_F_TSO;
- ndev->features &= ~NETIF_F_TSO6;
- }
- return 0;
-}
-
static u32 ql_get_msglevel(struct net_device *ndev)
{
struct ql_adapter *qdev = netdev_priv(ndev);
@@ -703,18 +677,10 @@ const struct ethtool_ops qlge_ethtool_ops = {
.get_msglevel = ql_get_msglevel,
.set_msglevel = ql_set_msglevel,
.get_link = ethtool_op_get_link,
- .phys_id = ql_phys_id,
+ .set_phys_id = ql_set_phys_id,
.self_test = ql_self_test,
.get_pauseparam = ql_get_pauseparam,
.set_pauseparam = ql_set_pauseparam,
- .get_rx_csum = ql_get_rx_csum,
- .set_rx_csum = ql_set_rx_csum,
- .get_tx_csum = ethtool_op_get_tx_csum,
- .set_tx_csum = ethtool_op_set_tx_csum,
- .get_sg = ethtool_op_get_sg,
- .set_sg = ethtool_op_set_sg,
- .get_tso = ethtool_op_get_tso,
- .set_tso = ql_set_tso,
.get_coalesce = ql_get_coalesce,
.set_coalesce = ql_set_coalesce,
.get_sset_count = ql_get_sset_count,
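
ql_phys_id's self-timed blink loop is replaced above by the newer set_phys_id callback: the ethtool core drives the identify cycle and the driver only reacts to the ACTIVE and INACTIVE states. A compact sketch of the callback shape; ql_mb_get_led_cfg()/ql_mb_set_led_cfg() are the driver's own mailbox helpers and are only referenced, not defined, here:

static int set_phys_id_sketch(struct net_device *ndev,
			      enum ethtool_phys_id_state state)
{
	struct ql_adapter *qdev = netdev_priv(ndev);

	switch (state) {
	case ETHTOOL_ID_ACTIVE:
		if (ql_mb_get_led_cfg(qdev))            /* remember current cfg */
			return -EIO;
		ql_mb_set_led_cfg(qdev, QL_LED_BLINK);  /* start blinking */
		return 0;    /* 0: hardware blinks on its own until INACTIVE */
	case ETHTOOL_ID_INACTIVE:
		return ql_mb_set_led_cfg(qdev, qdev->led_config) ? -EIO : 0;
	default:
		return -EINVAL;
	}
}
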
diff --git a/drivers/net/qlge/qlge_main.c b/drivers/net/qlge/qlge_main.c
index 49bfa5813068..6c9d124cfc76 100644
--- a/drivers/net/qlge/qlge_main.c
+++ b/drivers/net/qlge/qlge_main.c
@@ -660,7 +660,7 @@ static void ql_disable_interrupts(struct ql_adapter *qdev)
/* If we're running with multiple MSI-X vectors then we enable on the fly.
* Otherwise, we may have multiple outstanding workers and don't want to
* enable until the last one finishes. In this case, the irq_cnt gets
- * incremented everytime we queue a worker and decremented everytime
+ * incremented every time we queue a worker and decremented every time
* a worker finishes. Once it hits zero we enable the interrupt.
*/
u32 ql_enable_completion_interrupt(struct ql_adapter *qdev, u32 intr)
@@ -1571,7 +1571,7 @@ static void ql_process_mac_rx_page(struct ql_adapter *qdev,
skb->protocol = eth_type_trans(skb, ndev);
skb_checksum_none_assert(skb);
- if (qdev->rx_csum &&
+ if ((ndev->features & NETIF_F_RXCSUM) &&
!(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) {
/* TCP frame. */
if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) {
@@ -1684,7 +1684,7 @@ static void ql_process_mac_rx_skb(struct ql_adapter *qdev,
/* If rx checksum is on, and there are no
* csum or frame errors.
*/
- if (qdev->rx_csum &&
+ if ((ndev->features & NETIF_F_RXCSUM) &&
!(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) {
/* TCP frame. */
if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) {
@@ -2004,7 +2004,7 @@ static void ql_process_mac_split_rx_intr(struct ql_adapter *qdev,
/* If rx checksum is on, and there are no
* csum or frame errors.
*/
- if (qdev->rx_csum &&
+ if ((ndev->features & NETIF_F_RXCSUM) &&
!(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) {
/* TCP frame. */
if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) {
@@ -3299,7 +3299,7 @@ msi:
* will service it. An example would be if there are
* 2 vectors (so 2 RSS rings) and 8 TX completion rings.
* This would mean that vector 0 would service RSS ring 0
- * and TX competion rings 0,1,2 and 3. Vector 1 would
+ * and TX completion rings 0,1,2 and 3. Vector 1 would
* service RSS ring 1 and TX completion rings 4,5,6 and 7.
*/
static void ql_set_tx_vect(struct ql_adapter *qdev)
@@ -4152,7 +4152,7 @@ static int ql_change_rx_buffers(struct ql_adapter *qdev)
int i, status;
u32 lbq_buf_len;
- /* Wait for an oustanding reset to complete. */
+ /* Wait for an outstanding reset to complete. */
if (!test_bit(QL_ADAPTER_UP, &qdev->flags)) {
int i = 3;
while (i-- && !test_bit(QL_ADAPTER_UP, &qdev->flags)) {
@@ -4281,7 +4281,7 @@ static void qlge_set_multicast_list(struct net_device *ndev)
if (ql_set_routing_reg
(qdev, RT_IDX_PROMISCUOUS_SLOT, RT_IDX_VALID, 1)) {
netif_err(qdev, hw, qdev->ndev,
- "Failed to set promiscous mode.\n");
+ "Failed to set promiscuous mode.\n");
} else {
set_bit(QL_PROMISCUOUS, &qdev->flags);
}
@@ -4291,7 +4291,7 @@ static void qlge_set_multicast_list(struct net_device *ndev)
if (ql_set_routing_reg
(qdev, RT_IDX_PROMISCUOUS_SLOT, RT_IDX_VALID, 0)) {
netif_err(qdev, hw, qdev->ndev,
- "Failed to clear promiscous mode.\n");
+ "Failed to clear promiscuous mode.\n");
} else {
clear_bit(QL_PROMISCUOUS, &qdev->flags);
}
@@ -4412,12 +4412,12 @@ error:
rtnl_unlock();
}
-static struct nic_operations qla8012_nic_ops = {
+static const struct nic_operations qla8012_nic_ops = {
.get_flash = ql_get_8012_flash_params,
.port_initialize = ql_8012_port_initialize,
};
-static struct nic_operations qla8000_nic_ops = {
+static const struct nic_operations qla8000_nic_ops = {
.get_flash = ql_get_8000_flash_params,
.port_initialize = ql_8000_port_initialize,
};
@@ -4621,7 +4621,6 @@ static int __devinit ql_init_device(struct pci_dev *pdev,
/*
* Set up the operating parameters.
*/
- qdev->rx_csum = 1;
qdev->workqueue = create_singlethread_workqueue(ndev->name);
INIT_DELAYED_WORK(&qdev->asic_reset_work, ql_asic_reset_work);
INIT_DELAYED_WORK(&qdev->mpi_reset_work, ql_mpi_reset_work);
@@ -4695,15 +4694,11 @@ static int __devinit qlge_probe(struct pci_dev *pdev,
qdev = netdev_priv(ndev);
SET_NETDEV_DEV(ndev, &pdev->dev);
- ndev->features = (0
- | NETIF_F_IP_CSUM
- | NETIF_F_SG
- | NETIF_F_TSO
- | NETIF_F_TSO6
- | NETIF_F_TSO_ECN
- | NETIF_F_HW_VLAN_TX
- | NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER);
- ndev->features |= NETIF_F_GRO;
+ ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM |
+ NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_TSO_ECN |
+ NETIF_F_HW_VLAN_TX | NETIF_F_RXCSUM;
+ ndev->features = ndev->hw_features |
+ NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER;
if (test_bit(QL_DMA64, &qdev->flags))
ndev->features |= NETIF_F_HIGHDMA;
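
Both qlcnic and qlge above drop their private rx_csum/tso hooks in favor of the hw_features mechanism: hw_features lists what the user may toggle with ethtool -K, features is what is currently enabled, and the per-packet paths test netdev->features & NETIF_F_RXCSUM instead of a driver flag. A minimal sketch of the probe-time split, assuming <linux/netdevice.h> context; the exact flag set here is illustrative, not a literal excerpt from either driver:

static void setup_offload_features(struct net_device *ndev, bool dma64)
{
	/* user-toggleable via ethtool -K */
	ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM |
			    NETIF_F_TSO | NETIF_F_TSO6;
	if (dma64)
		ndev->hw_features |= NETIF_F_HIGHDMA;

	/* currently enabled: everything toggleable, plus fixed VLAN offloads
	 * that are not exposed through hw_features in this tree */
	ndev->features = ndev->hw_features |
			 NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER;
	ndev->vlan_features = ndev->hw_features;
}
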
diff --git a/drivers/net/r6040.c b/drivers/net/r6040.c
index e3ebd90ae651..200a363c3bf5 100644
--- a/drivers/net/r6040.c
+++ b/drivers/net/r6040.c
@@ -535,7 +535,7 @@ static int r6040_rx(struct net_device *dev, int limit)
/* RX dribble */
if (err & DSC_RX_ERR_DRI)
dev->stats.rx_frame_errors++;
- /* Buffer lenght exceeded */
+ /* Buffer length exceeded */
if (err & DSC_RX_ERR_BUF)
dev->stats.rx_length_errors++;
/* Packet too long */
diff --git a/drivers/net/r8169.c b/drivers/net/r8169.c
index 493b0de3848b..04f4e6086cd0 100644
--- a/drivers/net/r8169.c
+++ b/drivers/net/r8169.c
@@ -37,6 +37,8 @@
#define FIRMWARE_8168D_1 "rtl_nic/rtl8168d-1.fw"
#define FIRMWARE_8168D_2 "rtl_nic/rtl8168d-2.fw"
+#define FIRMWARE_8168E_1 "rtl_nic/rtl8168e-1.fw"
+#define FIRMWARE_8168E_2 "rtl_nic/rtl8168e-2.fw"
#define FIRMWARE_8105E_1 "rtl_nic/rtl8105e-1.fw"
#ifdef RTL8169_DEBUG
@@ -96,77 +98,123 @@ static const int multicast_filter_limit = 32;
#define RTL_R32(reg) readl (ioaddr + (reg))
enum mac_version {
- RTL_GIGA_MAC_NONE = 0x00,
- RTL_GIGA_MAC_VER_01 = 0x01, // 8169
- RTL_GIGA_MAC_VER_02 = 0x02, // 8169S
- RTL_GIGA_MAC_VER_03 = 0x03, // 8110S
- RTL_GIGA_MAC_VER_04 = 0x04, // 8169SB
- RTL_GIGA_MAC_VER_05 = 0x05, // 8110SCd
- RTL_GIGA_MAC_VER_06 = 0x06, // 8110SCe
- RTL_GIGA_MAC_VER_07 = 0x07, // 8102e
- RTL_GIGA_MAC_VER_08 = 0x08, // 8102e
- RTL_GIGA_MAC_VER_09 = 0x09, // 8102e
- RTL_GIGA_MAC_VER_10 = 0x0a, // 8101e
- RTL_GIGA_MAC_VER_11 = 0x0b, // 8168Bb
- RTL_GIGA_MAC_VER_12 = 0x0c, // 8168Be
- RTL_GIGA_MAC_VER_13 = 0x0d, // 8101Eb
- RTL_GIGA_MAC_VER_14 = 0x0e, // 8101 ?
- RTL_GIGA_MAC_VER_15 = 0x0f, // 8101 ?
- RTL_GIGA_MAC_VER_16 = 0x11, // 8101Ec
- RTL_GIGA_MAC_VER_17 = 0x10, // 8168Bf
- RTL_GIGA_MAC_VER_18 = 0x12, // 8168CP
- RTL_GIGA_MAC_VER_19 = 0x13, // 8168C
- RTL_GIGA_MAC_VER_20 = 0x14, // 8168C
- RTL_GIGA_MAC_VER_21 = 0x15, // 8168C
- RTL_GIGA_MAC_VER_22 = 0x16, // 8168C
- RTL_GIGA_MAC_VER_23 = 0x17, // 8168CP
- RTL_GIGA_MAC_VER_24 = 0x18, // 8168CP
- RTL_GIGA_MAC_VER_25 = 0x19, // 8168D
- RTL_GIGA_MAC_VER_26 = 0x1a, // 8168D
- RTL_GIGA_MAC_VER_27 = 0x1b, // 8168DP
- RTL_GIGA_MAC_VER_28 = 0x1c, // 8168DP
- RTL_GIGA_MAC_VER_29 = 0x1d, // 8105E
- RTL_GIGA_MAC_VER_30 = 0x1e, // 8105E
+ RTL_GIGA_MAC_VER_01 = 0,
+ RTL_GIGA_MAC_VER_02,
+ RTL_GIGA_MAC_VER_03,
+ RTL_GIGA_MAC_VER_04,
+ RTL_GIGA_MAC_VER_05,
+ RTL_GIGA_MAC_VER_06,
+ RTL_GIGA_MAC_VER_07,
+ RTL_GIGA_MAC_VER_08,
+ RTL_GIGA_MAC_VER_09,
+ RTL_GIGA_MAC_VER_10,
+ RTL_GIGA_MAC_VER_11,
+ RTL_GIGA_MAC_VER_12,
+ RTL_GIGA_MAC_VER_13,
+ RTL_GIGA_MAC_VER_14,
+ RTL_GIGA_MAC_VER_15,
+ RTL_GIGA_MAC_VER_16,
+ RTL_GIGA_MAC_VER_17,
+ RTL_GIGA_MAC_VER_18,
+ RTL_GIGA_MAC_VER_19,
+ RTL_GIGA_MAC_VER_20,
+ RTL_GIGA_MAC_VER_21,
+ RTL_GIGA_MAC_VER_22,
+ RTL_GIGA_MAC_VER_23,
+ RTL_GIGA_MAC_VER_24,
+ RTL_GIGA_MAC_VER_25,
+ RTL_GIGA_MAC_VER_26,
+ RTL_GIGA_MAC_VER_27,
+ RTL_GIGA_MAC_VER_28,
+ RTL_GIGA_MAC_VER_29,
+ RTL_GIGA_MAC_VER_30,
+ RTL_GIGA_MAC_VER_31,
+ RTL_GIGA_MAC_VER_32,
+ RTL_GIGA_MAC_VER_33,
+ RTL_GIGA_MAC_NONE = 0xff,
};
-#define _R(NAME,MAC,MASK) \
- { .name = NAME, .mac_version = MAC, .RxConfigMask = MASK }
+enum rtl_tx_desc_version {
+ RTL_TD_0 = 0,
+ RTL_TD_1 = 1,
+};
+
+#define _R(NAME,TD,FW) \
+ { .name = NAME, .txd_version = TD, .fw_name = FW }
static const struct {
const char *name;
- u8 mac_version;
- u32 RxConfigMask; /* Clears the bits supported by this chip */
-} rtl_chip_info[] = {
- _R("RTL8169", RTL_GIGA_MAC_VER_01, 0xff7e1880), // 8169
- _R("RTL8169s", RTL_GIGA_MAC_VER_02, 0xff7e1880), // 8169S
- _R("RTL8110s", RTL_GIGA_MAC_VER_03, 0xff7e1880), // 8110S
- _R("RTL8169sb/8110sb", RTL_GIGA_MAC_VER_04, 0xff7e1880), // 8169SB
- _R("RTL8169sc/8110sc", RTL_GIGA_MAC_VER_05, 0xff7e1880), // 8110SCd
- _R("RTL8169sc/8110sc", RTL_GIGA_MAC_VER_06, 0xff7e1880), // 8110SCe
- _R("RTL8102e", RTL_GIGA_MAC_VER_07, 0xff7e1880), // PCI-E
- _R("RTL8102e", RTL_GIGA_MAC_VER_08, 0xff7e1880), // PCI-E
- _R("RTL8102e", RTL_GIGA_MAC_VER_09, 0xff7e1880), // PCI-E
- _R("RTL8101e", RTL_GIGA_MAC_VER_10, 0xff7e1880), // PCI-E
- _R("RTL8168b/8111b", RTL_GIGA_MAC_VER_11, 0xff7e1880), // PCI-E
- _R("RTL8168b/8111b", RTL_GIGA_MAC_VER_12, 0xff7e1880), // PCI-E
- _R("RTL8101e", RTL_GIGA_MAC_VER_13, 0xff7e1880), // PCI-E 8139
- _R("RTL8100e", RTL_GIGA_MAC_VER_14, 0xff7e1880), // PCI-E 8139
- _R("RTL8100e", RTL_GIGA_MAC_VER_15, 0xff7e1880), // PCI-E 8139
- _R("RTL8168b/8111b", RTL_GIGA_MAC_VER_17, 0xff7e1880), // PCI-E
- _R("RTL8101e", RTL_GIGA_MAC_VER_16, 0xff7e1880), // PCI-E
- _R("RTL8168cp/8111cp", RTL_GIGA_MAC_VER_18, 0xff7e1880), // PCI-E
- _R("RTL8168c/8111c", RTL_GIGA_MAC_VER_19, 0xff7e1880), // PCI-E
- _R("RTL8168c/8111c", RTL_GIGA_MAC_VER_20, 0xff7e1880), // PCI-E
- _R("RTL8168c/8111c", RTL_GIGA_MAC_VER_21, 0xff7e1880), // PCI-E
- _R("RTL8168c/8111c", RTL_GIGA_MAC_VER_22, 0xff7e1880), // PCI-E
- _R("RTL8168cp/8111cp", RTL_GIGA_MAC_VER_23, 0xff7e1880), // PCI-E
- _R("RTL8168cp/8111cp", RTL_GIGA_MAC_VER_24, 0xff7e1880), // PCI-E
- _R("RTL8168d/8111d", RTL_GIGA_MAC_VER_25, 0xff7e1880), // PCI-E
- _R("RTL8168d/8111d", RTL_GIGA_MAC_VER_26, 0xff7e1880), // PCI-E
- _R("RTL8168dp/8111dp", RTL_GIGA_MAC_VER_27, 0xff7e1880), // PCI-E
- _R("RTL8168dp/8111dp", RTL_GIGA_MAC_VER_28, 0xff7e1880), // PCI-E
- _R("RTL8105e", RTL_GIGA_MAC_VER_29, 0xff7e1880), // PCI-E
- _R("RTL8105e", RTL_GIGA_MAC_VER_30, 0xff7e1880) // PCI-E
+ enum rtl_tx_desc_version txd_version;
+ const char *fw_name;
+} rtl_chip_infos[] = {
+ /* PCI devices. */
+ [RTL_GIGA_MAC_VER_01] =
+ _R("RTL8169", RTL_TD_0, NULL),
+ [RTL_GIGA_MAC_VER_02] =
+ _R("RTL8169s", RTL_TD_0, NULL),
+ [RTL_GIGA_MAC_VER_03] =
+ _R("RTL8110s", RTL_TD_0, NULL),
+ [RTL_GIGA_MAC_VER_04] =
+ _R("RTL8169sb/8110sb", RTL_TD_0, NULL),
+ [RTL_GIGA_MAC_VER_05] =
+ _R("RTL8169sc/8110sc", RTL_TD_0, NULL),
+ [RTL_GIGA_MAC_VER_06] =
+ _R("RTL8169sc/8110sc", RTL_TD_0, NULL),
+ /* PCI-E devices. */
+ [RTL_GIGA_MAC_VER_07] =
+ _R("RTL8102e", RTL_TD_1, NULL),
+ [RTL_GIGA_MAC_VER_08] =
+ _R("RTL8102e", RTL_TD_1, NULL),
+ [RTL_GIGA_MAC_VER_09] =
+ _R("RTL8102e", RTL_TD_1, NULL),
+ [RTL_GIGA_MAC_VER_10] =
+ _R("RTL8101e", RTL_TD_0, NULL),
+ [RTL_GIGA_MAC_VER_11] =
+ _R("RTL8168b/8111b", RTL_TD_0, NULL),
+ [RTL_GIGA_MAC_VER_12] =
+ _R("RTL8168b/8111b", RTL_TD_0, NULL),
+ [RTL_GIGA_MAC_VER_13] =
+ _R("RTL8101e", RTL_TD_0, NULL),
+ [RTL_GIGA_MAC_VER_14] =
+ _R("RTL8100e", RTL_TD_0, NULL),
+ [RTL_GIGA_MAC_VER_15] =
+ _R("RTL8100e", RTL_TD_0, NULL),
+ [RTL_GIGA_MAC_VER_16] =
+ _R("RTL8101e", RTL_TD_0, NULL),
+ [RTL_GIGA_MAC_VER_17] =
+ _R("RTL8168b/8111b", RTL_TD_0, NULL),
+ [RTL_GIGA_MAC_VER_18] =
+ _R("RTL8168cp/8111cp", RTL_TD_1, NULL),
+ [RTL_GIGA_MAC_VER_19] =
+ _R("RTL8168c/8111c", RTL_TD_1, NULL),
+ [RTL_GIGA_MAC_VER_20] =
+ _R("RTL8168c/8111c", RTL_TD_1, NULL),
+ [RTL_GIGA_MAC_VER_21] =
+ _R("RTL8168c/8111c", RTL_TD_1, NULL),
+ [RTL_GIGA_MAC_VER_22] =
+ _R("RTL8168c/8111c", RTL_TD_1, NULL),
+ [RTL_GIGA_MAC_VER_23] =
+ _R("RTL8168cp/8111cp", RTL_TD_1, NULL),
+ [RTL_GIGA_MAC_VER_24] =
+ _R("RTL8168cp/8111cp", RTL_TD_1, NULL),
+ [RTL_GIGA_MAC_VER_25] =
+ _R("RTL8168d/8111d", RTL_TD_1, FIRMWARE_8168D_1),
+ [RTL_GIGA_MAC_VER_26] =
+ _R("RTL8168d/8111d", RTL_TD_1, FIRMWARE_8168D_2),
+ [RTL_GIGA_MAC_VER_27] =
+ _R("RTL8168dp/8111dp", RTL_TD_1, NULL),
+ [RTL_GIGA_MAC_VER_28] =
+ _R("RTL8168dp/8111dp", RTL_TD_1, NULL),
+ [RTL_GIGA_MAC_VER_29] =
+ _R("RTL8105e", RTL_TD_1, FIRMWARE_8105E_1),
+ [RTL_GIGA_MAC_VER_30] =
+ _R("RTL8105e", RTL_TD_1, FIRMWARE_8105E_1),
+ [RTL_GIGA_MAC_VER_31] =
+ _R("RTL8168dp/8111dp", RTL_TD_1, NULL),
+ [RTL_GIGA_MAC_VER_32] =
+ _R("RTL8168e/8111e", RTL_TD_1, FIRMWARE_8168E_1),
+ [RTL_GIGA_MAC_VER_33] =
+ _R("RTL8168e/8111e", RTL_TD_1, FIRMWARE_8168E_2)
};
#undef _R
@@ -222,6 +270,9 @@ enum rtl_registers {
IntrStatus = 0x3e,
TxConfig = 0x40,
RxConfig = 0x44,
+
+#define RTL_RX_CONFIG_MASK 0xff7e1880u
+
RxMissed = 0x4c,
Cfg9346 = 0x50,
Config0 = 0x51,
@@ -315,7 +366,9 @@ enum rtl8168_registers {
#define OCPAR_FLAG 0x80000000
#define OCPAR_GPHY_WRITE_CMD 0x8000f060
#define OCPAR_GPHY_READ_CMD 0x0000f060
- RDSAR1 = 0xd0 /* 8168c only. Undocumented on 8168dp */
+ RDSAR1 = 0xd0, /* 8168c only. Undocumented on 8168dp */
+ MISC = 0xf0, /* 8168e only. */
+#define TXPLA_RST (1 << 29)
};
enum rtl_register_content {
@@ -393,6 +446,7 @@ enum rtl_register_content {
BWF = (1 << 6), /* Accept Broadcast wakeup frame */
MWF = (1 << 5), /* Accept Multicast wakeup frame */
UWF = (1 << 4), /* Accept Unicast wakeup frame */
+ Spi_en = (1 << 3),
LanWake = (1 << 1), /* LanWake enable/disable */
PMEStatus = (1 << 0), /* PME status can be reset by PCI RST# */
@@ -441,21 +495,69 @@ enum rtl_register_content {
CounterDump = 0x8,
};
-enum desc_status_bit {
+enum rtl_desc_bit {
+ /* First doubleword. */
DescOwn = (1 << 31), /* Descriptor is owned by NIC */
RingEnd = (1 << 30), /* End of descriptor ring */
FirstFrag = (1 << 29), /* First segment of a packet */
LastFrag = (1 << 28), /* Final segment of a packet */
+};
+
+/* Generic case. */
+enum rtl_tx_desc_bit {
+ /* First doubleword. */
+ TD_LSO = (1 << 27), /* Large Send Offload */
+#define TD_MSS_MAX 0x07ffu /* MSS value */
+
+ /* Second doubleword. */
+ TxVlanTag = (1 << 17), /* Add VLAN tag */
+};
+
+/* 8169, 8168b and 810x except 8102e. */
+enum rtl_tx_desc_bit_0 {
+ /* First doubleword. */
+#define TD0_MSS_SHIFT 16 /* MSS position (11 bits) */
+ TD0_TCP_CS = (1 << 16), /* Calculate TCP/IP checksum */
+ TD0_UDP_CS = (1 << 17), /* Calculate UDP/IP checksum */
+ TD0_IP_CS = (1 << 18), /* Calculate IP checksum */
+};
- /* Tx private */
- LargeSend = (1 << 27), /* TCP Large Send Offload (TSO) */
- MSSShift = 16, /* MSS value position */
- MSSMask = 0xfff, /* MSS value + LargeSend bit: 12 bits */
- IPCS = (1 << 18), /* Calculate IP checksum */
- UDPCS = (1 << 17), /* Calculate UDP/IP checksum */
- TCPCS = (1 << 16), /* Calculate TCP/IP checksum */
- TxVlanTag = (1 << 17), /* Add VLAN tag */
+/* 8102e, 8168c and beyond. */
+enum rtl_tx_desc_bit_1 {
+ /* Second doubleword. */
+#define TD1_MSS_SHIFT 18 /* MSS position (11 bits) */
+ TD1_IP_CS = (1 << 29), /* Calculate IP checksum */
+ TD1_TCP_CS = (1 << 30), /* Calculate TCP/IP checksum */
+ TD1_UDP_CS = (1 << 31), /* Calculate UDP/IP checksum */
+};
+
+static const struct rtl_tx_desc_info {
+ struct {
+ u32 udp;
+ u32 tcp;
+ } checksum;
+ u16 mss_shift;
+ u16 opts_offset;
+} tx_desc_info[] = {
+ [RTL_TD_0] = {
+ .checksum = {
+ .udp = TD0_IP_CS | TD0_UDP_CS,
+ .tcp = TD0_IP_CS | TD0_TCP_CS
+ },
+ .mss_shift = TD0_MSS_SHIFT,
+ .opts_offset = 0
+ },
+ [RTL_TD_1] = {
+ .checksum = {
+ .udp = TD1_IP_CS | TD1_UDP_CS,
+ .tcp = TD1_IP_CS | TD1_TCP_CS
+ },
+ .mss_shift = TD1_MSS_SHIFT,
+ .opts_offset = 1
+ }
+};
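The table above centralizes the per-descriptor-format offload layout: each entry records which dword (opts_offset) and which bits carry the checksum request, and where the MSS value is placed. As a rough, illustrative sketch of how a transmit path is expected to index it by tp->txd_version (the helper name below is made up; the real consumer is rtl8169_tso_csum() further down in this patch):

/* Illustrative only - not part of the patch. */
static void example_fill_offload_bits(struct rtl8169_private *tp,
				      struct sk_buff *skb, u32 *opts)
{
	const struct rtl_tx_desc_info *info = tx_desc_info + tp->txd_version;

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		const struct iphdr *ip = ip_hdr(skb);

		if (ip->protocol == IPPROTO_TCP)
			opts[info->opts_offset] |= info->checksum.tcp;
		else if (ip->protocol == IPPROTO_UDP)
			opts[info->opts_offset] |= info->checksum.udp;
	}
}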
+enum rtl_rx_desc_bit {
/* Rx private */
PID1 = (1 << 18), /* Protocol ID bit 1/2 */
PID0 = (1 << 17), /* Protocol ID bit 2/2 */
@@ -515,13 +617,13 @@ struct rtl8169_counters {
struct rtl8169_private {
void __iomem *mmio_addr; /* memory map physical address */
- struct pci_dev *pci_dev; /* Index of PCI device */
+ struct pci_dev *pci_dev;
struct net_device *dev;
struct napi_struct napi;
- spinlock_t lock; /* spin lock flag */
+ spinlock_t lock;
u32 msg_enable;
- int chipset;
- int mac_version;
+ u16 txd_version;
+ u16 mac_version;
u32 cur_rx; /* Index into the Rx descriptor buffer of next Rx pkt. */
u32 cur_tx; /* Index into the Tx descriptor buffer of next Rx pkt. */
u32 dirty_rx;
@@ -537,7 +639,6 @@ struct rtl8169_private {
u16 intr_event;
u16 napi_event;
u16 intr_mask;
- int phy_1000_ctrl_reg;
struct mdio_ops {
void (*write)(void __iomem *, int, int);
@@ -565,6 +666,7 @@ struct rtl8169_private {
u32 saved_wolopts;
const struct firmware *fw;
+#define RTL_FIRMWARE_UNKNOWN	ERR_PTR(-EAGAIN)
};
MODULE_AUTHOR("Realtek and the Linux r8169 crew <netdev@vger.kernel.org>");
@@ -577,6 +679,8 @@ MODULE_LICENSE("GPL");
MODULE_VERSION(RTL8169_VERSION);
MODULE_FIRMWARE(FIRMWARE_8168D_1);
MODULE_FIRMWARE(FIRMWARE_8168D_2);
+MODULE_FIRMWARE(FIRMWARE_8168E_1);
+MODULE_FIRMWARE(FIRMWARE_8168E_2);
MODULE_FIRMWARE(FIRMWARE_8105E_1);
static int rtl8169_open(struct net_device *dev);
@@ -648,32 +752,49 @@ static void rtl8168_oob_notify(struct rtl8169_private *tp, u8 cmd)
#define OOB_CMD_DRIVER_START 0x05
#define OOB_CMD_DRIVER_STOP 0x06
+static u16 rtl8168_get_ocp_reg(struct rtl8169_private *tp)
+{
+ return (tp->mac_version == RTL_GIGA_MAC_VER_31) ? 0xb8 : 0x10;
+}
+
static void rtl8168_driver_start(struct rtl8169_private *tp)
{
+ u16 reg;
int i;
rtl8168_oob_notify(tp, OOB_CMD_DRIVER_START);
+ reg = rtl8168_get_ocp_reg(tp);
+
for (i = 0; i < 10; i++) {
msleep(10);
- if (ocp_read(tp, 0x0f, 0x0010) & 0x00000800)
+ if (ocp_read(tp, 0x0f, reg) & 0x00000800)
break;
}
}
static void rtl8168_driver_stop(struct rtl8169_private *tp)
{
+ u16 reg;
int i;
rtl8168_oob_notify(tp, OOB_CMD_DRIVER_STOP);
+ reg = rtl8168_get_ocp_reg(tp);
+
for (i = 0; i < 10; i++) {
msleep(10);
- if ((ocp_read(tp, 0x0f, 0x0010) & 0x00000800) == 0)
+ if ((ocp_read(tp, 0x0f, reg) & 0x00000800) == 0)
break;
}
}
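+
+/* DASH is the out-of-band management firmware found on some 8168dp boards.
+ * Bit 15 of this OCP register appears to indicate that it is active, in which
+ * case the PLL power handlers further down must leave the chip powered.
+ * VER_31 moves the register from 0x10 to 0xb8, hence rtl8168_get_ocp_reg().
+ */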
+static int r8168dp_check_dash(struct rtl8169_private *tp)
+{
+ u16 reg = rtl8168_get_ocp_reg(tp);
+
+ return (ocp_read(tp, 0x0f, reg) & 0x00008000) ? 1 : 0;
+}
static void r8169_mdio_write(void __iomem *ioaddr, int reg_addr, int value)
{
@@ -972,9 +1093,8 @@ static void rtl8169_xmii_reset_enable(struct rtl8169_private *tp)
}
static void __rtl8169_check_link_status(struct net_device *dev,
- struct rtl8169_private *tp,
- void __iomem *ioaddr,
- bool pm)
+ struct rtl8169_private *tp,
+ void __iomem *ioaddr, bool pm)
{
unsigned long flags;
@@ -1091,6 +1211,11 @@ static int rtl8169_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
return 0;
}
+static const char *rtl_lookup_firmware_name(struct rtl8169_private *tp)
+{
+ return rtl_chip_infos[tp->mac_version].fw_name;
+}
+
static void rtl8169_get_drvinfo(struct net_device *dev,
struct ethtool_drvinfo *info)
{
@@ -1099,6 +1224,8 @@ static void rtl8169_get_drvinfo(struct net_device *dev,
strcpy(info->driver, MODULENAME);
strcpy(info->version, RTL8169_VERSION);
strcpy(info->bus_info, pci_name(tp->pci_dev));
+ strncpy(info->fw_version, IS_ERR_OR_NULL(tp->fw) ? "N/A" :
+ rtl_lookup_firmware_name(tp), sizeof(info->fw_version) - 1);
}
static int rtl8169_get_regs_len(struct net_device *dev)
@@ -1160,16 +1287,7 @@ static int rtl8169_set_speed_xmii(struct net_device *dev,
giga_ctrl &= ~(ADVERTISE_1000FULL | ADVERTISE_1000HALF);
/* The 8100e/8101e/8102e do Fast Ethernet only. */
- if ((tp->mac_version != RTL_GIGA_MAC_VER_07) &&
- (tp->mac_version != RTL_GIGA_MAC_VER_08) &&
- (tp->mac_version != RTL_GIGA_MAC_VER_09) &&
- (tp->mac_version != RTL_GIGA_MAC_VER_10) &&
- (tp->mac_version != RTL_GIGA_MAC_VER_13) &&
- (tp->mac_version != RTL_GIGA_MAC_VER_14) &&
- (tp->mac_version != RTL_GIGA_MAC_VER_15) &&
- (tp->mac_version != RTL_GIGA_MAC_VER_16) &&
- (tp->mac_version != RTL_GIGA_MAC_VER_29) &&
- (tp->mac_version != RTL_GIGA_MAC_VER_30)) {
+ if (tp->mii.supports_gmii) {
if (adv & ADVERTISED_1000baseT_Half)
giga_ctrl |= ADVERTISE_1000HALF;
if (adv & ADVERTISED_1000baseT_Full)
@@ -1199,12 +1317,10 @@ static int rtl8169_set_speed_xmii(struct net_device *dev,
bmcr |= BMCR_FULLDPLX;
}
- tp->phy_1000_ctrl_reg = giga_ctrl;
-
rtl_writephy(tp, MII_BMCR, bmcr);
- if ((tp->mac_version == RTL_GIGA_MAC_VER_02) ||
- (tp->mac_version == RTL_GIGA_MAC_VER_03)) {
+ if (tp->mac_version == RTL_GIGA_MAC_VER_02 ||
+ tp->mac_version == RTL_GIGA_MAC_VER_03) {
if ((speed == SPEED_100) && (autoneg != AUTONEG_ENABLE)) {
rtl_writephy(tp, 0x17, 0x2138);
rtl_writephy(tp, 0x0e, 0x0260);
@@ -1226,10 +1342,14 @@ static int rtl8169_set_speed(struct net_device *dev,
int ret;
ret = tp->set_speed(dev, autoneg, speed, duplex, advertising);
+ if (ret < 0)
+ goto out;
- if (netif_running(dev) && (tp->phy_1000_ctrl_reg & ADVERTISE_1000FULL))
+ if (netif_running(dev) && (autoneg == AUTONEG_ENABLE) &&
+ (advertising & ADVERTISED_1000baseT_Full)) {
mod_timer(&tp->timer, jiffies + RTL8169_PHY_TIMEOUT);
-
+ }
+out:
return ret;
}
@@ -1239,22 +1359,25 @@ static int rtl8169_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
unsigned long flags;
int ret;
+ del_timer_sync(&tp->timer);
+
spin_lock_irqsave(&tp->lock, flags);
- ret = rtl8169_set_speed(dev,
- cmd->autoneg, cmd->speed, cmd->duplex, cmd->advertising);
+ ret = rtl8169_set_speed(dev, cmd->autoneg, ethtool_cmd_speed(cmd),
+ cmd->duplex, cmd->advertising);
spin_unlock_irqrestore(&tp->lock, flags);
return ret;
}
-static u32 rtl8169_get_rx_csum(struct net_device *dev)
+static u32 rtl8169_fix_features(struct net_device *dev, u32 features)
{
- struct rtl8169_private *tp = netdev_priv(dev);
+ if (dev->mtu > TD_MSS_MAX)
+ features &= ~NETIF_F_ALL_TSO;
- return tp->cp_cmd & RxChkSum;
+ return features;
}
-static int rtl8169_set_rx_csum(struct net_device *dev, u32 data)
+static int rtl8169_set_features(struct net_device *dev, u32 features)
{
struct rtl8169_private *tp = netdev_priv(dev);
void __iomem *ioaddr = tp->mmio_addr;
@@ -1262,11 +1385,16 @@ static int rtl8169_set_rx_csum(struct net_device *dev, u32 data)
spin_lock_irqsave(&tp->lock, flags);
- if (data)
+ if (features & NETIF_F_RXCSUM)
tp->cp_cmd |= RxChkSum;
else
tp->cp_cmd &= ~RxChkSum;
+ if (dev->features & NETIF_F_HW_VLAN_RX)
+ tp->cp_cmd |= RxVlan;
+ else
+ tp->cp_cmd &= ~RxVlan;
+
RTL_W16(CPlusCmd, tp->cp_cmd);
RTL_R16(CPlusCmd);
@@ -1282,27 +1410,6 @@ static inline u32 rtl8169_tx_vlan_tag(struct rtl8169_private *tp,
TxVlanTag | swab16(vlan_tx_tag_get(skb)) : 0x00;
}
-#define NETIF_F_HW_VLAN_TX_RX (NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX)
-
-static void rtl8169_vlan_mode(struct net_device *dev)
-{
- struct rtl8169_private *tp = netdev_priv(dev);
- void __iomem *ioaddr = tp->mmio_addr;
- unsigned long flags;
-
- spin_lock_irqsave(&tp->lock, flags);
- if (dev->features & NETIF_F_HW_VLAN_RX)
- tp->cp_cmd |= RxVlan;
- else
- tp->cp_cmd &= ~RxVlan;
- RTL_W16(CPlusCmd, tp->cp_cmd);
- /* PCI commit */
- RTL_R16(CPlusCmd);
- spin_unlock_irqrestore(&tp->lock, flags);
-
- dev->vlan_features = dev->features &~ NETIF_F_HW_VLAN_TX_RX;
-}
-
static void rtl8169_rx_vlan_tag(struct RxDesc *desc, struct sk_buff *skb)
{
u32 opts2 = le32_to_cpu(desc->opts2);
@@ -1328,7 +1435,7 @@ static int rtl8169_gset_tbi(struct net_device *dev, struct ethtool_cmd *cmd)
cmd->advertising = (status & TBINwEnable) ? ADVERTISED_Autoneg : 0;
cmd->autoneg = !!(status & TBINwEnable);
- cmd->speed = SPEED_1000;
+ ethtool_cmd_speed_set(cmd, SPEED_1000);
cmd->duplex = DUPLEX_FULL; /* Always set */
return 0;
@@ -1413,11 +1520,11 @@ static void rtl8169_update_counters(struct net_device *dev)
{
struct rtl8169_private *tp = netdev_priv(dev);
void __iomem *ioaddr = tp->mmio_addr;
+ struct device *d = &tp->pci_dev->dev;
struct rtl8169_counters *counters;
dma_addr_t paddr;
u32 cmd;
int wait = 1000;
- struct device *d = &tp->pci_dev->dev;
/*
* Some chips are unable to dump tally counters when the receiver
@@ -1437,7 +1544,6 @@ static void rtl8169_update_counters(struct net_device *dev)
while (wait--) {
if ((RTL_R32(CounterAddrLow) & CounterDump) == 0) {
- /* copy updated counters */
memcpy(&tp->counters, counters, sizeof(*counters));
break;
}
@@ -1483,28 +1589,6 @@ static void rtl8169_get_strings(struct net_device *dev, u32 stringset, u8 *data)
}
}
-static int rtl8169_set_flags(struct net_device *dev, u32 data)
-{
- struct rtl8169_private *tp = netdev_priv(dev);
- unsigned long old_feat = dev->features;
- int rc;
-
- if ((tp->mac_version == RTL_GIGA_MAC_VER_05) &&
- !(data & ETH_FLAG_RXVLAN)) {
- netif_info(tp, drv, dev, "8110SCd requires hardware Rx VLAN\n");
- return -EINVAL;
- }
-
- rc = ethtool_op_set_flags(dev, data, ETH_FLAG_TXVLAN | ETH_FLAG_RXVLAN);
- if (rc)
- return rc;
-
- if ((old_feat ^ dev->features) & NETIF_F_HW_VLAN_RX)
- rtl8169_vlan_mode(dev);
-
- return 0;
-}
-
static const struct ethtool_ops rtl8169_ethtool_ops = {
.get_drvinfo = rtl8169_get_drvinfo,
.get_regs_len = rtl8169_get_regs_len,
@@ -1513,24 +1597,18 @@ static const struct ethtool_ops rtl8169_ethtool_ops = {
.set_settings = rtl8169_set_settings,
.get_msglevel = rtl8169_get_msglevel,
.set_msglevel = rtl8169_set_msglevel,
- .get_rx_csum = rtl8169_get_rx_csum,
- .set_rx_csum = rtl8169_set_rx_csum,
- .set_tx_csum = ethtool_op_set_tx_csum,
- .set_sg = ethtool_op_set_sg,
- .set_tso = ethtool_op_set_tso,
.get_regs = rtl8169_get_regs,
.get_wol = rtl8169_get_wol,
.set_wol = rtl8169_set_wol,
.get_strings = rtl8169_get_strings,
.get_sset_count = rtl8169_get_sset_count,
.get_ethtool_stats = rtl8169_get_ethtool_stats,
- .set_flags = rtl8169_set_flags,
- .get_flags = ethtool_op_get_flags,
};
static void rtl8169_get_mac_version(struct rtl8169_private *tp,
- void __iomem *ioaddr)
+ struct net_device *dev, u8 default_version)
{
+ void __iomem *ioaddr = tp->mmio_addr;
/*
* The driver currently handles the 8168Bf and the 8168Be identically
* but they can be identified more specifically through the test below
@@ -1547,6 +1625,11 @@ static void rtl8169_get_mac_version(struct rtl8169_private *tp,
u32 val;
int mac_version;
} mac_info[] = {
+ /* 8168E family. */
+ { 0x7cf00000, 0x2c200000, RTL_GIGA_MAC_VER_33 },
+ { 0x7cf00000, 0x2c100000, RTL_GIGA_MAC_VER_32 },
+ { 0x7c800000, 0x2c000000, RTL_GIGA_MAC_VER_33 },
+
/* 8168D family. */
{ 0x7cf00000, 0x28300000, RTL_GIGA_MAC_VER_26 },
{ 0x7cf00000, 0x28100000, RTL_GIGA_MAC_VER_25 },
@@ -1555,6 +1638,7 @@ static void rtl8169_get_mac_version(struct rtl8169_private *tp,
/* 8168DP family. */
{ 0x7cf00000, 0x28800000, RTL_GIGA_MAC_VER_27 },
{ 0x7cf00000, 0x28a00000, RTL_GIGA_MAC_VER_28 },
+ { 0x7cf00000, 0x28b00000, RTL_GIGA_MAC_VER_31 },
/* 8168C family. */
{ 0x7cf00000, 0x3cb00000, RTL_GIGA_MAC_VER_24 },
@@ -1574,6 +1658,7 @@ static void rtl8169_get_mac_version(struct rtl8169_private *tp,
{ 0x7c800000, 0x30000000, RTL_GIGA_MAC_VER_11 },
/* 8101 family. */
+ { 0x7cf00000, 0x40b00000, RTL_GIGA_MAC_VER_30 },
{ 0x7cf00000, 0x40a00000, RTL_GIGA_MAC_VER_30 },
{ 0x7cf00000, 0x40900000, RTL_GIGA_MAC_VER_29 },
{ 0x7c800000, 0x40800000, RTL_GIGA_MAC_VER_30 },
@@ -1610,6 +1695,12 @@ static void rtl8169_get_mac_version(struct rtl8169_private *tp,
while ((reg & p->mask) != p->val)
p++;
tp->mac_version = p->mac_version;
+
+ if (tp->mac_version == RTL_GIGA_MAC_NONE) {
+ netif_notice(tp, probe, dev,
+ "unknown MAC, using family default\n");
+ tp->mac_version = default_version;
+ }
}
static void rtl8169_print_mac_version(struct rtl8169_private *tp)
@@ -1679,14 +1770,14 @@ rtl_phy_write_fw(struct rtl8169_private *tp, const struct firmware *fw)
case PHY_BJMPN:
if (regno > index) {
netif_err(tp, probe, tp->dev,
- "Out of range of firmware\n");
+ "Out of range of firmware\n");
return;
}
break;
case PHY_READCOUNT_EQ_SKIP:
if (index + 2 >= fw_size) {
netif_err(tp, probe, tp->dev,
- "Out of range of firmware\n");
+ "Out of range of firmware\n");
return;
}
break;
@@ -1695,7 +1786,7 @@ rtl_phy_write_fw(struct rtl8169_private *tp, const struct firmware *fw)
case PHY_SKIPN:
if (index + 1 + regno >= fw_size) {
netif_err(tp, probe, tp->dev,
- "Out of range of firmware\n");
+ "Out of range of firmware\n");
return;
}
break;
@@ -1751,10 +1842,7 @@ rtl_phy_write_fw(struct rtl8169_private *tp, const struct firmware *fw)
index++;
break;
case PHY_READCOUNT_EQ_SKIP:
- if (count == data)
- index += 2;
- else
- index += 1;
+ index += (count == data) ? 2 : 1;
break;
case PHY_COMP_EQ_SKIPN:
if (predata == data)
@@ -1789,25 +1877,26 @@ rtl_phy_write_fw(struct rtl8169_private *tp, const struct firmware *fw)
static void rtl_release_firmware(struct rtl8169_private *tp)
{
- release_firmware(tp->fw);
- tp->fw = NULL;
+ if (!IS_ERR_OR_NULL(tp->fw))
+ release_firmware(tp->fw);
+ tp->fw = RTL_FIRMWARE_UNKNOWN;
}
-static int rtl_apply_firmware(struct rtl8169_private *tp, const char *fw_name)
+static void rtl_apply_firmware(struct rtl8169_private *tp)
{
- const struct firmware **fw = &tp->fw;
- int rc = !*fw;
-
- if (rc) {
- rc = request_firmware(fw, fw_name, &tp->pci_dev->dev);
- if (rc < 0)
- goto out;
- }
+ const struct firmware *fw = tp->fw;
/* TODO: release firmware once rtl_phy_write_fw signals failures. */
- rtl_phy_write_fw(tp, *fw);
-out:
- return rc;
+ if (!IS_ERR_OR_NULL(fw))
+ rtl_phy_write_fw(tp, fw);
+}
+
+static void rtl_apply_firmware_cond(struct rtl8169_private *tp, u8 reg, u16 val)
+{
+ if (rtl_readphy(tp, reg) != val)
+ netif_warn(tp, hw, tp->dev, "chipset not ready for firmware\n");
+ else
+ rtl_apply_firmware(tp);
}
static void rtl8169s_hw_phy_config(struct rtl8169_private *tp)
@@ -2164,7 +2253,7 @@ static void rtl8168d_1_hw_phy_config(struct rtl8169_private *tp)
/*
* Tx Error Issue
- * enhance line driver power
+ * Enhance line driver power
*/
{ 0x1f, 0x0002 },
{ 0x06, 0x5561 },
@@ -2246,10 +2335,8 @@ static void rtl8168d_1_hw_phy_config(struct rtl8169_private *tp)
rtl_writephy(tp, 0x1f, 0x0005);
rtl_writephy(tp, 0x05, 0x001b);
- if ((rtl_readphy(tp, 0x06) != 0xbf00) ||
- (rtl_apply_firmware(tp, FIRMWARE_8168D_1) < 0)) {
- netif_warn(tp, probe, tp->dev, "unable to apply firmware patch\n");
- }
+
+ rtl_apply_firmware_cond(tp, MII_EXPANSION, 0xbf00);
rtl_writephy(tp, 0x1f, 0x0000);
}
@@ -2278,7 +2365,7 @@ static void rtl8168d_2_hw_phy_config(struct rtl8169_private *tp)
/*
* Tx Error Issue
- * enhance line driver power
+ * Enhance line driver power
*/
{ 0x1f, 0x0002 },
{ 0x06, 0x5561 },
@@ -2351,10 +2438,8 @@ static void rtl8168d_2_hw_phy_config(struct rtl8169_private *tp)
rtl_writephy(tp, 0x1f, 0x0005);
rtl_writephy(tp, 0x05, 0x001b);
- if ((rtl_readphy(tp, 0x06) != 0xb300) ||
- (rtl_apply_firmware(tp, FIRMWARE_8168D_2) < 0)) {
- netif_warn(tp, probe, tp->dev, "unable to apply firmware patch\n");
- }
+
+ rtl_apply_firmware_cond(tp, MII_EXPANSION, 0xb300);
rtl_writephy(tp, 0x1f, 0x0000);
}
@@ -2436,6 +2521,79 @@ static void rtl8168d_4_hw_phy_config(struct rtl8169_private *tp)
rtl_patchphy(tp, 0x0d, 1 << 5);
}
+static void rtl8168e_hw_phy_config(struct rtl8169_private *tp)
+{
+ static const struct phy_reg phy_reg_init[] = {
+ /* Enable Delay cap */
+ { 0x1f, 0x0005 },
+ { 0x05, 0x8b80 },
+ { 0x06, 0xc896 },
+ { 0x1f, 0x0000 },
+
+ /* Channel estimation fine tune */
+ { 0x1f, 0x0001 },
+ { 0x0b, 0x6c20 },
+ { 0x07, 0x2872 },
+ { 0x1c, 0xefff },
+ { 0x1f, 0x0003 },
+ { 0x14, 0x6420 },
+ { 0x1f, 0x0000 },
+
+ /* Update PFM & 10M TX idle timer */
+ { 0x1f, 0x0007 },
+ { 0x1e, 0x002f },
+ { 0x15, 0x1919 },
+ { 0x1f, 0x0000 },
+
+ { 0x1f, 0x0007 },
+ { 0x1e, 0x00ac },
+ { 0x18, 0x0006 },
+ { 0x1f, 0x0000 }
+ };
+
+ rtl_apply_firmware(tp);
+
+ rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
+
+ /* DCO enable for 10M IDLE Power */
+ rtl_writephy(tp, 0x1f, 0x0007);
+ rtl_writephy(tp, 0x1e, 0x0023);
+ rtl_w1w0_phy(tp, 0x17, 0x0006, 0x0000);
+ rtl_writephy(tp, 0x1f, 0x0000);
+
+ /* For impedance matching */
+ rtl_writephy(tp, 0x1f, 0x0002);
+ rtl_w1w0_phy(tp, 0x08, 0x8000, 0x7f00);
+ rtl_writephy(tp, 0x1f, 0x0000);
+
+ /* PHY auto speed down */
+ rtl_writephy(tp, 0x1f, 0x0007);
+ rtl_writephy(tp, 0x1e, 0x002d);
+ rtl_w1w0_phy(tp, 0x18, 0x0050, 0x0000);
+ rtl_writephy(tp, 0x1f, 0x0000);
+ rtl_w1w0_phy(tp, 0x14, 0x8000, 0x0000);
+
+ rtl_writephy(tp, 0x1f, 0x0005);
+ rtl_writephy(tp, 0x05, 0x8b86);
+ rtl_w1w0_phy(tp, 0x06, 0x0001, 0x0000);
+ rtl_writephy(tp, 0x1f, 0x0000);
+
+ rtl_writephy(tp, 0x1f, 0x0005);
+ rtl_writephy(tp, 0x05, 0x8b85);
+ rtl_w1w0_phy(tp, 0x06, 0x0000, 0x2000);
+ rtl_writephy(tp, 0x1f, 0x0007);
+ rtl_writephy(tp, 0x1e, 0x0020);
+ rtl_w1w0_phy(tp, 0x15, 0x0000, 0x1100);
+ rtl_writephy(tp, 0x1f, 0x0006);
+ rtl_writephy(tp, 0x00, 0x5a00);
+ rtl_writephy(tp, 0x1f, 0x0000);
+ rtl_writephy(tp, 0x0d, 0x0007);
+ rtl_writephy(tp, 0x0e, 0x003c);
+ rtl_writephy(tp, 0x0d, 0x4007);
+ rtl_writephy(tp, 0x0e, 0x0000);
+ rtl_writephy(tp, 0x0d, 0x0000);
+}
+
static void rtl8102e_hw_phy_config(struct rtl8169_private *tp)
{
static const struct phy_reg phy_reg_init[] = {
@@ -2474,8 +2632,7 @@ static void rtl8105e_hw_phy_config(struct rtl8169_private *tp)
rtl_writephy(tp, 0x18, 0x0310);
msleep(100);
- if (rtl_apply_firmware(tp, FIRMWARE_8105E_1) < 0)
- netif_warn(tp, probe, tp->dev, "unable to apply firmware patch\n");
+ rtl_apply_firmware(tp);
rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
}
@@ -2551,6 +2708,13 @@ static void rtl_hw_phy_config(struct net_device *dev)
case RTL_GIGA_MAC_VER_30:
rtl8105e_hw_phy_config(tp);
break;
+ case RTL_GIGA_MAC_VER_31:
+ /* None. */
+ break;
+ case RTL_GIGA_MAC_VER_32:
+ case RTL_GIGA_MAC_VER_33:
+ rtl8168e_hw_phy_config(tp);
+ break;
default:
break;
@@ -2567,9 +2731,6 @@ static void rtl8169_phy_timer(unsigned long __opaque)
assert(tp->mac_version > RTL_GIGA_MAC_VER_01);
- if (!(tp->phy_1000_ctrl_reg & ADVERTISE_1000FULL))
- return;
-
spin_lock_irq(&tp->lock);
if (tp->phy_reset_pending(tp)) {
@@ -2594,28 +2755,6 @@ out_unlock:
spin_unlock_irq(&tp->lock);
}
-static inline void rtl8169_delete_timer(struct net_device *dev)
-{
- struct rtl8169_private *tp = netdev_priv(dev);
- struct timer_list *timer = &tp->timer;
-
- if (tp->mac_version <= RTL_GIGA_MAC_VER_01)
- return;
-
- del_timer_sync(timer);
-}
-
-static inline void rtl8169_request_timer(struct net_device *dev)
-{
- struct rtl8169_private *tp = netdev_priv(dev);
- struct timer_list *timer = &tp->timer;
-
- if (tp->mac_version <= RTL_GIGA_MAC_VER_01)
- return;
-
- mod_timer(timer, jiffies + RTL8169_PHY_TIMEOUT);
-}
-
#ifdef CONFIG_NET_POLL_CONTROLLER
/*
* Polling 'interrupt' - used by things like netconsole to send skbs
@@ -2683,11 +2822,11 @@ static void rtl8169_init_phy(struct net_device *dev, struct rtl8169_private *tp)
rtl8169_phy_reset(dev, tp);
rtl8169_set_speed(dev, AUTONEG_ENABLE, SPEED_1000, DUPLEX_FULL,
- ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
- ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
- (tp->mii.supports_gmii ?
- ADVERTISED_1000baseT_Half |
- ADVERTISED_1000baseT_Full : 0));
+ ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
+ ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
+ (tp->mii.supports_gmii ?
+ ADVERTISED_1000baseT_Half |
+ ADVERTISED_1000baseT_Full : 0));
if (RTL_R8(PHYstatus) & TBI_Enable)
netif_info(tp, link, dev, "TBI auto-negotiating\n");
@@ -2740,7 +2879,8 @@ static int rtl8169_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
return netif_running(dev) ? tp->do_ioctl(tp, data, cmd) : -ENODEV;
}
-static int rtl_xmii_ioctl(struct rtl8169_private *tp, struct mii_ioctl_data *data, int cmd)
+static int rtl_xmii_ioctl(struct rtl8169_private *tp,
+ struct mii_ioctl_data *data, int cmd)
{
switch (cmd) {
case SIOCGMIIPHY:
@@ -2840,6 +2980,8 @@ static const struct net_device_ops rtl8169_netdev_ops = {
.ndo_tx_timeout = rtl8169_tx_timeout,
.ndo_validate_addr = eth_validate_addr,
.ndo_change_mtu = rtl8169_change_mtu,
+ .ndo_fix_features = rtl8169_fix_features,
+ .ndo_set_features = rtl8169_set_features,
.ndo_set_mac_address = rtl_set_mac_address,
.ndo_do_ioctl = rtl8169_ioctl,
.ndo_set_multicast_list = rtl_set_rx_mode,
@@ -2859,6 +3001,7 @@ static void __devinit rtl_init_mdio_ops(struct rtl8169_private *tp)
ops->read = r8168dp_1_mdio_read;
break;
case RTL_GIGA_MAC_VER_28:
+ case RTL_GIGA_MAC_VER_31:
ops->write = r8168dp_2_mdio_write;
ops->read = r8168dp_2_mdio_read;
break;
@@ -2900,33 +3043,82 @@ static void r810x_pll_power_up(struct rtl8169_private *tp)
static void r8168_phy_power_up(struct rtl8169_private *tp)
{
rtl_writephy(tp, 0x1f, 0x0000);
- rtl_writephy(tp, 0x0e, 0x0000);
+ switch (tp->mac_version) {
+ case RTL_GIGA_MAC_VER_11:
+ case RTL_GIGA_MAC_VER_12:
+ case RTL_GIGA_MAC_VER_17:
+ case RTL_GIGA_MAC_VER_18:
+ case RTL_GIGA_MAC_VER_19:
+ case RTL_GIGA_MAC_VER_20:
+ case RTL_GIGA_MAC_VER_21:
+ case RTL_GIGA_MAC_VER_22:
+ case RTL_GIGA_MAC_VER_23:
+ case RTL_GIGA_MAC_VER_24:
+ case RTL_GIGA_MAC_VER_25:
+ case RTL_GIGA_MAC_VER_26:
+ case RTL_GIGA_MAC_VER_27:
+ case RTL_GIGA_MAC_VER_28:
+ case RTL_GIGA_MAC_VER_31:
+ rtl_writephy(tp, 0x0e, 0x0000);
+ break;
+ default:
+ break;
+ }
rtl_writephy(tp, MII_BMCR, BMCR_ANENABLE);
}
static void r8168_phy_power_down(struct rtl8169_private *tp)
{
rtl_writephy(tp, 0x1f, 0x0000);
- rtl_writephy(tp, 0x0e, 0x0200);
- rtl_writephy(tp, MII_BMCR, BMCR_PDOWN);
+ switch (tp->mac_version) {
+ case RTL_GIGA_MAC_VER_32:
+ case RTL_GIGA_MAC_VER_33:
+ rtl_writephy(tp, MII_BMCR, BMCR_ANENABLE | BMCR_PDOWN);
+ break;
+
+ case RTL_GIGA_MAC_VER_11:
+ case RTL_GIGA_MAC_VER_12:
+ case RTL_GIGA_MAC_VER_17:
+ case RTL_GIGA_MAC_VER_18:
+ case RTL_GIGA_MAC_VER_19:
+ case RTL_GIGA_MAC_VER_20:
+ case RTL_GIGA_MAC_VER_21:
+ case RTL_GIGA_MAC_VER_22:
+ case RTL_GIGA_MAC_VER_23:
+ case RTL_GIGA_MAC_VER_24:
+ case RTL_GIGA_MAC_VER_25:
+ case RTL_GIGA_MAC_VER_26:
+ case RTL_GIGA_MAC_VER_27:
+ case RTL_GIGA_MAC_VER_28:
+ case RTL_GIGA_MAC_VER_31:
+ rtl_writephy(tp, 0x0e, 0x0200);
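+		/* fall through - BMCR_PDOWN is written for these chips as well */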
+ default:
+ rtl_writephy(tp, MII_BMCR, BMCR_PDOWN);
+ break;
+ }
}
static void r8168_pll_power_down(struct rtl8169_private *tp)
{
void __iomem *ioaddr = tp->mmio_addr;
- if (((tp->mac_version == RTL_GIGA_MAC_VER_27) ||
- (tp->mac_version == RTL_GIGA_MAC_VER_28)) &&
- (ocp_read(tp, 0x0f, 0x0010) & 0x00008000)) {
+ if ((tp->mac_version == RTL_GIGA_MAC_VER_27 ||
+ tp->mac_version == RTL_GIGA_MAC_VER_28 ||
+ tp->mac_version == RTL_GIGA_MAC_VER_31) &&
+ r8168dp_check_dash(tp)) {
return;
}
- if (((tp->mac_version == RTL_GIGA_MAC_VER_23) ||
- (tp->mac_version == RTL_GIGA_MAC_VER_24)) &&
+ if ((tp->mac_version == RTL_GIGA_MAC_VER_23 ||
+ tp->mac_version == RTL_GIGA_MAC_VER_24) &&
(RTL_R16(CPlusCmd) & ASF)) {
return;
}
+ if (tp->mac_version == RTL_GIGA_MAC_VER_32 ||
+ tp->mac_version == RTL_GIGA_MAC_VER_33)
+ rtl_ephy_write(ioaddr, 0x19, 0xff64);
+
if (__rtl8169_get_wol(tp) & WAKE_ANY) {
rtl_writephy(tp, 0x1f, 0x0000);
rtl_writephy(tp, MII_BMCR, 0x0000);
@@ -2943,6 +3135,9 @@ static void r8168_pll_power_down(struct rtl8169_private *tp)
case RTL_GIGA_MAC_VER_26:
case RTL_GIGA_MAC_VER_27:
case RTL_GIGA_MAC_VER_28:
+ case RTL_GIGA_MAC_VER_31:
+ case RTL_GIGA_MAC_VER_32:
+ case RTL_GIGA_MAC_VER_33:
RTL_W8(PMCH, RTL_R8(PMCH) & ~0x80);
break;
}
@@ -2952,9 +3147,10 @@ static void r8168_pll_power_up(struct rtl8169_private *tp)
{
void __iomem *ioaddr = tp->mmio_addr;
- if (((tp->mac_version == RTL_GIGA_MAC_VER_27) ||
- (tp->mac_version == RTL_GIGA_MAC_VER_28)) &&
- (ocp_read(tp, 0x0f, 0x0010) & 0x00008000)) {
+ if ((tp->mac_version == RTL_GIGA_MAC_VER_27 ||
+ tp->mac_version == RTL_GIGA_MAC_VER_28 ||
+ tp->mac_version == RTL_GIGA_MAC_VER_31) &&
+ r8168dp_check_dash(tp)) {
return;
}
@@ -2963,6 +3159,9 @@ static void r8168_pll_power_up(struct rtl8169_private *tp)
case RTL_GIGA_MAC_VER_26:
case RTL_GIGA_MAC_VER_27:
case RTL_GIGA_MAC_VER_28:
+ case RTL_GIGA_MAC_VER_31:
+ case RTL_GIGA_MAC_VER_32:
+ case RTL_GIGA_MAC_VER_33:
RTL_W8(PMCH, RTL_R8(PMCH) | 0x80);
break;
}
@@ -3017,6 +3216,9 @@ static void __devinit rtl_init_pll_power_ops(struct rtl8169_private *tp)
case RTL_GIGA_MAC_VER_26:
case RTL_GIGA_MAC_VER_27:
case RTL_GIGA_MAC_VER_28:
+ case RTL_GIGA_MAC_VER_31:
+ case RTL_GIGA_MAC_VER_32:
+ case RTL_GIGA_MAC_VER_33:
ops->down = r8168_pll_power_down;
ops->up = r8168_pll_power_up;
break;
@@ -3028,6 +3230,22 @@ static void __devinit rtl_init_pll_power_ops(struct rtl8169_private *tp)
}
}
+static void rtl_hw_reset(struct rtl8169_private *tp)
+{
+ void __iomem *ioaddr = tp->mmio_addr;
+ int i;
+
+ /* Soft reset the chip. */
+ RTL_W8(ChipCmd, CmdReset);
+
+ /* Check that the chip has finished the reset. */
+ for (i = 0; i < 100; i++) {
+ if ((RTL_R8(ChipCmd) & CmdReset) == 0)
+ break;
+ msleep_interruptible(1);
+ }
+}
+
static int __devinit
rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
@@ -3037,7 +3255,7 @@ rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
struct mii_if_info *mii;
struct net_device *dev;
void __iomem *ioaddr;
- unsigned int i;
+ int chipset, i;
int rc;
if (netif_msg_drv(&debug)) {
@@ -3127,6 +3345,7 @@ rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
rc = -EIO;
goto err_out_free_res_3;
}
+ tp->mmio_addr = ioaddr;
tp->pcie_cap = pci_find_capability(pdev, PCI_CAP_ID_EXP);
if (!tp->pcie_cap)
@@ -3134,22 +3353,14 @@ rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
RTL_W16(IntrMask, 0x0000);
- /* Soft reset the chip. */
- RTL_W8(ChipCmd, CmdReset);
-
- /* Check that the chip has finished the reset. */
- for (i = 0; i < 100; i++) {
- if ((RTL_R8(ChipCmd) & CmdReset) == 0)
- break;
- msleep_interruptible(1);
- }
+ rtl_hw_reset(tp);
RTL_W16(IntrStatus, 0xffff);
pci_set_master(pdev);
/* Identify chip attached to board */
- rtl8169_get_mac_version(tp, ioaddr);
+ rtl8169_get_mac_version(tp, dev, cfg->default_ver);
/*
* Pretend we are using VLANs; This bypasses a nasty bug where
@@ -3161,25 +3372,10 @@ rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
rtl_init_mdio_ops(tp);
rtl_init_pll_power_ops(tp);
- /* Use appropriate default if unknown */
- if (tp->mac_version == RTL_GIGA_MAC_NONE) {
- netif_notice(tp, probe, dev,
- "unknown MAC, using family default\n");
- tp->mac_version = cfg->default_ver;
- }
-
rtl8169_print_mac_version(tp);
- for (i = 0; i < ARRAY_SIZE(rtl_chip_info); i++) {
- if (tp->mac_version == rtl_chip_info[i].mac_version)
- break;
- }
- if (i == ARRAY_SIZE(rtl_chip_info)) {
- dev_err(&pdev->dev,
- "driver bug, MAC version not found in rtl_chip_info\n");
- goto err_out_msi_4;
- }
- tp->chipset = i;
+ chipset = tp->mac_version;
+ tp->txd_version = rtl_chip_infos[chipset].txd_version;
RTL_W8(Cfg9346, Cfg9346_Unlock);
RTL_W8(Config1, RTL_R8(Config1) | PMEnable);
@@ -3199,8 +3395,6 @@ rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
tp->phy_reset_pending = rtl8169_tbi_reset_pending;
tp->link_ok = rtl8169_tbi_link_ok;
tp->do_ioctl = rtl_tbi_ioctl;
-
- tp->phy_1000_ctrl_reg = ADVERTISE_1000FULL; /* Implied by TBI */
} else {
tp->set_speed = rtl8169_set_speed_xmii;
tp->get_settings = rtl8169_gset_xmii;
@@ -3212,8 +3406,6 @@ rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
spin_lock_init(&tp->lock);
- tp->mmio_addr = ioaddr;
-
/* Get MAC address */
for (i = 0; i < MAC_ADDR_LEN; i++)
dev->dev_addr[i] = RTL_R8(MAC0 + i);
@@ -3226,7 +3418,19 @@ rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
netif_napi_add(dev, &tp->napi, rtl8169_poll, R8169_NAPI_WEIGHT);
- dev->features |= NETIF_F_HW_VLAN_TX_RX | NETIF_F_GRO;
+ /* don't enable SG, IP_CSUM and TSO by default - it might not work
+ * properly for all devices */
+ dev->features |= NETIF_F_RXCSUM |
+ NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
+
+ dev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO |
+ NETIF_F_RXCSUM | NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
+ dev->vlan_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO |
+ NETIF_F_HIGHDMA;
+
+ if (tp->mac_version == RTL_GIGA_MAC_VER_05)
+ /* 8110SCd requires hardware Rx VLAN - disallow toggling */
+ dev->hw_features &= ~NETIF_F_HW_VLAN_RX;
tp->intr_mask = 0xffff;
tp->hw_start = cfg->hw_start;
@@ -3237,6 +3441,8 @@ rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
tp->timer.data = (unsigned long) dev;
tp->timer.function = rtl8169_phy_timer;
+ tp->fw = RTL_FIRMWARE_UNKNOWN;
+
rc = register_netdev(dev);
if (rc < 0)
goto err_out_msi_4;
@@ -3244,12 +3450,12 @@ rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
pci_set_drvdata(pdev, dev);
netif_info(tp, probe, dev, "%s at 0x%lx, %pM, XID %08x IRQ %d\n",
- rtl_chip_info[tp->chipset].name,
- dev->base_addr, dev->dev_addr,
+ rtl_chip_infos[chipset].name, dev->base_addr, dev->dev_addr,
(u32)(RTL_R32(TxConfig) & 0x9cf0f8ff), dev->irq);
- if ((tp->mac_version == RTL_GIGA_MAC_VER_27) ||
- (tp->mac_version == RTL_GIGA_MAC_VER_28)) {
+ if (tp->mac_version == RTL_GIGA_MAC_VER_27 ||
+ tp->mac_version == RTL_GIGA_MAC_VER_28 ||
+ tp->mac_version == RTL_GIGA_MAC_VER_31) {
rtl8168_driver_start(tp);
}
@@ -3281,17 +3487,18 @@ static void __devexit rtl8169_remove_one(struct pci_dev *pdev)
struct net_device *dev = pci_get_drvdata(pdev);
struct rtl8169_private *tp = netdev_priv(dev);
- if ((tp->mac_version == RTL_GIGA_MAC_VER_27) ||
- (tp->mac_version == RTL_GIGA_MAC_VER_28)) {
+ if (tp->mac_version == RTL_GIGA_MAC_VER_27 ||
+ tp->mac_version == RTL_GIGA_MAC_VER_28 ||
+ tp->mac_version == RTL_GIGA_MAC_VER_31) {
rtl8168_driver_stop(tp);
}
cancel_delayed_work_sync(&tp->task);
- rtl_release_firmware(tp);
-
unregister_netdev(dev);
+ rtl_release_firmware(tp);
+
if (pci_dev_run_wake(pdev))
pm_runtime_get_noresume(&pdev->dev);
@@ -3303,6 +3510,27 @@ static void __devexit rtl8169_remove_one(struct pci_dev *pdev)
pci_set_drvdata(pdev, NULL);
}
+static void rtl_request_firmware(struct rtl8169_private *tp)
+{
+	/* Request the firmware at most once: tp->fw starts as RTL_FIRMWARE_UNKNOWN
+	 * and becomes either the loaded firmware or NULL after the first attempt. */
+ if (IS_ERR(tp->fw)) {
+ const char *name;
+
+ name = rtl_lookup_firmware_name(tp);
+ if (name) {
+ int rc;
+
+ rc = request_firmware(&tp->fw, name, &tp->pci_dev->dev);
+ if (rc >= 0)
+ return;
+
+			netif_warn(tp, ifup, tp->dev,
+				   "unable to load firmware patch %s (%d)\n",
+				   name, rc);
+ }
+ tp->fw = NULL;
+ }
+}
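tp->fw is now effectively a tri-state pointer: RTL_FIRMWARE_UNKNOWN (an ERR_PTR) until the first request, then either the loaded firmware or NULL when no patch exists or loading fails, so a failed load is remembered and not retried. A minimal lifecycle sketch with a made-up function name (the real call sites are rtl8169_open() and the PHY config paths via rtl_apply_firmware()):

/* Sketch only - assumes the helpers behave as defined earlier in this patch. */
static void example_fw_lifecycle(struct rtl8169_private *tp)
{
	tp->fw = RTL_FIRMWARE_UNKNOWN;		/* probe: nothing known yet */

	rtl_request_firmware(tp);		/* open: firmware, or NULL */

	if (!IS_ERR_OR_NULL(tp->fw))
		rtl_phy_write_fw(tp, tp->fw);	/* what rtl_apply_firmware() does */

	rtl_release_firmware(tp);		/* teardown: back to UNKNOWN */
}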
+
static int rtl8169_open(struct net_device *dev)
{
struct rtl8169_private *tp = netdev_priv(dev);
@@ -3334,24 +3562,24 @@ static int rtl8169_open(struct net_device *dev)
smp_mb();
+ rtl_request_firmware(tp);
+
retval = request_irq(dev->irq, rtl8169_interrupt,
(tp->features & RTL_FEATURE_MSI) ? 0 : IRQF_SHARED,
dev->name, dev);
if (retval < 0)
- goto err_release_ring_2;
+ goto err_release_fw_2;
napi_enable(&tp->napi);
rtl8169_init_phy(dev, tp);
- rtl8169_vlan_mode(dev);
+ rtl8169_set_features(dev, dev->features);
rtl_pll_power_up(tp);
rtl_hw_start(dev);
- rtl8169_request_timer(dev);
-
tp->saved_wolopts = 0;
pm_runtime_put_noidle(&pdev->dev);
@@ -3359,7 +3587,8 @@ static int rtl8169_open(struct net_device *dev)
out:
return retval;
-err_release_ring_2:
+err_release_fw_2:
+ rtl_release_firmware(tp);
rtl8169_rx_clear(tp);
err_free_rx_1:
dma_free_coherent(&pdev->dev, R8169_RX_RING_BYTES, tp->RxDescArray,
@@ -3382,7 +3611,8 @@ static void rtl8169_hw_reset(struct rtl8169_private *tp)
rtl8169_irq_mask_and_ack(ioaddr);
if (tp->mac_version == RTL_GIGA_MAC_VER_27 ||
- tp->mac_version == RTL_GIGA_MAC_VER_28) {
+ tp->mac_version == RTL_GIGA_MAC_VER_28 ||
+ tp->mac_version == RTL_GIGA_MAC_VER_31) {
while (RTL_R8(TxPoll) & NPQ)
udelay(20);
@@ -3400,7 +3630,7 @@ static void rtl_set_rx_tx_config_registers(struct rtl8169_private *tp)
void __iomem *ioaddr = tp->mmio_addr;
u32 cfg = rtl8169_rx_config;
- cfg |= (RTL_R32(RxConfig) & rtl_chip_info[tp->chipset].RxConfigMask);
+ cfg |= (RTL_R32(RxConfig) & RTL_RX_CONFIG_MASK);
RTL_W32(RxConfig, cfg);
/* Set DMA burst size and Interframe Gap Time */
@@ -3411,25 +3641,14 @@ static void rtl_set_rx_tx_config_registers(struct rtl8169_private *tp)
static void rtl_hw_start(struct net_device *dev)
{
struct rtl8169_private *tp = netdev_priv(dev);
- void __iomem *ioaddr = tp->mmio_addr;
- unsigned int i;
-
- /* Soft reset the chip. */
- RTL_W8(ChipCmd, CmdReset);
- /* Check that the chip has finished the reset. */
- for (i = 0; i < 100; i++) {
- if ((RTL_R8(ChipCmd) & CmdReset) == 0)
- break;
- msleep_interruptible(1);
- }
+ rtl_hw_reset(tp);
tp->hw_start(dev);
netif_start_queue(dev);
}
-
static void rtl_set_rx_tx_desc_registers(struct rtl8169_private *tp,
void __iomem *ioaddr)
{
@@ -3495,26 +3714,26 @@ static void rtl_hw_start_8169(struct net_device *dev)
}
RTL_W8(Cfg9346, Cfg9346_Unlock);
- if ((tp->mac_version == RTL_GIGA_MAC_VER_01) ||
- (tp->mac_version == RTL_GIGA_MAC_VER_02) ||
- (tp->mac_version == RTL_GIGA_MAC_VER_03) ||
- (tp->mac_version == RTL_GIGA_MAC_VER_04))
+ if (tp->mac_version == RTL_GIGA_MAC_VER_01 ||
+ tp->mac_version == RTL_GIGA_MAC_VER_02 ||
+ tp->mac_version == RTL_GIGA_MAC_VER_03 ||
+ tp->mac_version == RTL_GIGA_MAC_VER_04)
RTL_W8(ChipCmd, CmdTxEnb | CmdRxEnb);
RTL_W8(EarlyTxThres, NoEarlyTx);
rtl_set_rx_max_size(ioaddr, rx_buf_sz);
- if ((tp->mac_version == RTL_GIGA_MAC_VER_01) ||
- (tp->mac_version == RTL_GIGA_MAC_VER_02) ||
- (tp->mac_version == RTL_GIGA_MAC_VER_03) ||
- (tp->mac_version == RTL_GIGA_MAC_VER_04))
+ if (tp->mac_version == RTL_GIGA_MAC_VER_01 ||
+ tp->mac_version == RTL_GIGA_MAC_VER_02 ||
+ tp->mac_version == RTL_GIGA_MAC_VER_03 ||
+ tp->mac_version == RTL_GIGA_MAC_VER_04)
rtl_set_rx_tx_config_registers(tp);
tp->cp_cmd |= rtl_rw_cpluscmd(ioaddr) | PCIMulRW;
- if ((tp->mac_version == RTL_GIGA_MAC_VER_02) ||
- (tp->mac_version == RTL_GIGA_MAC_VER_03)) {
+ if (tp->mac_version == RTL_GIGA_MAC_VER_02 ||
+ tp->mac_version == RTL_GIGA_MAC_VER_03) {
dprintk("Set MAC Reg C+CR Offset 0xE0. "
"Bit-3 and bit-14 MUST be 1\n");
tp->cp_cmd |= (1 << 14);
@@ -3532,10 +3751,10 @@ static void rtl_hw_start_8169(struct net_device *dev)
rtl_set_rx_tx_desc_registers(tp, ioaddr);
- if ((tp->mac_version != RTL_GIGA_MAC_VER_01) &&
- (tp->mac_version != RTL_GIGA_MAC_VER_02) &&
- (tp->mac_version != RTL_GIGA_MAC_VER_03) &&
- (tp->mac_version != RTL_GIGA_MAC_VER_04)) {
+ if (tp->mac_version != RTL_GIGA_MAC_VER_01 &&
+ tp->mac_version != RTL_GIGA_MAC_VER_02 &&
+ tp->mac_version != RTL_GIGA_MAC_VER_03 &&
+ tp->mac_version != RTL_GIGA_MAC_VER_04) {
RTL_W8(ChipCmd, CmdTxEnb | CmdRxEnb);
rtl_set_rx_tx_config_registers(tp);
}
@@ -3779,6 +3998,17 @@ static void rtl_hw_start_8168d(void __iomem *ioaddr, struct pci_dev *pdev)
RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) & ~R8168_CPCMD_QUIRK_MASK);
}
+static void rtl_hw_start_8168dp(void __iomem *ioaddr, struct pci_dev *pdev)
+{
+ rtl_csi_access_enable_1(ioaddr);
+
+ rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);
+
+ RTL_W8(MaxTxPacketSize, TxPacketMax);
+
+ rtl_disable_clock_request(pdev);
+}
+
static void rtl_hw_start_8168d_4(void __iomem *ioaddr, struct pci_dev *pdev)
{
static const struct ephy_info e_info_8168d_4[] = {
@@ -3805,6 +4035,41 @@ static void rtl_hw_start_8168d_4(void __iomem *ioaddr, struct pci_dev *pdev)
rtl_enable_clock_request(pdev);
}
+static void rtl_hw_start_8168e(void __iomem *ioaddr, struct pci_dev *pdev)
+{
+ static const struct ephy_info e_info_8168e[] = {
+ { 0x00, 0x0200, 0x0100 },
+ { 0x00, 0x0000, 0x0004 },
+ { 0x06, 0x0002, 0x0001 },
+ { 0x06, 0x0000, 0x0030 },
+ { 0x07, 0x0000, 0x2000 },
+ { 0x00, 0x0000, 0x0020 },
+ { 0x03, 0x5800, 0x2000 },
+ { 0x03, 0x0000, 0x0001 },
+ { 0x01, 0x0800, 0x1000 },
+ { 0x07, 0x0000, 0x4000 },
+ { 0x1e, 0x0000, 0x2000 },
+ { 0x19, 0xffff, 0xfe6c },
+ { 0x0a, 0x0000, 0x0040 }
+ };
+
+ rtl_csi_access_enable_2(ioaddr);
+
+ rtl_ephy_init(ioaddr, e_info_8168e, ARRAY_SIZE(e_info_8168e));
+
+ rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);
+
+ RTL_W8(MaxTxPacketSize, TxPacketMax);
+
+ rtl_disable_clock_request(pdev);
+
+ /* Reset tx FIFO pointer */
+ RTL_W32(MISC, RTL_R32(MISC) | TXPLA_RST);
+ RTL_W32(MISC, RTL_R32(MISC) & ~TXPLA_RST);
+
+ RTL_W8(Config5, RTL_R8(Config5) & ~Spi_en);
+}
+
static void rtl_hw_start_8168(struct net_device *dev)
{
struct rtl8169_private *tp = netdev_priv(dev);
@@ -3842,55 +4107,64 @@ static void rtl_hw_start_8168(struct net_device *dev)
switch (tp->mac_version) {
case RTL_GIGA_MAC_VER_11:
rtl_hw_start_8168bb(ioaddr, pdev);
- break;
+ break;
case RTL_GIGA_MAC_VER_12:
case RTL_GIGA_MAC_VER_17:
rtl_hw_start_8168bef(ioaddr, pdev);
- break;
+ break;
case RTL_GIGA_MAC_VER_18:
rtl_hw_start_8168cp_1(ioaddr, pdev);
- break;
+ break;
case RTL_GIGA_MAC_VER_19:
rtl_hw_start_8168c_1(ioaddr, pdev);
- break;
+ break;
case RTL_GIGA_MAC_VER_20:
rtl_hw_start_8168c_2(ioaddr, pdev);
- break;
+ break;
case RTL_GIGA_MAC_VER_21:
rtl_hw_start_8168c_3(ioaddr, pdev);
- break;
+ break;
case RTL_GIGA_MAC_VER_22:
rtl_hw_start_8168c_4(ioaddr, pdev);
- break;
+ break;
case RTL_GIGA_MAC_VER_23:
rtl_hw_start_8168cp_2(ioaddr, pdev);
- break;
+ break;
case RTL_GIGA_MAC_VER_24:
rtl_hw_start_8168cp_3(ioaddr, pdev);
- break;
+ break;
case RTL_GIGA_MAC_VER_25:
case RTL_GIGA_MAC_VER_26:
case RTL_GIGA_MAC_VER_27:
rtl_hw_start_8168d(ioaddr, pdev);
- break;
+ break;
case RTL_GIGA_MAC_VER_28:
rtl_hw_start_8168d_4(ioaddr, pdev);
- break;
+ break;
+
+ case RTL_GIGA_MAC_VER_31:
+ rtl_hw_start_8168dp(ioaddr, pdev);
+ break;
+
+ case RTL_GIGA_MAC_VER_32:
+ case RTL_GIGA_MAC_VER_33:
+ rtl_hw_start_8168e(ioaddr, pdev);
+ break;
default:
printk(KERN_ERR PFX "%s: unknown chipset (mac_version = %d).\n",
dev->name, tp->mac_version);
- break;
+ break;
}
RTL_W8(ChipCmd, CmdTxEnb | CmdRxEnb);
@@ -3974,10 +4248,10 @@ static void rtl_hw_start_8105e_1(void __iomem *ioaddr, struct pci_dev *pdev)
{ 0x0a, 0, 0x0020 }
};
- /* Force LAN exit from ASPM if Rx/Tx are not idel */
+ /* Force LAN exit from ASPM if Rx/Tx are not idle */
RTL_W32(FuncEvent, RTL_R32(FuncEvent) | 0x002800);
- /* disable Early Tally Counter */
+ /* Disable Early Tally Counter */
RTL_W32(FuncEvent, RTL_R32(FuncEvent) & ~0x010000);
RTL_W8(MCU, RTL_R8(MCU) | EN_NDP | EN_OOB_RESET);
@@ -3998,8 +4272,8 @@ static void rtl_hw_start_8101(struct net_device *dev)
void __iomem *ioaddr = tp->mmio_addr;
struct pci_dev *pdev = tp->pci_dev;
- if ((tp->mac_version == RTL_GIGA_MAC_VER_13) ||
- (tp->mac_version == RTL_GIGA_MAC_VER_16)) {
+ if (tp->mac_version == RTL_GIGA_MAC_VER_13 ||
+ tp->mac_version == RTL_GIGA_MAC_VER_16) {
int cap = tp->pcie_cap;
if (cap) {
@@ -4062,6 +4336,8 @@ static int rtl8169_change_mtu(struct net_device *dev, int new_mtu)
return -EINVAL;
dev->mtu = new_mtu;
+ netdev_update_features(dev);
+
return 0;
}
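Calling netdev_update_features() here ties the MTU to the offload setup: the core re-evaluates the feature set through the new ndo hooks, so TSO is dropped once the MTU exceeds what the 11-bit MSS field can describe. A sketch of the resulting call chain (core helper names assumed to match this kernel series):

/*
 * rtl8169_change_mtu()
 *   -> netdev_update_features()
 *        -> rtl8169_fix_features()  clears NETIF_F_ALL_TSO if mtu > TD_MSS_MAX
 *        -> rtl8169_set_features()  reprograms RxChkSum/RxVlan in CPlusCmd
 */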
@@ -4299,6 +4575,7 @@ static void rtl8169_reset_task(struct work_struct *work)
struct rtl8169_private *tp =
container_of(work, struct rtl8169_private, task.work);
struct net_device *dev = tp->dev;
+ int i;
rtnl_lock();
@@ -4307,19 +4584,15 @@ static void rtl8169_reset_task(struct work_struct *work)
rtl8169_wait_for_quiescence(dev);
- rtl8169_rx_interrupt(dev, tp, tp->mmio_addr, ~(u32)0);
+ for (i = 0; i < NUM_RX_DESC; i++)
+ rtl8169_mark_to_asic(tp->RxDescArray + i, rx_buf_sz);
+
rtl8169_tx_clear(tp);
- if (tp->dirty_rx == tp->cur_rx) {
- rtl8169_init_ring_indexes(tp);
- rtl_hw_start(dev);
- netif_wake_queue(dev);
- rtl8169_check_link_status(dev, tp, tp->mmio_addr);
- } else {
- if (net_ratelimit())
- netif_emerg(tp, intr, dev, "Rx buffers shortage\n");
- rtl8169_schedule_work(dev, rtl8169_reset_task);
- }
+ rtl8169_init_ring_indexes(tp);
+ rtl_hw_start(dev);
+ netif_wake_queue(dev);
+ rtl8169_check_link_status(dev, tp, tp->mmio_addr);
out_unlock:
rtnl_unlock();
@@ -4336,7 +4609,7 @@ static void rtl8169_tx_timeout(struct net_device *dev)
}
static int rtl8169_xmit_frags(struct rtl8169_private *tp, struct sk_buff *skb,
- u32 opts1)
+ u32 *opts)
{
struct skb_shared_info *info = skb_shinfo(skb);
unsigned int cur_frag, entry;
@@ -4363,10 +4636,12 @@ static int rtl8169_xmit_frags(struct rtl8169_private *tp, struct sk_buff *skb,
goto err_out;
}
- /* anti gcc 2.95.3 bugware (sic) */
- status = opts1 | len | (RingEnd * !((entry + 1) % NUM_TX_DESC));
+ /* Anti gcc 2.95.3 bugware (sic) */
+ status = opts[0] | len |
+ (RingEnd * !((entry + 1) % NUM_TX_DESC));
txd->opts1 = cpu_to_le32(status);
+ txd->opts2 = cpu_to_le32(opts[1]);
txd->addr = cpu_to_le64(mapping);
tp->tx_skb[entry].len = len;
@@ -4384,24 +4659,26 @@ err_out:
return -EIO;
}
-static inline u32 rtl8169_tso_csum(struct sk_buff *skb, struct net_device *dev)
+static inline void rtl8169_tso_csum(struct rtl8169_private *tp,
+ struct sk_buff *skb, u32 *opts)
{
- if (dev->features & NETIF_F_TSO) {
- u32 mss = skb_shinfo(skb)->gso_size;
+ const struct rtl_tx_desc_info *info = tx_desc_info + tp->txd_version;
+ u32 mss = skb_shinfo(skb)->gso_size;
+ int offset = info->opts_offset;
- if (mss)
- return LargeSend | ((mss & MSSMask) << MSSShift);
- }
- if (skb->ip_summed == CHECKSUM_PARTIAL) {
+ if (mss) {
+ opts[0] |= TD_LSO;
+ opts[offset] |= min(mss, TD_MSS_MAX) << info->mss_shift;
+ } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
const struct iphdr *ip = ip_hdr(skb);
if (ip->protocol == IPPROTO_TCP)
- return IPCS | TCPCS;
+ opts[offset] |= info->checksum.tcp;
else if (ip->protocol == IPPROTO_UDP)
- return IPCS | UDPCS;
- WARN_ON(1); /* we need a WARN() */
+ opts[offset] |= info->checksum.udp;
+ else
+ WARN_ON_ONCE(1);
}
- return 0;
}
static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb,
@@ -4414,7 +4691,7 @@ static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb,
struct device *d = &tp->pci_dev->dev;
dma_addr_t mapping;
u32 status, len;
- u32 opts1;
+ u32 opts[2];
int frags;
if (unlikely(TX_BUFFS_AVAIL(tp) < skb_shinfo(skb)->nr_frags)) {
@@ -4435,31 +4712,35 @@ static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb,
tp->tx_skb[entry].len = len;
txd->addr = cpu_to_le64(mapping);
- txd->opts2 = cpu_to_le32(rtl8169_tx_vlan_tag(tp, skb));
- opts1 = DescOwn | rtl8169_tso_csum(skb, dev);
+	opts[1] = rtl8169_tx_vlan_tag(tp, skb);
+ opts[0] = DescOwn;
- frags = rtl8169_xmit_frags(tp, skb, opts1);
+ rtl8169_tso_csum(tp, skb, opts);
+
+ frags = rtl8169_xmit_frags(tp, skb, opts);
if (frags < 0)
goto err_dma_1;
else if (frags)
- opts1 |= FirstFrag;
+ opts[0] |= FirstFrag;
else {
- opts1 |= FirstFrag | LastFrag;
+ opts[0] |= FirstFrag | LastFrag;
tp->tx_skb[entry].skb = skb;
}
+ txd->opts2 = cpu_to_le32(opts[1]);
+
wmb();
- /* anti gcc 2.95.3 bugware (sic) */
- status = opts1 | len | (RingEnd * !((entry + 1) % NUM_TX_DESC));
+ /* Anti gcc 2.95.3 bugware (sic) */
+ status = opts[0] | len | (RingEnd * !((entry + 1) % NUM_TX_DESC));
txd->opts1 = cpu_to_le32(status);
tp->cur_tx += frags + 1;
wmb();
- RTL_W8(TxPoll, NPQ); /* set polling bit */
+ RTL_W8(TxPoll, NPQ);
if (TX_BUFFS_AVAIL(tp) < MAX_SKB_FRAGS) {
netif_stop_queue(dev);
@@ -4616,20 +4897,12 @@ static struct sk_buff *rtl8169_try_rx_copy(void *data,
return skb;
}
-/*
- * Warning : rtl8169_rx_interrupt() might be called :
- * 1) from NAPI (softirq) context
- * (polling = 1 : we should call netif_receive_skb())
- * 2) from process context (rtl8169_reset_task())
- * (polling = 0 : we must call netif_rx() instead)
- */
static int rtl8169_rx_interrupt(struct net_device *dev,
struct rtl8169_private *tp,
void __iomem *ioaddr, u32 budget)
{
unsigned int cur_rx, rx_left;
unsigned int count;
- int polling = (budget != ~(u32)0) ? 1 : 0;
cur_rx = tp->cur_rx;
rx_left = NUM_RX_DESC + tp->dirty_rx - cur_rx;
@@ -4689,10 +4962,7 @@ static int rtl8169_rx_interrupt(struct net_device *dev,
rtl8169_rx_vlan_tag(desc, skb);
- if (likely(polling))
- napi_gro_receive(&tp->napi, skb);
- else
- netif_rx(skb);
+ napi_gro_receive(&tp->napi, skb);
dev->stats.rx_bytes += pkt_size;
dev->stats.rx_packets++;
@@ -4755,6 +5025,7 @@ static irqreturn_t rtl8169_interrupt(int irq, void *dev_instance)
case RTL_GIGA_MAC_VER_24:
case RTL_GIGA_MAC_VER_27:
case RTL_GIGA_MAC_VER_28:
+ case RTL_GIGA_MAC_VER_31:
/* Experimental science. Pktgen proof. */
case RTL_GIGA_MAC_VER_12:
case RTL_GIGA_MAC_VER_25:
@@ -4847,7 +5118,7 @@ static void rtl8169_down(struct net_device *dev)
struct rtl8169_private *tp = netdev_priv(dev);
void __iomem *ioaddr = tp->mmio_addr;
- rtl8169_delete_timer(dev);
+ del_timer_sync(&tp->timer);
netif_stop_queue(dev);
@@ -4884,7 +5155,7 @@ static int rtl8169_close(struct net_device *dev)
pm_runtime_get_sync(&pdev->dev);
- /* update counters before going down */
+ /* Update counters before going down */
rtl8169_update_counters(dev);
rtl8169_down(dev);
@@ -4939,7 +5210,7 @@ static void rtl_set_rx_mode(struct net_device *dev)
spin_lock_irqsave(&tp->lock, flags);
tmp = rtl8169_rx_config | rx_mode |
- (RTL_R32(RxConfig) & rtl_chip_info[tp->chipset].RxConfigMask);
+ (RTL_R32(RxConfig) & RTL_RX_CONFIG_MASK);
if (tp->mac_version > RTL_GIGA_MAC_VER_06) {
u32 data = mc_filter[0];
@@ -5077,15 +5348,15 @@ static int rtl8169_runtime_idle(struct device *device)
}
static const struct dev_pm_ops rtl8169_pm_ops = {
- .suspend = rtl8169_suspend,
- .resume = rtl8169_resume,
- .freeze = rtl8169_suspend,
- .thaw = rtl8169_resume,
- .poweroff = rtl8169_suspend,
- .restore = rtl8169_resume,
- .runtime_suspend = rtl8169_runtime_suspend,
- .runtime_resume = rtl8169_runtime_resume,
- .runtime_idle = rtl8169_runtime_idle,
+ .suspend = rtl8169_suspend,
+ .resume = rtl8169_resume,
+ .freeze = rtl8169_suspend,
+ .thaw = rtl8169_resume,
+ .poweroff = rtl8169_suspend,
+ .restore = rtl8169_resume,
+ .runtime_suspend = rtl8169_runtime_suspend,
+ .runtime_resume = rtl8169_runtime_resume,
+ .runtime_idle = rtl8169_runtime_idle,
};
#define RTL8169_PM_OPS (&rtl8169_pm_ops)
@@ -5104,7 +5375,7 @@ static void rtl_shutdown(struct pci_dev *pdev)
rtl8169_net_suspend(dev);
- /* restore original MAC address */
+ /* Restore original MAC address */
rtl_rar_set(tp, dev->perm_addr);
spin_lock_irq(&tp->lock);
diff --git a/drivers/net/s2io.c b/drivers/net/s2io.c
index 356e74d20b80..89cfee7e8643 100644
--- a/drivers/net/s2io.c
+++ b/drivers/net/s2io.c
@@ -2244,13 +2244,12 @@ static int verify_xena_quiescence(struct s2io_nic *sp)
static void fix_mac_address(struct s2io_nic *sp)
{
struct XENA_dev_config __iomem *bar0 = sp->bar0;
- u64 val64;
int i = 0;
while (fix_mac[i] != END_SIGN) {
writeq(fix_mac[i++], &bar0->gpio_control);
udelay(10);
- val64 = readq(&bar0->gpio_control);
+ (void) readq(&bar0->gpio_control);
}
}
@@ -2353,7 +2352,7 @@ static int start_nic(struct s2io_nic *nic)
if (s2io_link_fault_indication(nic) == MAC_RMAC_ERR_TIMER) {
/*
- * Dont see link state interrupts initally on some switches,
+	 * Don't see link state interrupts initially on some switches,
* so directly scheduling the link state task here.
*/
schedule_work(&nic->set_link_task);
@@ -2727,7 +2726,6 @@ static void free_rxd_blk(struct s2io_nic *sp, int ring_no, int blk)
int j;
struct sk_buff *skb;
struct RxD_t *rxdp;
- struct buffAdd *ba;
struct RxD1 *rxdp1;
struct RxD3 *rxdp3;
struct mac_info *mac_control = &sp->mac_control;
@@ -2751,7 +2749,6 @@ static void free_rxd_blk(struct s2io_nic *sp, int ring_no, int blk)
memset(rxdp, 0, sizeof(struct RxD1));
} else if (sp->rxd_mode == RXD_MODE_3B) {
rxdp3 = (struct RxD3 *)rxdp;
- ba = &mac_control->rings[ring_no].ba[blk][j];
pci_unmap_single(sp->pdev,
(dma_addr_t)rxdp3->Buffer0_ptr,
BUF0_LEN,
@@ -3563,7 +3560,7 @@ static void s2io_reset(struct s2io_nic *sp)
}
/*
- * Clear spurious ECC interrupts that would have occured on
+ * Clear spurious ECC interrupts that would have occurred on
* XFRAME II cards after reset.
*/
if (sp->device_type == XFRAME_II_DEVICE) {
@@ -4065,7 +4062,7 @@ static int s2io_close(struct net_device *dev)
* Description :
* This function is the Tx entry point of the driver. S2IO NIC supports
* certain protocol assist features on Tx side, namely CSO, S/G, LSO.
- * NOTE: when device cant queue the pkt,just the trans_start variable will
+ * NOTE: when the device can't queue the pkt, just the trans_start variable will
 * not be updated.
* Return value:
* 0 on success & 1 on failure.
@@ -5383,7 +5380,7 @@ static int s2io_ethtool_sset(struct net_device *dev,
{
struct s2io_nic *sp = netdev_priv(dev);
if ((info->autoneg == AUTONEG_ENABLE) ||
- (info->speed != SPEED_10000) ||
+ (ethtool_cmd_speed(info) != SPEED_10000) ||
(info->duplex != DUPLEX_FULL))
return -EINVAL;
else {
@@ -5417,10 +5414,10 @@ static int s2io_ethtool_gset(struct net_device *dev, struct ethtool_cmd *info)
info->transceiver = XCVR_EXTERNAL;
if (netif_carrier_ok(sp->dev)) {
- info->speed = 10000;
+ ethtool_cmd_speed_set(info, SPEED_10000);
info->duplex = DUPLEX_FULL;
} else {
- info->speed = -1;
+ ethtool_cmd_speed_set(info, -1);
info->duplex = -1;
}
@@ -5484,83 +5481,79 @@ static void s2io_ethtool_gregs(struct net_device *dev,
}
}
-/**
- * s2io_phy_id - timer function that alternates adapter LED.
- * @data : address of the private member of the device structure, which
- * is a pointer to the s2io_nic structure, provided as an u32.
- * Description: This is actually the timer function that alternates the
- * adapter LED bit of the adapter control bit to set/reset every time on
- * invocation. The timer is set for 1/2 a second, hence tha NIC blinks
- * once every second.
+/*
+ * s2io_set_led - control the NIC LED
*/
-static void s2io_phy_id(unsigned long data)
+static void s2io_set_led(struct s2io_nic *sp, bool on)
{
- struct s2io_nic *sp = (struct s2io_nic *)data;
struct XENA_dev_config __iomem *bar0 = sp->bar0;
- u64 val64 = 0;
- u16 subid;
+ u16 subid = sp->pdev->subsystem_device;
+ u64 val64;
- subid = sp->pdev->subsystem_device;
if ((sp->device_type == XFRAME_II_DEVICE) ||
((subid & 0xFF) >= 0x07)) {
val64 = readq(&bar0->gpio_control);
- val64 ^= GPIO_CTRL_GPIO_0;
+ if (on)
+ val64 |= GPIO_CTRL_GPIO_0;
+ else
+ val64 &= ~GPIO_CTRL_GPIO_0;
+
writeq(val64, &bar0->gpio_control);
} else {
val64 = readq(&bar0->adapter_control);
- val64 ^= ADAPTER_LED_ON;
+ if (on)
+ val64 |= ADAPTER_LED_ON;
+ else
+ val64 &= ~ADAPTER_LED_ON;
+
writeq(val64, &bar0->adapter_control);
}
- mod_timer(&sp->id_timer, jiffies + HZ / 2);
}
/**
- * s2io_ethtool_idnic - To physically identify the nic on the system.
- * @sp : private member of the device structure, which is a pointer to the
- * s2io_nic structure.
- * @id : pointer to the structure with identification parameters given by
- * ethtool.
+ * s2io_ethtool_set_led - To physically identify the nic on the system.
+ * @dev : network device
+ * @state: led setting
+ *
* Description: Used to physically identify the NIC on the system.
* The Link LED will blink for a time specified by the user for
* identification.
* NOTE: The Link has to be Up to be able to blink the LED. Hence
 * identification is possible only if its link is up.
- * Return value:
- * int , returns 0 on success
*/
-static int s2io_ethtool_idnic(struct net_device *dev, u32 data)
+static int s2io_ethtool_set_led(struct net_device *dev,
+ enum ethtool_phys_id_state state)
{
- u64 val64 = 0, last_gpio_ctrl_val;
struct s2io_nic *sp = netdev_priv(dev);
struct XENA_dev_config __iomem *bar0 = sp->bar0;
- u16 subid;
+ u16 subid = sp->pdev->subsystem_device;
- subid = sp->pdev->subsystem_device;
- last_gpio_ctrl_val = readq(&bar0->gpio_control);
if ((sp->device_type == XFRAME_I_DEVICE) && ((subid & 0xFF) < 0x07)) {
- val64 = readq(&bar0->adapter_control);
+ u64 val64 = readq(&bar0->adapter_control);
if (!(val64 & ADAPTER_CNTL_EN)) {
pr_err("Adapter Link down, cannot blink LED\n");
- return -EFAULT;
+ return -EAGAIN;
}
}
- if (sp->id_timer.function == NULL) {
- init_timer(&sp->id_timer);
- sp->id_timer.function = s2io_phy_id;
- sp->id_timer.data = (unsigned long)sp;
- }
- mod_timer(&sp->id_timer, jiffies);
- if (data)
- msleep_interruptible(data * HZ);
- else
- msleep_interruptible(MAX_FLICKER_TIME);
- del_timer_sync(&sp->id_timer);
- if (CARDS_WITH_FAULTY_LINK_INDICATORS(sp->device_type, subid)) {
- writeq(last_gpio_ctrl_val, &bar0->gpio_control);
- last_gpio_ctrl_val = readq(&bar0->gpio_control);
+ switch (state) {
+ case ETHTOOL_ID_ACTIVE:
+ sp->adapt_ctrl_org = readq(&bar0->gpio_control);
+ return 1; /* cycle on/off once per second */
+
+ case ETHTOOL_ID_ON:
+ s2io_set_led(sp, true);
+ break;
+
+ case ETHTOOL_ID_OFF:
+ s2io_set_led(sp, false);
+ break;
+
+ case ETHTOOL_ID_INACTIVE:
+ if (CARDS_WITH_FAULTY_LINK_INDICATORS(sp->device_type, subid))
+ writeq(sp->adapt_ctrl_org, &bar0->gpio_control);
}
return 0;
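
For context, the conversions from .phys_id to .set_phys_id in this series all follow the same shape: the core calls the driver once with ETHTOOL_ID_ACTIVE (the driver saves any LED state and returns how many on/off cycles per second it wants), then alternates ETHTOOL_ID_ON/ETHTOOL_ID_OFF at that rate, and finally calls ETHTOOL_ID_INACTIVE to restore normal operation. A minimal sketch under those assumptions; struct foo_priv and the foo_led_*() helpers are illustrative only, not part of any driver touched here:

#include <linux/ethtool.h>
#include <linux/netdevice.h>

struct foo_priv { u64 saved_led_state; };

static void foo_led_save(struct foo_priv *p)  { /* read and remember LED register */ }
static void foo_led_restore(struct foo_priv *p) { /* write saved value back */ }
static void foo_led_set(struct foo_priv *p, bool on) { /* force LED on or off */ }

static int foo_set_phys_id(struct net_device *dev,
			   enum ethtool_phys_id_state state)
{
	struct foo_priv *priv = netdev_priv(dev);

	switch (state) {
	case ETHTOOL_ID_ACTIVE:
		foo_led_save(priv);
		return 1;		/* core cycles on/off once per second */
	case ETHTOOL_ID_ON:
		foo_led_set(priv, true);
		break;
	case ETHTOOL_ID_OFF:
		foo_led_set(priv, false);
		break;
	case ETHTOOL_ID_INACTIVE:
		foo_led_restore(priv);	/* back to normal LED behaviour */
		break;
	}
	return 0;
}
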
@@ -6625,25 +6618,6 @@ static int s2io_ethtool_get_regs_len(struct net_device *dev)
}
-static u32 s2io_ethtool_get_rx_csum(struct net_device *dev)
-{
- struct s2io_nic *sp = netdev_priv(dev);
-
- return sp->rx_csum;
-}
-
-static int s2io_ethtool_set_rx_csum(struct net_device *dev, u32 data)
-{
- struct s2io_nic *sp = netdev_priv(dev);
-
- if (data)
- sp->rx_csum = 1;
- else
- sp->rx_csum = 0;
-
- return 0;
-}
-
static int s2io_get_eeprom_len(struct net_device *dev)
{
return XENA_EEPROM_SPACE;
@@ -6695,61 +6669,27 @@ static void s2io_ethtool_get_strings(struct net_device *dev,
}
}
-static int s2io_ethtool_op_set_tx_csum(struct net_device *dev, u32 data)
-{
- if (data)
- dev->features |= NETIF_F_IP_CSUM;
- else
- dev->features &= ~NETIF_F_IP_CSUM;
-
- return 0;
-}
-
-static u32 s2io_ethtool_op_get_tso(struct net_device *dev)
-{
- return (dev->features & NETIF_F_TSO) != 0;
-}
-
-static int s2io_ethtool_op_set_tso(struct net_device *dev, u32 data)
-{
- if (data)
- dev->features |= (NETIF_F_TSO | NETIF_F_TSO6);
- else
- dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO6);
-
- return 0;
-}
-
-static int s2io_ethtool_set_flags(struct net_device *dev, u32 data)
+static int s2io_set_features(struct net_device *dev, u32 features)
{
struct s2io_nic *sp = netdev_priv(dev);
- int rc = 0;
- int changed = 0;
-
- if (ethtool_invalid_flags(dev, data, ETH_FLAG_LRO))
- return -EINVAL;
-
- if (data & ETH_FLAG_LRO) {
- if (!(dev->features & NETIF_F_LRO)) {
- dev->features |= NETIF_F_LRO;
- changed = 1;
- }
- } else if (dev->features & NETIF_F_LRO) {
- dev->features &= ~NETIF_F_LRO;
- changed = 1;
- }
+ u32 changed = (features ^ dev->features) & NETIF_F_LRO;
if (changed && netif_running(dev)) {
+ int rc;
+
s2io_stop_all_tx_queue(sp);
s2io_card_down(sp);
+ dev->features = features;
rc = s2io_card_up(sp);
if (rc)
s2io_reset(sp);
else
s2io_start_all_tx_queue(sp);
+
+ return rc ? rc : 1;
}
- return rc;
+ return 0;
}
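
A note on the ndo_set_features conversion above: drivers XOR the requested mask against dev->features to find the bits that actually change, act only on the bits they care about, and normally return 0 so the core writes dev->features for them; returning a positive value (as s2io does after restarting the card) tells the core the driver has already updated dev->features itself. A stripped-down sketch, with bar_configure_lro() standing in for whatever hardware reprogramming a real driver needs:

static void bar_configure_lro(struct net_device *dev, bool enable)
{
	/* illustrative stub: push the new LRO setting to the hardware */
}

static int bar_set_features(struct net_device *dev, u32 features)
{
	u32 changed = dev->features ^ features;	/* bits that differ */

	if (!(changed & NETIF_F_LRO))
		return 0;			/* nothing we act on changed */

	bar_configure_lro(dev, features & NETIF_F_LRO);
	return 0;				/* core updates dev->features */
}
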
static const struct ethtool_ops netdev_ethtool_ops = {
@@ -6765,18 +6705,9 @@ static const struct ethtool_ops netdev_ethtool_ops = {
.get_ringparam = s2io_ethtool_gringparam,
.get_pauseparam = s2io_ethtool_getpause_data,
.set_pauseparam = s2io_ethtool_setpause_data,
- .get_rx_csum = s2io_ethtool_get_rx_csum,
- .set_rx_csum = s2io_ethtool_set_rx_csum,
- .set_tx_csum = s2io_ethtool_op_set_tx_csum,
- .set_flags = s2io_ethtool_set_flags,
- .get_flags = ethtool_op_get_flags,
- .set_sg = ethtool_op_set_sg,
- .get_tso = s2io_ethtool_op_get_tso,
- .set_tso = s2io_ethtool_op_set_tso,
- .set_ufo = ethtool_op_set_ufo,
.self_test = s2io_ethtool_test,
.get_strings = s2io_ethtool_get_strings,
- .phys_id = s2io_ethtool_idnic,
+ .set_phys_id = s2io_ethtool_set_led,
.get_ethtool_stats = s2io_get_ethtool_stats,
.get_sset_count = s2io_get_sset_count,
};
@@ -7545,7 +7476,7 @@ static int rx_osm_handler(struct ring_info *ring_data, struct RxD_t * rxdp)
if ((rxdp->Control_1 & TCP_OR_UDP_FRAME) &&
((!ring_data->lro) ||
(ring_data->lro && (!(rxdp->Control_1 & RXD_FRAME_IP_FRAG)))) &&
- (sp->rx_csum)) {
+ (dev->features & NETIF_F_RXCSUM)) {
l3_csum = RXD_GET_L3_CKSUM(rxdp->Control_1);
l4_csum = RXD_GET_L4_CKSUM(rxdp->Control_1);
if ((l3_csum == L3_CKSUM_OK) && (l4_csum == L4_CKSUM_OK)) {
@@ -7806,6 +7737,7 @@ static const struct net_device_ops s2io_netdev_ops = {
.ndo_do_ioctl = s2io_ioctl,
.ndo_set_mac_address = s2io_set_mac_addr,
.ndo_change_mtu = s2io_change_mtu,
+ .ndo_set_features = s2io_set_features,
.ndo_vlan_rx_register = s2io_vlan_rx_register,
.ndo_vlan_rx_kill_vid = s2io_vlan_rx_kill_vid,
.ndo_tx_timeout = s2io_tx_watchdog,
@@ -8047,17 +7979,18 @@ s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
/* Driver entry points */
dev->netdev_ops = &s2io_netdev_ops;
SET_ETHTOOL_OPS(dev, &netdev_ethtool_ops);
- dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
- dev->features |= NETIF_F_LRO;
- dev->features |= NETIF_F_SG | NETIF_F_IP_CSUM;
+ dev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM |
+ NETIF_F_TSO | NETIF_F_TSO6 |
+ NETIF_F_RXCSUM | NETIF_F_LRO;
+ dev->features |= dev->hw_features |
+ NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
+ if (sp->device_type & XFRAME_II_DEVICE) {
+ dev->hw_features |= NETIF_F_UFO;
+ if (ufo)
+ dev->features |= NETIF_F_UFO;
+ }
if (sp->high_dma_flag == true)
dev->features |= NETIF_F_HIGHDMA;
- dev->features |= NETIF_F_TSO;
- dev->features |= NETIF_F_TSO6;
- if ((sp->device_type & XFRAME_II_DEVICE) && (ufo)) {
- dev->features |= NETIF_F_UFO;
- dev->features |= NETIF_F_HW_CSUM;
- }
dev->watchdog_timeo = WATCH_DOG_TIMEOUT;
INIT_WORK(&sp->rst_timer_task, s2io_restart_nic);
INIT_WORK(&sp->set_link_task, s2io_set_link);
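
The probe-time feature setup above separates two masks: hw_features lists what the user may later toggle with ethtool -K (serviced through ndo_set_features), while features is what is enabled right now. Capabilities that cannot be switched off, such as NETIF_F_HIGHDMA, go only into features. A condensed sketch of that pattern (names and parameters illustrative):

static void baz_init_features(struct net_device *dev, bool highdma, bool has_ufo)
{
	/* user-toggleable offloads */
	dev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM |
			   NETIF_F_TSO | NETIF_F_TSO6 |
			   NETIF_F_RXCSUM | NETIF_F_LRO;

	/* currently enabled set; VLAN acceleration is always on here */
	dev->features |= dev->hw_features |
			 NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;

	if (has_ufo)
		dev->hw_features |= NETIF_F_UFO;	/* offered, off by default */

	/* not toggleable, so it stays out of hw_features */
	if (highdma)
		dev->features |= NETIF_F_HIGHDMA;
}
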
diff --git a/drivers/net/s2io.h b/drivers/net/s2io.h
index 7d160306b651..800b3a44e653 100644
--- a/drivers/net/s2io.h
+++ b/drivers/net/s2io.h
@@ -376,7 +376,7 @@ static const u16 fifo_selector[MAX_TX_FIFOS] = {0, 1, 3, 3, 7, 7, 7, 7};
/* Maintains Per FIFO related information. */
struct tx_fifo_config {
#define MAX_AVAILABLE_TXDS 8192
- u32 fifo_len; /* specifies len of FIFO upto 8192, ie no of TxDLs */
+ u32 fifo_len; /* specifies len of FIFO up to 8192, ie no of TxDLs */
/* Priority definition */
#define TX_FIFO_PRI_0 0 /*Highest */
#define TX_FIFO_PRI_1 1
@@ -893,9 +893,6 @@ struct s2io_nic {
u16 all_multi_pos;
u16 promisc_flg;
- /* Id timer, used to blink NIC to physically identify NIC. */
- struct timer_list id_timer;
-
/* Restart timer, used to restart NIC if the device is stuck and
* a schedule task that will set the correct Link state once the
* NIC's PHY has stabilized after a state change.
@@ -1005,18 +1002,16 @@ static inline void writeq(u64 val, void __iomem *addr)
#define LF 2
static inline void SPECIAL_REG_WRITE(u64 val, void __iomem *addr, int order)
{
- u32 ret;
-
if (order == LF) {
writel((u32) (val), addr);
- ret = readl(addr);
+ (void) readl(addr);
writel((u32) (val >> 32), (addr + 4));
- ret = readl(addr + 4);
+ (void) readl(addr + 4);
} else {
writel((u32) (val >> 32), (addr + 4));
- ret = readl(addr + 4);
+ (void) readl(addr + 4);
writel((u32) (val), addr);
- ret = readl(addr);
+ (void) readl(addr);
}
}
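
The SPECIAL_REG_WRITE cleanup above keeps the readl() after each writel() but discards its value: the read-back exists only to flush the posted write so the two 32-bit halves reach the device in the chosen order. A generic sketch of the idiom (assuming lower_32_bits()/upper_32_bits() from <linux/kernel.h>):

#include <linux/io.h>
#include <linux/kernel.h>

static inline void write64_ordered(u64 val, void __iomem *addr)
{
	writel(lower_32_bits(val), addr);
	(void)readl(addr);			/* flush the posted write */
	writel(upper_32_bits(val), addr + 4);
	(void)readl(addr + 4);
}
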
diff --git a/drivers/net/sc92031.c b/drivers/net/sc92031.c
index 76290a8c3c14..fa74314ef789 100644
--- a/drivers/net/sc92031.c
+++ b/drivers/net/sc92031.c
@@ -1173,7 +1173,8 @@ static int sc92031_ethtool_get_settings(struct net_device *dev,
if (phy_ctrl & PhyCtrlAne)
cmd->advertising |= ADVERTISED_Autoneg;
- cmd->speed = (output_status & 0x2) ? SPEED_100 : SPEED_10;
+ ethtool_cmd_speed_set(cmd,
+ (output_status & 0x2) ? SPEED_100 : SPEED_10);
cmd->duplex = (output_status & 0x4) ? DUPLEX_FULL : DUPLEX_HALF;
cmd->port = PORT_MII;
cmd->phy_address = phy_address;
@@ -1188,10 +1189,11 @@ static int sc92031_ethtool_set_settings(struct net_device *dev,
{
struct sc92031_priv *priv = netdev_priv(dev);
void __iomem *port_base = priv->port_base;
+ u32 speed = ethtool_cmd_speed(cmd);
u32 phy_ctrl;
u32 old_phy_ctrl;
- if (!(cmd->speed == SPEED_10 || cmd->speed == SPEED_100))
+ if (!(speed == SPEED_10 || speed == SPEED_100))
return -EINVAL;
if (!(cmd->duplex == DUPLEX_HALF || cmd->duplex == DUPLEX_FULL))
return -EINVAL;
@@ -1229,7 +1231,7 @@ static int sc92031_ethtool_set_settings(struct net_device *dev,
// FIXME: Whole branch guessed
phy_ctrl = 0;
- if (cmd->speed == SPEED_10)
+ if (speed == SPEED_10)
phy_ctrl |= PhyCtrlSpd10;
else /* cmd->speed == SPEED_100 */
phy_ctrl |= PhyCtrlSpd100;
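
All of the cmd->speed conversions in this series exist because struct ethtool_cmd stores the link speed split across a 16-bit speed field and a 16-bit speed_hi field, so values above 65535 Mb/s do not fit in a direct assignment. The accessors in <linux/ethtool.h> of this era look roughly like the sketch below; drivers should go through them rather than touching cmd->speed directly:

static inline void ethtool_cmd_speed_set(struct ethtool_cmd *ep, __u32 speed)
{
	ep->speed = (__u16)speed;		/* low 16 bits */
	ep->speed_hi = (__u16)(speed >> 16);	/* high 16 bits */
}

static inline __u32 ethtool_cmd_speed(struct ethtool_cmd *ep)
{
	return (ep->speed_hi << 16) | ep->speed;
}
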
diff --git a/drivers/net/sfc/efx.c b/drivers/net/sfc/efx.c
index d890679e4c4d..38a55e9e89e4 100644
--- a/drivers/net/sfc/efx.c
+++ b/drivers/net/sfc/efx.c
@@ -328,7 +328,8 @@ static int efx_poll(struct napi_struct *napi, int budget)
* processing to finish, then directly poll (and ack ) the eventq.
* Finally reenable NAPI and interrupts.
*
- * Since we are touching interrupts the caller should hold the suspend lock
+ * This is for use only during a loopback self-test. It must not
+ * deliver any packets up the stack as this can result in deadlock.
*/
void efx_process_channel_now(struct efx_channel *channel)
{
@@ -336,6 +337,7 @@ void efx_process_channel_now(struct efx_channel *channel)
BUG_ON(channel->channel >= efx->n_channels);
BUG_ON(!channel->enabled);
+ BUG_ON(!efx->loopback_selftest);
/* Disable interrupts and wait for ISRs to complete */
efx_nic_disable_interrupts(efx);
@@ -1436,7 +1438,7 @@ static void efx_start_all(struct efx_nic *efx)
* restart the transmit interface early so the watchdog timer stops */
efx_start_port(efx);
- if (efx_dev_registered(efx))
+ if (efx_dev_registered(efx) && !efx->port_inhibited)
netif_tx_wake_all_queues(efx->net_dev);
efx_for_each_channel(channel, efx)
@@ -1874,6 +1876,17 @@ static void efx_set_multicast_list(struct net_device *net_dev)
/* Otherwise efx_start_port() will do this */
}
+static int efx_set_features(struct net_device *net_dev, u32 data)
+{
+ struct efx_nic *efx = netdev_priv(net_dev);
+
+ /* If disabling RX n-tuple filtering, clear existing filters */
+ if (net_dev->features & ~data & NETIF_F_NTUPLE)
+ efx_filter_clear_rx(efx, EFX_FILTER_PRI_MANUAL);
+
+ return 0;
+}
+
static const struct net_device_ops efx_netdev_ops = {
.ndo_open = efx_net_open,
.ndo_stop = efx_net_stop,
@@ -1885,6 +1898,7 @@ static const struct net_device_ops efx_netdev_ops = {
.ndo_change_mtu = efx_change_mtu,
.ndo_set_mac_address = efx_set_mac_address,
.ndo_set_multicast_list = efx_set_multicast_list,
+ .ndo_set_features = efx_set_features,
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = efx_netpoll,
#endif
@@ -2233,7 +2247,7 @@ static bool efx_port_dummy_op_poll(struct efx_nic *efx)
return false;
}
-static struct efx_phy_operations efx_dummy_phy_operations = {
+static const struct efx_phy_operations efx_dummy_phy_operations = {
.init = efx_port_dummy_op_int,
.reconfigure = efx_port_dummy_op_int,
.poll = efx_port_dummy_op_poll,
@@ -2249,7 +2263,7 @@ static struct efx_phy_operations efx_dummy_phy_operations = {
/* This zeroes out and then fills in the invariants in a struct
* efx_nic (including all sub-structures).
*/
-static int efx_init_struct(struct efx_nic *efx, struct efx_nic_type *type,
+static int efx_init_struct(struct efx_nic *efx, const struct efx_nic_type *type,
struct pci_dev *pci_dev, struct net_device *net_dev)
{
int i;
@@ -2269,7 +2283,6 @@ static int efx_init_struct(struct efx_nic *efx, struct efx_nic_type *type,
strlcpy(efx->name, pci_name(pci_dev), sizeof(efx->name));
efx->net_dev = net_dev;
- efx->rx_checksum_enabled = true;
spin_lock_init(&efx->stats_lock);
mutex_init(&efx->mac_lock);
efx->mac_op = type->default_mac_ops;
@@ -2440,7 +2453,7 @@ static int efx_pci_probe_main(struct efx_nic *efx)
static int __devinit efx_pci_probe(struct pci_dev *pci_dev,
const struct pci_device_id *entry)
{
- struct efx_nic_type *type = (struct efx_nic_type *) entry->driver_data;
+ const struct efx_nic_type *type = (const struct efx_nic_type *) entry->driver_data;
struct net_device *net_dev;
struct efx_nic *efx;
int i, rc;
@@ -2452,12 +2465,15 @@ static int __devinit efx_pci_probe(struct pci_dev *pci_dev,
return -ENOMEM;
net_dev->features |= (type->offload_features | NETIF_F_SG |
NETIF_F_HIGHDMA | NETIF_F_TSO |
- NETIF_F_GRO);
+ NETIF_F_RXCSUM);
if (type->offload_features & NETIF_F_V6_CSUM)
net_dev->features |= NETIF_F_TSO6;
/* Mask for features that also apply to VLAN devices */
net_dev->vlan_features |= (NETIF_F_ALL_CSUM | NETIF_F_SG |
- NETIF_F_HIGHDMA | NETIF_F_TSO);
+ NETIF_F_HIGHDMA | NETIF_F_ALL_TSO |
+ NETIF_F_RXCSUM);
+ /* All offloads can be toggled */
+ net_dev->hw_features = net_dev->features & ~NETIF_F_HIGHDMA;
efx = netdev_priv(net_dev);
pci_set_drvdata(pci_dev, efx);
SET_NETDEV_DEV(net_dev, &pci_dev->dev);
diff --git a/drivers/net/sfc/ethtool.c b/drivers/net/sfc/ethtool.c
index 807178ef65ad..8c5e0052c44a 100644
--- a/drivers/net/sfc/ethtool.c
+++ b/drivers/net/sfc/ethtool.c
@@ -178,19 +178,27 @@ static struct efx_ethtool_stat efx_ethtool_stats[] = {
*/
/* Identify device by flashing LEDs */
-static int efx_ethtool_phys_id(struct net_device *net_dev, u32 count)
+static int efx_ethtool_phys_id(struct net_device *net_dev,
+ enum ethtool_phys_id_state state)
{
struct efx_nic *efx = netdev_priv(net_dev);
+ enum efx_led_mode mode = EFX_LED_DEFAULT;
- do {
- efx->type->set_id_led(efx, EFX_LED_ON);
- schedule_timeout_interruptible(HZ / 2);
-
- efx->type->set_id_led(efx, EFX_LED_OFF);
- schedule_timeout_interruptible(HZ / 2);
- } while (!signal_pending(current) && --count != 0);
+ switch (state) {
+ case ETHTOOL_ID_ON:
+ mode = EFX_LED_ON;
+ break;
+ case ETHTOOL_ID_OFF:
+ mode = EFX_LED_OFF;
+ break;
+ case ETHTOOL_ID_INACTIVE:
+ mode = EFX_LED_DEFAULT;
+ break;
+ case ETHTOOL_ID_ACTIVE:
+ return 1; /* cycle on/off once per second */
+ }
- efx->type->set_id_led(efx, EFX_LED_DEFAULT);
+ efx->type->set_id_led(efx, mode);
return 0;
}
@@ -211,7 +219,7 @@ static int efx_ethtool_get_settings(struct net_device *net_dev,
ecmd->supported |= SUPPORTED_Pause | SUPPORTED_Asym_Pause;
if (LOOPBACK_INTERNAL(efx)) {
- ecmd->speed = link_state->speed;
+ ethtool_cmd_speed_set(ecmd, link_state->speed);
ecmd->duplex = link_state->fd ? DUPLEX_FULL : DUPLEX_HALF;
}
@@ -226,7 +234,8 @@ static int efx_ethtool_set_settings(struct net_device *net_dev,
int rc;
/* GMAC does not support 1000Mbps HD */
- if (ecmd->speed == SPEED_1000 && ecmd->duplex != DUPLEX_FULL) {
+ if ((ethtool_cmd_speed(ecmd) == SPEED_1000) &&
+ (ecmd->duplex != DUPLEX_FULL)) {
netif_dbg(efx, drv, efx->net_dev,
"rejecting unsupported 1000Mbps HD setting\n");
return -EINVAL;
@@ -518,72 +527,6 @@ static void efx_ethtool_get_stats(struct net_device *net_dev,
}
}
-static int efx_ethtool_set_tso(struct net_device *net_dev, u32 enable)
-{
- struct efx_nic *efx __attribute__ ((unused)) = netdev_priv(net_dev);
- u32 features;
-
- features = NETIF_F_TSO;
- if (efx->type->offload_features & NETIF_F_V6_CSUM)
- features |= NETIF_F_TSO6;
-
- if (enable)
- net_dev->features |= features;
- else
- net_dev->features &= ~features;
-
- return 0;
-}
-
-static int efx_ethtool_set_tx_csum(struct net_device *net_dev, u32 enable)
-{
- struct efx_nic *efx = netdev_priv(net_dev);
- u32 features = efx->type->offload_features & NETIF_F_ALL_CSUM;
-
- if (enable)
- net_dev->features |= features;
- else
- net_dev->features &= ~features;
-
- return 0;
-}
-
-static int efx_ethtool_set_rx_csum(struct net_device *net_dev, u32 enable)
-{
- struct efx_nic *efx = netdev_priv(net_dev);
-
- /* No way to stop the hardware doing the checks; we just
- * ignore the result.
- */
- efx->rx_checksum_enabled = !!enable;
-
- return 0;
-}
-
-static u32 efx_ethtool_get_rx_csum(struct net_device *net_dev)
-{
- struct efx_nic *efx = netdev_priv(net_dev);
-
- return efx->rx_checksum_enabled;
-}
-
-static int efx_ethtool_set_flags(struct net_device *net_dev, u32 data)
-{
- struct efx_nic *efx = netdev_priv(net_dev);
- u32 supported = (efx->type->offload_features &
- (ETH_FLAG_RXHASH | ETH_FLAG_NTUPLE));
- int rc;
-
- rc = ethtool_op_set_flags(net_dev, data, supported);
- if (rc)
- return rc;
-
- if (!(data & ETH_FLAG_NTUPLE))
- efx_filter_clear_rx(efx, EFX_FILTER_PRI_MANUAL);
-
- return 0;
-}
-
static void efx_ethtool_self_test(struct net_device *net_dev,
struct ethtool_test *test, u64 *data)
{
@@ -1070,22 +1013,10 @@ const struct ethtool_ops efx_ethtool_ops = {
.set_ringparam = efx_ethtool_set_ringparam,
.get_pauseparam = efx_ethtool_get_pauseparam,
.set_pauseparam = efx_ethtool_set_pauseparam,
- .get_rx_csum = efx_ethtool_get_rx_csum,
- .set_rx_csum = efx_ethtool_set_rx_csum,
- .get_tx_csum = ethtool_op_get_tx_csum,
- /* Need to enable/disable IPv6 too */
- .set_tx_csum = efx_ethtool_set_tx_csum,
- .get_sg = ethtool_op_get_sg,
- .set_sg = ethtool_op_set_sg,
- .get_tso = ethtool_op_get_tso,
- /* Need to enable/disable TSO-IPv6 too */
- .set_tso = efx_ethtool_set_tso,
- .get_flags = ethtool_op_get_flags,
- .set_flags = efx_ethtool_set_flags,
.get_sset_count = efx_ethtool_get_sset_count,
.self_test = efx_ethtool_self_test,
.get_strings = efx_ethtool_get_strings,
- .phys_id = efx_ethtool_phys_id,
+ .set_phys_id = efx_ethtool_phys_id,
.get_ethtool_stats = efx_ethtool_get_stats,
.get_wol = efx_ethtool_get_wol,
.set_wol = efx_ethtool_set_wol,
diff --git a/drivers/net/sfc/falcon.c b/drivers/net/sfc/falcon.c
index 734fcfb52e85..60176e873d62 100644
--- a/drivers/net/sfc/falcon.c
+++ b/drivers/net/sfc/falcon.c
@@ -692,7 +692,7 @@ static int falcon_gmii_wait(struct efx_nic *efx)
efx_oword_t md_stat;
int count;
- /* wait upto 50ms - taken max from datasheet */
+ /* wait up to 50ms - taken max from datasheet */
for (count = 0; count < 5000; count++) {
efx_reado(efx, &md_stat, FR_AB_MD_STAT);
if (EFX_OWORD_FIELD(md_stat, FRF_AB_MD_BSY) == 0) {
@@ -1221,7 +1221,7 @@ static int falcon_reset_sram(struct efx_nic *efx)
return 0;
}
- } while (++count < 20); /* wait upto 0.4 sec */
+ } while (++count < 20); /* wait up to 0.4 sec */
netif_err(efx, hw, efx->net_dev, "timed out waiting for SRAM reset\n");
return -ETIMEDOUT;
@@ -1703,7 +1703,7 @@ static int falcon_set_wol(struct efx_nic *efx, u32 type)
**************************************************************************
*/
-struct efx_nic_type falcon_a1_nic_type = {
+const struct efx_nic_type falcon_a1_nic_type = {
.probe = falcon_probe_nic,
.remove = falcon_remove_nic,
.init = falcon_init_nic,
@@ -1744,7 +1744,7 @@ struct efx_nic_type falcon_a1_nic_type = {
.reset_world_flags = ETH_RESET_IRQ,
};
-struct efx_nic_type falcon_b0_nic_type = {
+const struct efx_nic_type falcon_b0_nic_type = {
.probe = falcon_probe_nic,
.remove = falcon_remove_nic,
.init = falcon_init_nic,
diff --git a/drivers/net/sfc/falcon_xmac.c b/drivers/net/sfc/falcon_xmac.c
index 2c9ee5db3bf7..9516452c079c 100644
--- a/drivers/net/sfc/falcon_xmac.c
+++ b/drivers/net/sfc/falcon_xmac.c
@@ -362,7 +362,7 @@ void falcon_poll_xmac(struct efx_nic *efx)
falcon_ack_status_intr(efx);
}
-struct efx_mac_operations falcon_xmac_operations = {
+const struct efx_mac_operations falcon_xmac_operations = {
.reconfigure = falcon_reconfigure_xmac,
.update_stats = falcon_update_stats_xmac,
.check_fault = falcon_xmac_check_fault,
diff --git a/drivers/net/sfc/io.h b/drivers/net/sfc/io.h
index d9d8c2ef1074..cc978803d484 100644
--- a/drivers/net/sfc/io.h
+++ b/drivers/net/sfc/io.h
@@ -152,6 +152,7 @@ static inline void efx_reado(struct efx_nic *efx, efx_oword_t *value,
spin_lock_irqsave(&efx->biu_lock, flags);
value->u32[0] = _efx_readd(efx, reg + 0);
+ rmb();
value->u32[1] = _efx_readd(efx, reg + 4);
value->u32[2] = _efx_readd(efx, reg + 8);
value->u32[3] = _efx_readd(efx, reg + 12);
@@ -174,6 +175,7 @@ static inline void efx_sram_readq(struct efx_nic *efx, void __iomem *membase,
value->u64[0] = (__force __le64)__raw_readq(membase + addr);
#else
value->u32[0] = (__force __le32)__raw_readl(membase + addr);
+ rmb();
value->u32[1] = (__force __le32)__raw_readl(membase + addr + 4);
#endif
spin_unlock_irqrestore(&efx->biu_lock, flags);
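
The rmb() additions in io.h above order the component reads of a wide register: the first dword must really be read first, and with the __raw_readl() accessors nothing else stops the compiler or CPU from reordering the later loads ahead of it. A generic sketch of a 128-bit read done as four ordered dwords (the accessor choice mirrors the patch; the latching behaviour that makes the ordering matter is device-specific):

static inline void read_oword(void __iomem *reg, __le32 out[4])
{
	out[0] = (__force __le32)__raw_readl(reg + 0);
	rmb();		/* word 0 strictly before words 1..3 */
	out[1] = (__force __le32)__raw_readl(reg + 4);
	out[2] = (__force __le32)__raw_readl(reg + 8);
	out[3] = (__force __le32)__raw_readl(reg + 12);
}
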
diff --git a/drivers/net/sfc/mac.h b/drivers/net/sfc/mac.h
index 6886cdf87c12..d6a255d0856b 100644
--- a/drivers/net/sfc/mac.h
+++ b/drivers/net/sfc/mac.h
@@ -13,8 +13,8 @@
#include "net_driver.h"
-extern struct efx_mac_operations falcon_xmac_operations;
-extern struct efx_mac_operations efx_mcdi_mac_operations;
+extern const struct efx_mac_operations falcon_xmac_operations;
+extern const struct efx_mac_operations efx_mcdi_mac_operations;
extern int efx_mcdi_mac_stats(struct efx_nic *efx, dma_addr_t dma_addr,
u32 dma_len, int enable, int clear);
diff --git a/drivers/net/sfc/mcdi.c b/drivers/net/sfc/mcdi.c
index 5e118f0d2479..d98479030ef2 100644
--- a/drivers/net/sfc/mcdi.c
+++ b/drivers/net/sfc/mcdi.c
@@ -453,7 +453,7 @@ static void efx_mcdi_ev_death(struct efx_nic *efx, int rc)
*
* There's a race here with efx_mcdi_rpc(), because we might receive
* a REBOOT event *before* the request has been copied out. In polled
- * mode (during startup) this is irrelevent, because efx_mcdi_complete()
+ * mode (during startup) this is irrelevant, because efx_mcdi_complete()
* is ignored. In event mode, this condition is just an edge-case of
* receiving a REBOOT event after posting the MCDI request. Did the mc
* reboot before or after the copyout? The best we can do always is
diff --git a/drivers/net/sfc/mcdi_mac.c b/drivers/net/sfc/mcdi_mac.c
index 33f7294edb47..50c20777a564 100644
--- a/drivers/net/sfc/mcdi_mac.c
+++ b/drivers/net/sfc/mcdi_mac.c
@@ -138,7 +138,7 @@ static bool efx_mcdi_mac_check_fault(struct efx_nic *efx)
}
-struct efx_mac_operations efx_mcdi_mac_operations = {
+const struct efx_mac_operations efx_mcdi_mac_operations = {
.reconfigure = efx_mcdi_mac_reconfigure,
.update_stats = efx_port_dummy_op_void,
.check_fault = efx_mcdi_mac_check_fault,
diff --git a/drivers/net/sfc/mcdi_pcol.h b/drivers/net/sfc/mcdi_pcol.h
index b86a15f221ad..41fe06fa0600 100644
--- a/drivers/net/sfc/mcdi_pcol.h
+++ b/drivers/net/sfc/mcdi_pcol.h
@@ -103,7 +103,7 @@
*
* If Code==CMDDONE, then the fields are further interpreted as:
*
- * - LEVEL==INFO Command succeded
+ * - LEVEL==INFO Command succeeded
* - LEVEL==ERR Command failed
*
* 0 8 16 24 32
@@ -572,7 +572,7 @@
(4*(_numwords))
/* MC_CMD_SET_RAND_SEED:
- * Set the 16byte seed for the MC psuedo-random generator
+ * Set the 16byte seed for the MC pseudo-random generator
*/
#define MC_CMD_SET_RAND_SEED 0x1a
#define MC_CMD_SET_RAND_SEED_IN_LEN 16
@@ -1162,7 +1162,7 @@
#define MC_CMD_MAC_STATS_CMD_CLEAR_WIDTH 1
#define MC_CMD_MAC_STATS_CMD_PERIODIC_CHANGE_LBN 2
#define MC_CMD_MAC_STATS_CMD_PERIODIC_CHANGE_WIDTH 1
-/* Remaining PERIOD* fields only relevent when PERIODIC_CHANGE is set */
+/* Remaining PERIOD* fields only relevant when PERIODIC_CHANGE is set */
#define MC_CMD_MAC_STATS_CMD_PERIODIC_ENABLE_LBN 3
#define MC_CMD_MAC_STATS_CMD_PERIODIC_ENABLE_WIDTH 1
#define MC_CMD_MAC_STATS_CMD_PERIODIC_CLEAR_LBN 4
diff --git a/drivers/net/sfc/mcdi_phy.c b/drivers/net/sfc/mcdi_phy.c
index ec3f740f5465..6c63ab0710af 100644
--- a/drivers/net/sfc/mcdi_phy.c
+++ b/drivers/net/sfc/mcdi_phy.c
@@ -449,7 +449,7 @@ void efx_mcdi_phy_check_fcntl(struct efx_nic *efx, u32 lpa)
struct efx_mcdi_phy_data *phy_cfg = efx->phy_data;
u32 rmtadv;
- /* The link partner capabilities are only relevent if the
+ /* The link partner capabilities are only relevant if the
* link supports flow control autonegotiation */
if (~phy_cfg->supported_cap & (1 << MC_CMD_PHY_CAP_AN_LBN))
return;
@@ -513,7 +513,7 @@ static void efx_mcdi_phy_get_settings(struct efx_nic *efx, struct ethtool_cmd *e
ecmd->supported =
mcdi_to_ethtool_cap(phy_cfg->media, phy_cfg->supported_cap);
ecmd->advertising = efx->link_advertising;
- ecmd->speed = efx->link_state.speed;
+ ethtool_cmd_speed_set(ecmd, efx->link_state.speed);
ecmd->duplex = efx->link_state.fd;
ecmd->port = mcdi_to_ethtool_media(phy_cfg->media);
ecmd->phy_address = phy_cfg->port;
@@ -545,7 +545,7 @@ static int efx_mcdi_phy_set_settings(struct efx_nic *efx, struct ethtool_cmd *ec
caps = (ethtool_to_mcdi_cap(ecmd->advertising) |
1 << MC_CMD_PHY_CAP_AN_LBN);
} else if (ecmd->duplex) {
- switch (ecmd->speed) {
+ switch (ethtool_cmd_speed(ecmd)) {
case 10: caps = 1 << MC_CMD_PHY_CAP_10FDX_LBN; break;
case 100: caps = 1 << MC_CMD_PHY_CAP_100FDX_LBN; break;
case 1000: caps = 1 << MC_CMD_PHY_CAP_1000FDX_LBN; break;
@@ -553,7 +553,7 @@ static int efx_mcdi_phy_set_settings(struct efx_nic *efx, struct ethtool_cmd *ec
default: return -EINVAL;
}
} else {
- switch (ecmd->speed) {
+ switch (ethtool_cmd_speed(ecmd)) {
case 10: caps = 1 << MC_CMD_PHY_CAP_10HDX_LBN; break;
case 100: caps = 1 << MC_CMD_PHY_CAP_100HDX_LBN; break;
case 1000: caps = 1 << MC_CMD_PHY_CAP_1000HDX_LBN; break;
@@ -739,7 +739,7 @@ static const char *efx_mcdi_phy_test_name(struct efx_nic *efx,
return NULL;
}
-struct efx_phy_operations efx_mcdi_phy_ops = {
+const struct efx_phy_operations efx_mcdi_phy_ops = {
.probe = efx_mcdi_phy_probe,
.init = efx_port_dummy_op_int,
.reconfigure = efx_mcdi_phy_reconfigure,
diff --git a/drivers/net/sfc/mdio_10g.c b/drivers/net/sfc/mdio_10g.c
index 19e68c26d103..71159145b4bf 100644
--- a/drivers/net/sfc/mdio_10g.c
+++ b/drivers/net/sfc/mdio_10g.c
@@ -232,12 +232,12 @@ void efx_mdio_set_mmds_lpower(struct efx_nic *efx,
*/
int efx_mdio_set_settings(struct efx_nic *efx, struct ethtool_cmd *ecmd)
{
- struct ethtool_cmd prev;
+ struct ethtool_cmd prev = { .cmd = ETHTOOL_GSET };
efx->phy_op->get_settings(efx, &prev);
if (ecmd->advertising == prev.advertising &&
- ecmd->speed == prev.speed &&
+ ethtool_cmd_speed(ecmd) == ethtool_cmd_speed(&prev) &&
ecmd->duplex == prev.duplex &&
ecmd->port == prev.port &&
ecmd->autoneg == prev.autoneg)
diff --git a/drivers/net/sfc/net_driver.h b/drivers/net/sfc/net_driver.h
index 215d5c51bfa0..5718260298c4 100644
--- a/drivers/net/sfc/net_driver.h
+++ b/drivers/net/sfc/net_driver.h
@@ -330,7 +330,6 @@ enum efx_rx_alloc_method {
* @eventq_mask: Event queue pointer mask
* @eventq_read_ptr: Event queue read pointer
* @last_eventq_read_ptr: Last event queue read pointer value.
- * @magic_count: Event queue test event count
* @irq_count: Number of IRQs since last adaptive moderation decision
* @irq_mod_score: IRQ moderation score
* @rx_alloc_level: Watermark based heuristic counter for pushing descriptors
@@ -360,7 +359,6 @@ struct efx_channel {
unsigned int eventq_mask;
unsigned int eventq_read_ptr;
unsigned int last_eventq_read_ptr;
- unsigned int magic_count;
unsigned int irq_count;
unsigned int irq_mod_score;
@@ -670,7 +668,7 @@ struct efx_filter_state;
* @irq_zero_count: Number of legacy IRQs seen with queue flags == 0
* @fatal_irq_level: IRQ level (bit number) used for serious errors
* @mtd_list: List of MTDs attached to the NIC
- * @nic_data: Hardware dependant state
+ * @nic_data: Hardware dependent state
* @mac_lock: MAC access lock. Protects @port_enabled, @phy_mode,
* @port_inhibited, efx_monitor() and efx_reconfigure_port()
* @port_enabled: Port enabled indicator.
@@ -681,7 +679,6 @@ struct efx_filter_state;
* @port_inhibited: If set, the netif_carrier is always off. Hold the mac_lock
* @port_initialized: Port initialized?
* @net_dev: Operating system network device. Consider holding the rtnl lock
- * @rx_checksum_enabled: RX checksumming enabled
* @stats_buffer: DMA buffer for statistics
* @mac_op: MAC interface
* @phy_type: PHY type
@@ -771,14 +768,13 @@ struct efx_nic {
bool port_initialized;
struct net_device *net_dev;
- bool rx_checksum_enabled;
struct efx_buffer stats_buffer;
- struct efx_mac_operations *mac_op;
+ const struct efx_mac_operations *mac_op;
unsigned int phy_type;
- struct efx_phy_operations *phy_op;
+ const struct efx_phy_operations *phy_op;
void *phy_data;
struct mdio_if_info mdio;
unsigned int mdio_bus;
@@ -899,7 +895,7 @@ struct efx_nic_type {
void (*resume_wol)(struct efx_nic *efx);
int (*test_registers)(struct efx_nic *efx);
int (*test_nvram)(struct efx_nic *efx);
- struct efx_mac_operations *default_mac_ops;
+ const struct efx_mac_operations *default_mac_ops;
int revision;
unsigned int mem_map_size;
diff --git a/drivers/net/sfc/nic.c b/drivers/net/sfc/nic.c
index e8396614daf3..5ac9fa2cd3bc 100644
--- a/drivers/net/sfc/nic.c
+++ b/drivers/net/sfc/nic.c
@@ -84,7 +84,8 @@ static inline void efx_write_buf_tbl(struct efx_nic *efx, efx_qword_t *value,
static inline efx_qword_t *efx_event(struct efx_channel *channel,
unsigned int index)
{
- return ((efx_qword_t *) (channel->eventq.addr)) + index;
+ return ((efx_qword_t *) (channel->eventq.addr)) +
+ (index & channel->eventq_mask);
}
/* See if an event is present
@@ -673,7 +674,8 @@ void efx_nic_eventq_read_ack(struct efx_channel *channel)
efx_dword_t reg;
struct efx_nic *efx = channel->efx;
- EFX_POPULATE_DWORD_1(reg, FRF_AZ_EVQ_RPTR, channel->eventq_read_ptr);
+ EFX_POPULATE_DWORD_1(reg, FRF_AZ_EVQ_RPTR,
+ channel->eventq_read_ptr & channel->eventq_mask);
efx_writed_table(efx, &reg, efx->type->evq_rptr_tbl_base,
channel->channel);
}
@@ -850,7 +852,6 @@ efx_handle_rx_event(struct efx_channel *channel, const efx_qword_t *event)
unsigned expected_ptr;
bool rx_ev_pkt_ok, discard = false, checksummed;
struct efx_rx_queue *rx_queue;
- struct efx_nic *efx = channel->efx;
/* Basic packet information */
rx_ev_byte_cnt = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_BYTE_CNT);
@@ -873,9 +874,8 @@ efx_handle_rx_event(struct efx_channel *channel, const efx_qword_t *event)
* UDP/IP, then we can rely on the hardware checksum.
*/
checksummed =
- likely(efx->rx_checksum_enabled) &&
- (rx_ev_hdr_type == FSE_CZ_RX_EV_HDR_TYPE_IPV4V6_TCP ||
- rx_ev_hdr_type == FSE_CZ_RX_EV_HDR_TYPE_IPV4V6_UDP);
+ rx_ev_hdr_type == FSE_CZ_RX_EV_HDR_TYPE_IPV4V6_TCP ||
+ rx_ev_hdr_type == FSE_CZ_RX_EV_HDR_TYPE_IPV4V6_UDP;
} else {
efx_handle_rx_not_ok(rx_queue, event, &rx_ev_pkt_ok, &discard);
checksummed = false;
@@ -908,7 +908,7 @@ efx_handle_generated_event(struct efx_channel *channel, efx_qword_t *event)
code = EFX_QWORD_FIELD(*event, FSF_AZ_DRV_GEN_EV_MAGIC);
if (code == EFX_CHANNEL_MAGIC_TEST(channel))
- ++channel->magic_count;
+ ; /* ignore */
else if (code == EFX_CHANNEL_MAGIC_FILL(channel))
/* The queue must be empty, so we won't receive any rx
* events, so efx_process_channel() won't refill the
@@ -1015,8 +1015,7 @@ int efx_nic_process_eventq(struct efx_channel *channel, int budget)
/* Clear this event by marking it all ones */
EFX_SET_QWORD(*p_event);
- /* Increment read pointer */
- read_ptr = (read_ptr + 1) & channel->eventq_mask;
+ ++read_ptr;
ev_code = EFX_QWORD_FIELD(event, FSF_AZ_EV_CODE);
@@ -1060,6 +1059,13 @@ out:
return spent;
}
+/* Check whether an event is present in the eventq at the current
+ * read pointer. Only useful for self-test.
+ */
+bool efx_nic_event_present(struct efx_channel *channel)
+{
+ return efx_event_present(efx_event(channel, channel->eventq_read_ptr));
+}
/* Allocate buffer table entries for event queue */
int efx_nic_probe_eventq(struct efx_channel *channel)
@@ -1165,7 +1171,7 @@ static void efx_poll_flush_events(struct efx_nic *efx)
struct efx_tx_queue *tx_queue;
struct efx_rx_queue *rx_queue;
unsigned int read_ptr = channel->eventq_read_ptr;
- unsigned int end_ptr = (read_ptr - 1) & channel->eventq_mask;
+ unsigned int end_ptr = read_ptr + channel->eventq_mask - 1;
do {
efx_qword_t *event = efx_event(channel, read_ptr);
@@ -1205,7 +1211,7 @@ static void efx_poll_flush_events(struct efx_nic *efx)
* it's ok to throw away every non-flush event */
EFX_SET_QWORD(*event);
- read_ptr = (read_ptr + 1) & channel->eventq_mask;
+ ++read_ptr;
} while (read_ptr != end_ptr);
channel->eventq_read_ptr = read_ptr;
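
The nic.c changes above switch eventq_read_ptr to a free-running counter: it is no longer wrapped with eventq_mask on every increment, and is masked only where it is turned into an array index (efx_event()) or written to hardware. With a power-of-two ring size the unsigned wrap-around keeps the masked value correct, and callers such as the self-test can detect progress just by comparing raw counter values. A small generic sketch of the idiom:

struct ring {
	u64 *entries;
	unsigned int mask;	/* ring size - 1, size is a power of two */
	unsigned int read_ptr;	/* free-running, never wrapped explicitly */
};

static inline u64 *ring_entry(struct ring *r, unsigned int index)
{
	return &r->entries[index & r->mask];	/* mask only on dereference */
}

static u64 ring_pop(struct ring *r)
{
	u64 value = *ring_entry(r, r->read_ptr);

	++r->read_ptr;		/* wraps naturally at UINT_MAX */
	return value;
}
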
diff --git a/drivers/net/sfc/nic.h b/drivers/net/sfc/nic.h
index d9de1b647d41..7443f99c977f 100644
--- a/drivers/net/sfc/nic.h
+++ b/drivers/net/sfc/nic.h
@@ -150,9 +150,9 @@ struct siena_nic_data {
int wol_filter_id;
};
-extern struct efx_nic_type falcon_a1_nic_type;
-extern struct efx_nic_type falcon_b0_nic_type;
-extern struct efx_nic_type siena_a0_nic_type;
+extern const struct efx_nic_type falcon_a1_nic_type;
+extern const struct efx_nic_type falcon_b0_nic_type;
+extern const struct efx_nic_type siena_a0_nic_type;
/**************************************************************************
*
@@ -184,6 +184,7 @@ extern void efx_nic_fini_eventq(struct efx_channel *channel);
extern void efx_nic_remove_eventq(struct efx_channel *channel);
extern int efx_nic_process_eventq(struct efx_channel *channel, int rx_quota);
extern void efx_nic_eventq_read_ack(struct efx_channel *channel);
+extern bool efx_nic_event_present(struct efx_channel *channel);
/* MAC/PHY */
extern void falcon_drain_tx_fifo(struct efx_nic *efx);
diff --git a/drivers/net/sfc/phy.h b/drivers/net/sfc/phy.h
index b3b79472421e..11d148cd8441 100644
--- a/drivers/net/sfc/phy.h
+++ b/drivers/net/sfc/phy.h
@@ -13,14 +13,14 @@
/****************************************************************************
* 10Xpress (SFX7101) PHY
*/
-extern struct efx_phy_operations falcon_sfx7101_phy_ops;
+extern const struct efx_phy_operations falcon_sfx7101_phy_ops;
extern void tenxpress_set_id_led(struct efx_nic *efx, enum efx_led_mode mode);
/****************************************************************************
* AMCC/Quake QT202x PHYs
*/
-extern struct efx_phy_operations falcon_qt202x_phy_ops;
+extern const struct efx_phy_operations falcon_qt202x_phy_ops;
/* These PHYs provide various H/W control states for LEDs */
#define QUAKE_LED_LINK_INVAL (0)
@@ -39,7 +39,7 @@ extern void falcon_qt202x_set_led(struct efx_nic *p, int led, int state);
/****************************************************************************
* Transwitch CX4 retimer
*/
-extern struct efx_phy_operations falcon_txc_phy_ops;
+extern const struct efx_phy_operations falcon_txc_phy_ops;
#define TXC_GPIO_DIR_INPUT 0
#define TXC_GPIO_DIR_OUTPUT 1
@@ -50,7 +50,7 @@ extern void falcon_txc_set_gpio_val(struct efx_nic *efx, int pin, int val);
/****************************************************************************
* Siena managed PHYs
*/
-extern struct efx_phy_operations efx_mcdi_phy_ops;
+extern const struct efx_phy_operations efx_mcdi_phy_ops;
extern int efx_mcdi_mdio_read(struct efx_nic *efx, unsigned int bus,
unsigned int prtad, unsigned int devad,
diff --git a/drivers/net/sfc/qt202x_phy.c b/drivers/net/sfc/qt202x_phy.c
index 55f90924247e..7ad97e397406 100644
--- a/drivers/net/sfc/qt202x_phy.c
+++ b/drivers/net/sfc/qt202x_phy.c
@@ -449,7 +449,7 @@ static void qt202x_phy_remove(struct efx_nic *efx)
efx->phy_data = NULL;
}
-struct efx_phy_operations falcon_qt202x_phy_ops = {
+const struct efx_phy_operations falcon_qt202x_phy_ops = {
.probe = qt202x_phy_probe,
.init = qt202x_phy_init,
.reconfigure = qt202x_phy_reconfigure,
diff --git a/drivers/net/sfc/rx.c b/drivers/net/sfc/rx.c
index c0fdb59030fb..b7dc891b4461 100644
--- a/drivers/net/sfc/rx.c
+++ b/drivers/net/sfc/rx.c
@@ -605,6 +605,9 @@ void __efx_rx_packet(struct efx_channel *channel,
skb_record_rx_queue(skb, channel->channel);
}
+ if (unlikely(!(efx->net_dev->features & NETIF_F_RXCSUM)))
+ checksummed = false;
+
if (likely(checksummed || rx_buf->is_page)) {
efx_rx_packet_gro(channel, rx_buf, eh, checksummed);
return;
diff --git a/drivers/net/sfc/selftest.c b/drivers/net/sfc/selftest.c
index a0f49b348d62..50ad3bcaf68a 100644
--- a/drivers/net/sfc/selftest.c
+++ b/drivers/net/sfc/selftest.c
@@ -131,8 +131,6 @@ static int efx_test_chip(struct efx_nic *efx, struct efx_self_tests *tests)
static int efx_test_interrupts(struct efx_nic *efx,
struct efx_self_tests *tests)
{
- struct efx_channel *channel;
-
netif_dbg(efx, drv, efx->net_dev, "testing interrupts\n");
tests->interrupt = -1;
@@ -140,15 +138,6 @@ static int efx_test_interrupts(struct efx_nic *efx,
efx->last_irq_cpu = -1;
smp_wmb();
- /* ACK each interrupting event queue. Receiving an interrupt due to
- * traffic before a test event is raised is considered a pass */
- efx_for_each_channel(channel, efx) {
- if (channel->work_pending)
- efx_process_channel_now(channel);
- if (efx->last_irq_cpu >= 0)
- goto success;
- }
-
efx_nic_generate_interrupt(efx);
/* Wait for arrival of test interrupt. */
@@ -173,13 +162,13 @@ static int efx_test_eventq_irq(struct efx_channel *channel,
struct efx_self_tests *tests)
{
struct efx_nic *efx = channel->efx;
- unsigned int magic_count, count;
+ unsigned int read_ptr, count;
tests->eventq_dma[channel->channel] = -1;
tests->eventq_int[channel->channel] = -1;
tests->eventq_poll[channel->channel] = -1;
- magic_count = channel->magic_count;
+ read_ptr = channel->eventq_read_ptr;
channel->efx->last_irq_cpu = -1;
smp_wmb();
@@ -190,10 +179,7 @@ static int efx_test_eventq_irq(struct efx_channel *channel,
do {
schedule_timeout_uninterruptible(HZ / 100);
- if (channel->work_pending)
- efx_process_channel_now(channel);
-
- if (channel->magic_count != magic_count)
+ if (ACCESS_ONCE(channel->eventq_read_ptr) != read_ptr)
goto eventq_ok;
} while (++count < 2);
@@ -211,8 +197,7 @@ static int efx_test_eventq_irq(struct efx_channel *channel,
}
/* Check to see if event was received even if interrupt wasn't */
- efx_process_channel_now(channel);
- if (channel->magic_count != magic_count) {
+ if (efx_nic_event_present(channel)) {
netif_err(efx, drv, efx->net_dev,
"channel %d event was generated, but "
"failed to trigger an interrupt\n", channel->channel);
@@ -770,6 +755,8 @@ int efx_selftest(struct efx_nic *efx, struct efx_self_tests *tests,
__efx_reconfigure_port(efx);
mutex_unlock(&efx->mac_lock);
+ netif_tx_wake_all_queues(efx->net_dev);
+
return rc_test;
}
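
In the self-test rework above, the event-queue test now polls channel->eventq_read_ptr, which the IRQ path advances, instead of a dedicated magic_count. The ACCESS_ONCE() wrapper forces a fresh load on every loop iteration so the compiler cannot cache the value while the interrupt handler updates it. A generic sketch of that polling pattern (struct and field names illustrative):

#include <linux/compiler.h>
#include <linux/sched.h>

struct poll_channel { unsigned int read_ptr; };

static bool wait_for_progress(struct poll_channel *ch, unsigned int old)
{
	int i;

	for (i = 0; i < 20; i++) {
		schedule_timeout_uninterruptible(HZ / 100);
		if (ACCESS_ONCE(ch->read_ptr) != old)	/* re-read each pass */
			return true;
	}
	return false;
}
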
diff --git a/drivers/net/sfc/siena.c b/drivers/net/sfc/siena.c
index e4dd8986b1fe..ceac1c9907f0 100644
--- a/drivers/net/sfc/siena.c
+++ b/drivers/net/sfc/siena.c
@@ -581,7 +581,7 @@ static void siena_init_wol(struct efx_nic *efx)
**************************************************************************
*/
-struct efx_nic_type siena_a0_nic_type = {
+const struct efx_nic_type siena_a0_nic_type = {
.probe = siena_probe_nic,
.remove = siena_remove_nic,
.init = siena_init_nic,
diff --git a/drivers/net/sfc/tenxpress.c b/drivers/net/sfc/tenxpress.c
index efdceb35aaae..7b0fd89e7b85 100644
--- a/drivers/net/sfc/tenxpress.c
+++ b/drivers/net/sfc/tenxpress.c
@@ -460,7 +460,7 @@ tenxpress_get_settings(struct efx_nic *efx, struct ethtool_cmd *ecmd)
/* In loopback, the PHY automatically brings up the correct interface,
* but doesn't advertise the correct speed. So override it */
if (LOOPBACK_EXTERNAL(efx))
- ecmd->speed = SPEED_10000;
+ ethtool_cmd_speed_set(ecmd, SPEED_10000);
}
static int tenxpress_set_settings(struct efx_nic *efx, struct ethtool_cmd *ecmd)
@@ -478,7 +478,7 @@ static void sfx7101_set_npage_adv(struct efx_nic *efx, u32 advertising)
advertising & ADVERTISED_10000baseT_Full);
}
-struct efx_phy_operations falcon_sfx7101_phy_ops = {
+const struct efx_phy_operations falcon_sfx7101_phy_ops = {
.probe = tenxpress_phy_probe,
.init = tenxpress_phy_init,
.reconfigure = tenxpress_phy_reconfigure,
diff --git a/drivers/net/sfc/tx.c b/drivers/net/sfc/tx.c
index 139801908217..d2c85dfdf3bf 100644
--- a/drivers/net/sfc/tx.c
+++ b/drivers/net/sfc/tx.c
@@ -435,7 +435,8 @@ void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index)
* queue state. */
smp_mb();
if (unlikely(netif_tx_queue_stopped(tx_queue->core_txq)) &&
- likely(efx->port_enabled)) {
+ likely(efx->port_enabled) &&
+ likely(!efx->port_inhibited)) {
fill_level = tx_queue->insert_count - tx_queue->read_count;
if (fill_level < EFX_TXQ_THRESHOLD(efx)) {
EFX_BUG_ON_PARANOID(!efx_dev_registered(efx));
diff --git a/drivers/net/sfc/txc43128_phy.c b/drivers/net/sfc/txc43128_phy.c
index d9886addcc99..7c21b334a75b 100644
--- a/drivers/net/sfc/txc43128_phy.c
+++ b/drivers/net/sfc/txc43128_phy.c
@@ -545,7 +545,7 @@ static void txc43128_get_settings(struct efx_nic *efx, struct ethtool_cmd *ecmd)
mdio45_ethtool_gset(&efx->mdio, ecmd);
}
-struct efx_phy_operations falcon_txc_phy_ops = {
+const struct efx_phy_operations falcon_txc_phy_ops = {
.probe = txc43128_phy_probe,
.init = txc43128_phy_init,
.reconfigure = txc43128_phy_reconfigure,
diff --git a/drivers/net/sgiseeq.c b/drivers/net/sgiseeq.c
index 3a0cc63428ee..dd03bf619988 100644
--- a/drivers/net/sgiseeq.c
+++ b/drivers/net/sgiseeq.c
@@ -33,7 +33,7 @@ static char *sgiseeqstr = "SGI Seeq8003";
* with that in mind, I've decided to make this driver look completely like a
* stupid Lance from a driver architecture perspective. Only difference is that
* here our "ring buffer" looks and acts like a real Lance one does but is
- * layed out like how the HPC DMA and the Seeq want it to. You'd be surprised
+ * laid out like how the HPC DMA and the Seeq want it to. You'd be surprised
* how a stupid idea like this can pay off in performance, not to mention
* making this driver 2,000 times easier to write. ;-)
*/
@@ -77,7 +77,7 @@ struct sgiseeq_tx_desc {
};
/*
- * Warning: This structure is layed out in a certain way because HPC dma
+ * Warning: This structure is laid out in a certain way because HPC dma
* descriptors must be 8-byte aligned. So don't touch this without
* some care.
*/
diff --git a/drivers/net/sh_eth.c b/drivers/net/sh_eth.c
index e9e7a530552c..8a72a979ee71 100644
--- a/drivers/net/sh_eth.c
+++ b/drivers/net/sh_eth.c
@@ -1875,7 +1875,7 @@ static int sh_eth_drv_probe(struct platform_device *pdev)
if (ret)
goto out_unregister;
- /* print device infomation */
+ /* print device information */
pr_info("Base address at 0x%x, %pM, IRQ %d.\n",
(u32)ndev->base_addr, ndev->dev_addr, ndev->irq);
diff --git a/drivers/net/sis190.c b/drivers/net/sis190.c
index 3406ed870917..b436e007eea0 100644
--- a/drivers/net/sis190.c
+++ b/drivers/net/sis190.c
@@ -93,7 +93,7 @@ enum sis190_registers {
IntrStatus = 0x20,
IntrMask = 0x24,
IntrControl = 0x28,
- IntrTimer = 0x2c, // unused (Interupt Timer)
+ IntrTimer = 0x2c, // unused (Interrupt Timer)
PMControl = 0x30, // unused (Power Mgmt Control/Status)
rsv2 = 0x34, // reserved
ROMControl = 0x38,
@@ -234,7 +234,7 @@ enum _DescStatusBit {
RxSizeMask = 0x0000ffff
/*
* The asic could apparently do vlan, TSO, jumbo (sis191 only) and
- * provide two (unused with Linux) Tx queues. No publically
+ * provide two (unused with Linux) Tx queues. No publicly
* available documentation alas.
*/
};
diff --git a/drivers/net/sis900.c b/drivers/net/sis900.c
index 84d4167eee9a..484f795a779d 100644
--- a/drivers/net/sis900.c
+++ b/drivers/net/sis900.c
@@ -240,7 +240,8 @@ static const struct ethtool_ops sis900_ethtool_ops;
* @net_dev: the net device to get address for
*
* Older SiS900 and friends, use EEPROM to store MAC address.
- * MAC address is read from read_eeprom() into @net_dev->dev_addr.
+ * MAC address is read from read_eeprom() into @net_dev->dev_addr and
+ * @net_dev->perm_addr.
*/
static int __devinit sis900_get_mac_addr(struct pci_dev * pci_dev, struct net_device *net_dev)
@@ -261,6 +262,9 @@ static int __devinit sis900_get_mac_addr(struct pci_dev * pci_dev, struct net_de
for (i = 0; i < 3; i++)
((u16 *)(net_dev->dev_addr))[i] = read_eeprom(ioaddr, i+EEPROMMACAddr);
+ /* Store MAC Address in perm_addr */
+ memcpy(net_dev->perm_addr, net_dev->dev_addr, ETH_ALEN);
+
return 1;
}
@@ -271,7 +275,8 @@ static int __devinit sis900_get_mac_addr(struct pci_dev * pci_dev, struct net_de
*
* SiS630E model, use APC CMOS RAM to store MAC address.
* APC CMOS RAM is accessed through ISA bridge.
- * MAC address is read into @net_dev->dev_addr.
+ * MAC address is read into @net_dev->dev_addr and
+ * @net_dev->perm_addr.
*/
static int __devinit sis630e_get_mac_addr(struct pci_dev * pci_dev,
@@ -296,6 +301,10 @@ static int __devinit sis630e_get_mac_addr(struct pci_dev * pci_dev,
outb(0x09 + i, 0x70);
((u8 *)(net_dev->dev_addr))[i] = inb(0x71);
}
+
+ /* Store MAC Address in perm_addr */
+ memcpy(net_dev->perm_addr, net_dev->dev_addr, ETH_ALEN);
+
pci_write_config_byte(isa_bridge, 0x48, reg & ~0x40);
pci_dev_put(isa_bridge);
@@ -310,7 +319,7 @@ static int __devinit sis630e_get_mac_addr(struct pci_dev * pci_dev,
*
* SiS635 model, set MAC Reload Bit to load Mac address from APC
* to rfdr. rfdr is accessed through rfcr. MAC address is read into
- * @net_dev->dev_addr.
+ * @net_dev->dev_addr and @net_dev->perm_addr.
*/
static int __devinit sis635_get_mac_addr(struct pci_dev * pci_dev,
@@ -334,6 +343,9 @@ static int __devinit sis635_get_mac_addr(struct pci_dev * pci_dev,
*( ((u16 *)net_dev->dev_addr) + i) = inw(ioaddr + rfdr);
}
+ /* Store MAC Address in perm_addr */
+ memcpy(net_dev->perm_addr, net_dev->dev_addr, ETH_ALEN);
+
/* enable packet filtering */
outl(rfcrSave | RFEN, rfcr + ioaddr);
@@ -353,7 +365,7 @@ static int __devinit sis635_get_mac_addr(struct pci_dev * pci_dev,
* EEDONE signal to refuse EEPROM access by LAN.
* The EEPROM map of SiS962 or SiS963 is different to SiS900.
* The signature field in SiS962 or SiS963 spec is meaningless.
- * MAC address is read into @net_dev->dev_addr.
+ * MAC address is read into @net_dev->dev_addr and @net_dev->perm_addr.
*/
static int __devinit sis96x_get_mac_addr(struct pci_dev * pci_dev,
@@ -372,6 +384,9 @@ static int __devinit sis96x_get_mac_addr(struct pci_dev * pci_dev,
for (i = 0; i < 3; i++)
((u16 *)(net_dev->dev_addr))[i] = read_eeprom(ioaddr, i+EEPROMMACAddr);
+ /* Store MAC Address in perm_addr */
+ memcpy(net_dev->perm_addr, net_dev->dev_addr, ETH_ALEN);
+
outl(EEDONE, ee_addr);
return 1;
} else {
@@ -1180,7 +1195,7 @@ sis900_init_rx_ring(struct net_device *net_dev)
*
* 630E equalizer workaround rule(Cyrus Huang 08/15)
* PHY register 14h(Test)
- * Bit 14: 0 -- Automatically dectect (default)
+ * Bit 14: 0 -- Automatically detect (default)
* 1 -- Manually set Equalizer filter
* Bit 13: 0 -- (Default)
* 1 -- Speed up convergence of equalizer setting
@@ -1192,7 +1207,7 @@ sis900_init_rx_ring(struct net_device *net_dev)
* Then set equalizer value, and set Bit 14 to 1, Bit 9 to 0
* Link Off:Set Bit 13 to 1, Bit 14 to 0
* Calculate Equalizer value:
- * When Link is ON and Bit 14 is 0, SIS900PHY will auto-dectect proper equalizer value.
+ * When Link is ON and Bit 14 is 0, SIS900PHY will auto-detect proper equalizer value.
* When the equalizer is stable, this value is not a fixed value. It will be within
* a small range(eg. 7~9). Then we get a minimum and a maximum value(eg. min=7, max=9)
* 0 <= max <= 4 --> set equalizer to max
@@ -1723,7 +1738,7 @@ static int sis900_rx(struct net_device *net_dev)
rx_size = data_size - CRC_SIZE;
#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
- /* ``TOOLONG'' flag means jumbo packet recived. */
+ /* ``TOOLONG'' flag means jumbo packet received. */
if ((rx_status & TOOLONG) && data_size <= MAX_FRAME_SIZE)
rx_status &= (~ ((unsigned int)TOOLONG));
#endif
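
The sis900 hunks above all add the same step: once the factory MAC address has been read from EEPROM or APC CMOS into dev_addr, it is also copied into perm_addr so that ETHTOOL_GPERMADDR reports the real permanent address instead of zeros. The helper below is an illustrative condensation of that pattern, not code from the patch:

#include <linux/etherdevice.h>

static void record_factory_mac(struct net_device *dev, const u8 *factory_mac)
{
	memcpy(dev->dev_addr, factory_mac, ETH_ALEN);
	memcpy(dev->perm_addr, dev->dev_addr, ETH_ALEN);	/* for ethtool -P */
}
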
diff --git a/drivers/net/skfp/ess.c b/drivers/net/skfp/ess.c
index 8639a0884f5c..2fc5987b41dc 100644
--- a/drivers/net/skfp/ess.c
+++ b/drivers/net/skfp/ess.c
@@ -241,7 +241,7 @@ int ess_raf_received_pack(struct s_smc *smc, SMbuf *mb, struct smt_header *sm,
!= SMT_RDF_SUCCESS) ||
(sm->smt_tid != smc->ess.alloc_trans_id)) {
- DB_ESS("ESS: Allocation Responce not accepted\n",0,0) ;
+ DB_ESS("ESS: Allocation Response not accepted\n",0,0) ;
return fs;
}
@@ -393,7 +393,7 @@ static int process_bw_alloc(struct s_smc *smc, long int payload, long int overhe
* | T-NEG |
* - -
*
- * T-NEG is discribed by the equation:
+ * T-NEG is described by the equation:
*
* (-) fddiMACT-NEG
* T-NEG = -------------------
@@ -479,7 +479,7 @@ static void ess_send_response(struct s_smc *smc, struct smt_header *sm,
void *p ;
/*
- * get and initialize the responce frame
+ * get and initialize the response frame
*/
if (sba_cmd == CHANGE_ALLOCATION) {
if (!(mb=smt_build_frame(smc,SMT_RAF,SMT_REPLY,
@@ -578,7 +578,7 @@ static void ess_send_alc_req(struct s_smc *smc)
}
/*
- * get and initialize the responce frame
+ * get and initialize the response frame
*/
if (!(mb=smt_build_frame(smc,SMT_RAF,SMT_REQUEST,
sizeof(struct smt_sba_alc_req))))
diff --git a/drivers/net/skfp/fplustm.c b/drivers/net/skfp/fplustm.c
index ca4e7bb6a5a8..a20ed1a98099 100644
--- a/drivers/net/skfp/fplustm.c
+++ b/drivers/net/skfp/fplustm.c
@@ -340,7 +340,7 @@ static void mac_counter_init(struct s_smc *smc)
outpw(FM_A(FM_LCNTR),0) ;
outpw(FM_A(FM_ECNTR),0) ;
/*
- * clear internal error counter stucture
+ * clear internal error counter structure
*/
ec = (u_long *)&smc->hw.fp.err_stats ;
for (i = (sizeof(struct err_st)/sizeof(long)) ; i ; i--)
@@ -1262,8 +1262,8 @@ Function DOWNCALL/INTERN (SMT, fplustm.c)
Para mode = 1 RX_ENABLE_ALLMULTI enable all multicasts
2 RX_DISABLE_ALLMULTI disable "enable all multicasts"
- 3 RX_ENABLE_PROMISC enable promiscous
- 4 RX_DISABLE_PROMISC disable promiscous
+ 3 RX_ENABLE_PROMISC enable promiscuous
+ 4 RX_DISABLE_PROMISC disable promiscuous
5 RX_ENABLE_NSA enable reception of NSA frames
6 RX_DISABLE_NSA disable reception of NSA frames
diff --git a/drivers/net/skfp/h/cmtdef.h b/drivers/net/skfp/h/cmtdef.h
index f2f771d8be76..5a6c6122ccb0 100644
--- a/drivers/net/skfp/h/cmtdef.h
+++ b/drivers/net/skfp/h/cmtdef.h
@@ -19,7 +19,7 @@
/*
* implementation specific constants
- * MODIIFY THE FOLLWOING THREE DEFINES
+ * MODIFY THE FOLLOWING THREE DEFINES
*/
#define AMDPLC /* if Amd PLC chip used */
#ifdef CONC
@@ -456,7 +456,7 @@ struct s_plc {
u_long soft_err ; /* error counter */
u_long parity_err ; /* error counter */
u_long ebuf_err ; /* error counter */
- u_long ebuf_cont ; /* continous error counter */
+ u_long ebuf_cont ; /* continuous error counter */
u_long phyinv ; /* error counter */
u_long vsym_ctr ; /* error counter */
u_long mini_ctr ; /* error counter */
diff --git a/drivers/net/skfp/h/fplustm.h b/drivers/net/skfp/h/fplustm.h
index 6d738e1e2393..d43191ed938b 100644
--- a/drivers/net/skfp/h/fplustm.h
+++ b/drivers/net/skfp/h/fplustm.h
@@ -237,8 +237,8 @@ struct s_smt_fp {
*/
#define RX_ENABLE_ALLMULTI 1 /* enable all multicasts */
#define RX_DISABLE_ALLMULTI 2 /* disable "enable all multicasts" */
-#define RX_ENABLE_PROMISC 3 /* enable promiscous */
-#define RX_DISABLE_PROMISC 4 /* disable promiscous */
+#define RX_ENABLE_PROMISC 3 /* enable promiscuous */
+#define RX_DISABLE_PROMISC 4 /* disable promiscuous */
#define RX_ENABLE_NSA 5 /* enable reception of NSA frames */
#define RX_DISABLE_NSA 6 /* disable reception of NSA frames */
diff --git a/drivers/net/skfp/h/smc.h b/drivers/net/skfp/h/smc.h
index 026a83b9f743..c774a95902f5 100644
--- a/drivers/net/skfp/h/smc.h
+++ b/drivers/net/skfp/h/smc.h
@@ -388,7 +388,7 @@ struct smt_config {
u_long rmt_t_poll ; /* RMT : claim/beacon poller */
u_long rmt_dup_mac_behavior ; /* Flag for the beavior of SMT if
* a Duplicate MAC Address was detected.
- * FALSE: SMT will leave finaly the ring
+ * FALSE: SMT will leave finally the ring
* TRUE: SMT will reinstert into the ring
*/
u_long mac_d_max ; /* MAC : D_Max timer value */
diff --git a/drivers/net/skfp/h/smt.h b/drivers/net/skfp/h/smt.h
index 2976757a36fb..2030f9cbb24b 100644
--- a/drivers/net/skfp/h/smt.h
+++ b/drivers/net/skfp/h/smt.h
@@ -793,7 +793,7 @@ struct smt_rdf {
} ;
/*
- * SBA Request Allocation Responce Frame
+ * SBA Request Allocation Response Frame
*/
struct smt_sba_alc_res {
struct smt_header smt ; /* generic header */
diff --git a/drivers/net/skfp/h/supern_2.h b/drivers/net/skfp/h/supern_2.h
index 5ba0b8306753..0b73690280f6 100644
--- a/drivers/net/skfp/h/supern_2.h
+++ b/drivers/net/skfp/h/supern_2.h
@@ -14,7 +14,7 @@
/*
defines for AMD Supernet II chip set
- the chips are refered to as
+ the chips are referred to as
FPLUS Formac Plus
PLC Physical Layer
@@ -386,7 +386,7 @@ struct tx_queue {
#define FM_MDISRCV (4<<8) /* disable receive function */
#define FM_MRES0 (5<<8) /* reserve */
#define FM_MLIMPROM (6<<8) /* limited-promiscuous mode */
-#define FM_MPROMISCOUS (7<<8) /* address detection : promiscous */
+#define FM_MPROMISCOUS (7<<8) /* address detection : promiscuous */
#define FM_SELSA 0x0800 /* select-short-address bit */
diff --git a/drivers/net/skfp/hwmtm.c b/drivers/net/skfp/hwmtm.c
index af5a755e269d..e26398b5a7dc 100644
--- a/drivers/net/skfp/hwmtm.c
+++ b/drivers/net/skfp/hwmtm.c
@@ -691,7 +691,7 @@ static u_long repair_rxd_ring(struct s_smc *smc, struct s_smt_rx_queue *queue)
* interrupt service routine, handles the interrupt requests
* generated by the FDDI adapter.
*
- * NOTE: The operating system dependent module must garantee that the
+ * NOTE: The operating system dependent module must guarantee that the
* interrupts of the adapter are disabled when it calls fddi_isr.
*
* About the USE_BREAK_ISR mechanismn:
diff --git a/drivers/net/skfp/pcmplc.c b/drivers/net/skfp/pcmplc.c
index 112d35b1bf0e..88d02d0a42c4 100644
--- a/drivers/net/skfp/pcmplc.c
+++ b/drivers/net/skfp/pcmplc.c
@@ -1680,7 +1680,7 @@ void plc_irq(struct s_smc *smc, int np, unsigned int cmd)
* Prevent counter from being wrapped after
* hanging years in that interrupt.
*/
- plc->ebuf_cont++ ; /* Ebuf continous error */
+ plc->ebuf_cont++ ; /* Ebuf continuous error */
}
#ifdef SUPERNET_3
@@ -1717,8 +1717,8 @@ void plc_irq(struct s_smc *smc, int np, unsigned int cmd)
}
#endif /* SUPERNET_3 */
} else {
- /* Reset the continous error variable */
- plc->ebuf_cont = 0 ; /* reset Ebuf continous error */
+ /* Reset the continuous error variable */
+ plc->ebuf_cont = 0 ; /* reset Ebuf continuous error */
}
if (cmd & PL_PHYINV) { /* physical layer invalid signal */
plc->phyinv++ ;
diff --git a/drivers/net/skfp/smt.c b/drivers/net/skfp/smt.c
index 1e1bd0c201c8..08d94329c12f 100644
--- a/drivers/net/skfp/smt.c
+++ b/drivers/net/skfp/smt.c
@@ -219,7 +219,7 @@ void smt_emulate_token_ct(struct s_smc *smc, int mac_index)
/*
* Only when ring is up we will have a token count. The
- * flag is unfortunatly a single instance value. This
+ * flag is unfortunately a single instance value. This
* doesn't matter now, because we currently have only
* one MAC instance.
*/
diff --git a/drivers/net/skge.c b/drivers/net/skge.c
index 35b28f42d208..52a48cb75440 100644
--- a/drivers/net/skge.c
+++ b/drivers/net/skge.c
@@ -303,7 +303,7 @@ static int skge_get_settings(struct net_device *dev,
ecmd->advertising = skge->advertising;
ecmd->autoneg = skge->autoneg;
- ecmd->speed = skge->speed;
+ ethtool_cmd_speed_set(ecmd, skge->speed);
ecmd->duplex = skge->duplex;
return 0;
}
@@ -321,8 +321,9 @@ static int skge_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
skge->speed = -1;
} else {
u32 setting;
+ u32 speed = ethtool_cmd_speed(ecmd);
- switch (ecmd->speed) {
+ switch (speed) {
case SPEED_1000:
if (ecmd->duplex == DUPLEX_FULL)
setting = SUPPORTED_1000baseT_Full;
@@ -355,7 +356,7 @@ static int skge_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
if ((setting & supported) == 0)
return -EINVAL;
- skge->speed = ecmd->speed;
+ skge->speed = speed;
skge->duplex = ecmd->duplex;
}
@@ -537,46 +538,6 @@ static int skge_nway_reset(struct net_device *dev)
return 0;
}
-static int skge_set_sg(struct net_device *dev, u32 data)
-{
- struct skge_port *skge = netdev_priv(dev);
- struct skge_hw *hw = skge->hw;
-
- if (hw->chip_id == CHIP_ID_GENESIS && data)
- return -EOPNOTSUPP;
- return ethtool_op_set_sg(dev, data);
-}
-
-static int skge_set_tx_csum(struct net_device *dev, u32 data)
-{
- struct skge_port *skge = netdev_priv(dev);
- struct skge_hw *hw = skge->hw;
-
- if (hw->chip_id == CHIP_ID_GENESIS && data)
- return -EOPNOTSUPP;
-
- return ethtool_op_set_tx_csum(dev, data);
-}
-
-static u32 skge_get_rx_csum(struct net_device *dev)
-{
- struct skge_port *skge = netdev_priv(dev);
-
- return skge->rx_csum;
-}
-
-/* Only Yukon supports checksum offload. */
-static int skge_set_rx_csum(struct net_device *dev, u32 data)
-{
- struct skge_port *skge = netdev_priv(dev);
-
- if (skge->hw->chip_id == CHIP_ID_GENESIS && data)
- return -EOPNOTSUPP;
-
- skge->rx_csum = data;
- return 0;
-}
-
static void skge_get_pauseparam(struct net_device *dev,
struct ethtool_pauseparam *ecmd)
{
@@ -786,28 +747,27 @@ static void skge_led(struct skge_port *skge, enum led_mode mode)
}
/* blink LED's for finding board */
-static int skge_phys_id(struct net_device *dev, u32 data)
+static int skge_set_phys_id(struct net_device *dev,
+ enum ethtool_phys_id_state state)
{
struct skge_port *skge = netdev_priv(dev);
- unsigned long ms;
- enum led_mode mode = LED_MODE_TST;
- if (!data || data > (u32)(MAX_SCHEDULE_TIMEOUT / HZ))
- ms = jiffies_to_msecs(MAX_SCHEDULE_TIMEOUT / HZ) * 1000;
- else
- ms = data * 1000;
+ switch (state) {
+ case ETHTOOL_ID_ACTIVE:
+ return 2; /* cycle on/off twice per second */
- while (ms > 0) {
- skge_led(skge, mode);
- mode ^= LED_MODE_TST;
+ case ETHTOOL_ID_ON:
+ skge_led(skge, LED_MODE_TST);
+ break;
- if (msleep_interruptible(BLINK_MS))
- break;
- ms -= BLINK_MS;
- }
+ case ETHTOOL_ID_OFF:
+ skge_led(skge, LED_MODE_OFF);
+ break;
- /* back to regular LED state */
- skge_led(skge, netif_running(dev) ? LED_MODE_ON : LED_MODE_OFF);
+ case ETHTOOL_ID_INACTIVE:
+ /* back to regular LED state */
+ skge_led(skge, netif_running(dev) ? LED_MODE_ON : LED_MODE_OFF);
+ }
return 0;
}
@@ -925,12 +885,8 @@ static const struct ethtool_ops skge_ethtool_ops = {
.set_pauseparam = skge_set_pauseparam,
.get_coalesce = skge_get_coalesce,
.set_coalesce = skge_set_coalesce,
- .set_sg = skge_set_sg,
- .set_tx_csum = skge_set_tx_csum,
- .get_rx_csum = skge_get_rx_csum,
- .set_rx_csum = skge_set_rx_csum,
.get_strings = skge_get_strings,
- .phys_id = skge_phys_id,
+ .set_phys_id = skge_set_phys_id,
.get_sset_count = skge_get_sset_count,
.get_ethtool_stats = skge_get_ethtool_stats,
};
@@ -3085,7 +3041,8 @@ static struct sk_buff *skge_rx_get(struct net_device *dev,
}
skb_put(skb, len);
- if (skge->rx_csum) {
+
+ if (dev->features & NETIF_F_RXCSUM) {
skb->csum = csum;
skb->ip_summed = CHECKSUM_COMPLETE;
}
@@ -3847,10 +3804,10 @@ static struct net_device *skge_devinit(struct skge_hw *hw, int port,
setup_timer(&skge->link_timer, xm_link_timer, (unsigned long) skge);
if (hw->chip_id != CHIP_ID_GENESIS) {
- dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
- skge->rx_csum = 1;
+ dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_SG |
+ NETIF_F_RXCSUM;
+ dev->features |= dev->hw_features;
}
- dev->features |= NETIF_F_GRO;
/* read the mac address */
memcpy_fromio(dev->dev_addr, hw->regs + B2_MAC_1 + port*8, ETH_ALEN);
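
This last skge.c hunk switches the driver from setting dev->features directly to advertising the togglable offloads in dev->hw_features. As a general sketch of the registration-time pattern (hypothetical foo driver, not skge's exact code):

#include <linux/netdevice.h>

/* Offloads the user may later toggle via ethtool go in hw_features;
 * features holds what is enabled right now.  Called before register_netdev(). */
static void foo_setup_features(struct net_device *dev, bool csum_capable)
{
        if (csum_capable) {
                dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_SG | NETIF_F_RXCSUM;
                dev->features |= dev->hw_features;      /* enable them by default */
        }
}
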
diff --git a/drivers/net/skge.h b/drivers/net/skge.h
index 507addcaffa3..598bf7a1a55e 100644
--- a/drivers/net/skge.h
+++ b/drivers/net/skge.h
@@ -1038,7 +1038,7 @@ enum {
PHY_ST_PRE_SUP = 1<<6, /* Bit 6: Preamble Suppression */
PHY_ST_AN_OVER = 1<<5, /* Bit 5: Auto-Negotiation Over */
- PHY_ST_REM_FLT = 1<<4, /* Bit 4: Remote Fault Condition Occured */
+ PHY_ST_REM_FLT = 1<<4, /* Bit 4: Remote Fault Condition Occurred */
PHY_ST_AN_CAP = 1<<3, /* Bit 3: Auto-Negotiation Capability */
PHY_ST_LSYNC = 1<<2, /* Bit 2: Link Synchronized */
PHY_ST_JAB_DET = 1<<1, /* Bit 1: Jabber Detected */
@@ -1721,8 +1721,8 @@ enum {
GM_GPSR_LINK_UP = 1<<12, /* Bit 12: Link Up Status */
GM_GPSR_PAUSE = 1<<11, /* Bit 11: Pause State */
GM_GPSR_TX_ACTIVE = 1<<10, /* Bit 10: Tx in Progress */
- GM_GPSR_EXC_COL = 1<<9, /* Bit 9: Excessive Collisions Occured */
- GM_GPSR_LAT_COL = 1<<8, /* Bit 8: Late Collisions Occured */
+ GM_GPSR_EXC_COL = 1<<9, /* Bit 9: Excessive Collisions Occurred */
+ GM_GPSR_LAT_COL = 1<<8, /* Bit 8: Late Collisions Occurred */
GM_GPSR_PHY_ST_CH = 1<<5, /* Bit 5: PHY Status Change */
GM_GPSR_GIG_SPEED = 1<<4, /* Bit 4: Gigabit Speed (1 = 1000 Mbps) */
@@ -2227,7 +2227,7 @@ enum {
XM_ST_BC = 1<<7, /* Bit 7: Broadcast packet */
XM_ST_MC = 1<<6, /* Bit 6: Multicast packet */
XM_ST_UC = 1<<5, /* Bit 5: Unicast packet */
- XM_ST_TX_UR = 1<<4, /* Bit 4: FIFO Underrun occured */
+ XM_ST_TX_UR = 1<<4, /* Bit 4: FIFO Underrun occurred */
XM_ST_CS_ERR = 1<<3, /* Bit 3: Carrier Sense Error */
XM_ST_LAT_COL = 1<<2, /* Bit 2: Late Collision Error */
XM_ST_MUL_COL = 1<<1, /* Bit 1: Multiple Collisions */
@@ -2460,7 +2460,6 @@ struct skge_port {
struct timer_list link_timer;
enum pause_control flow_control;
enum pause_status flow_status;
- u8 rx_csum;
u8 blink_on;
u8 wol;
u8 autoneg; /* AUTONEG_ENABLE, AUTONEG_DISABLE */
diff --git a/drivers/net/sky2.c b/drivers/net/sky2.c
index 2a91868788f7..3ee41da130c2 100644
--- a/drivers/net/sky2.c
+++ b/drivers/net/sky2.c
@@ -932,7 +932,7 @@ static void sky2_mac_init(struct sky2_hw *hw, unsigned port)
sky2_write8(hw, SK_REG(port, TX_GMF_CTRL_T), GMF_RST_CLR);
sky2_write16(hw, SK_REG(port, TX_GMF_CTRL_T), GMF_OPER_ON);
- /* On chips without ram buffer, pause is controled by MAC level */
+ /* On chips without ram buffer, pause is controlled by MAC level */
if (!(hw->flags & SKY2_HW_RAM_BUFFER)) {
/* Pause threshold is scaled by 8 in bytes */
if (hw->chip_id == CHIP_ID_YUKON_FE_P &&
@@ -1198,12 +1198,12 @@ static void rx_set_checksum(struct sky2_port *sky2)
sky2_write32(sky2->hw,
Q_ADDR(rxqaddr[sky2->port], Q_CSR),
- (sky2->flags & SKY2_FLAG_RX_CHECKSUM)
+ (sky2->netdev->features & NETIF_F_RXCSUM)
? BMU_ENA_RX_CHKSUM : BMU_DIS_RX_CHKSUM);
}
/* Enable/disable receive hash calculation (RSS) */
-static void rx_set_rss(struct net_device *dev)
+static void rx_set_rss(struct net_device *dev, u32 features)
{
struct sky2_port *sky2 = netdev_priv(dev);
struct sky2_hw *hw = sky2->hw;
@@ -1216,7 +1216,7 @@ static void rx_set_rss(struct net_device *dev)
}
/* Program RSS initial values */
- if (dev->features & NETIF_F_RXHASH) {
+ if (features & NETIF_F_RXHASH) {
u32 key[nkeys];
get_random_bytes(key, nkeys * sizeof(u32));
@@ -1322,32 +1322,32 @@ static int sky2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
return err;
}
-#define NETIF_F_ALL_VLAN (NETIF_F_HW_VLAN_TX|NETIF_F_HW_VLAN_RX)
+#define SKY2_VLAN_OFFLOADS (NETIF_F_IP_CSUM | NETIF_F_SG | NETIF_F_TSO)
-static void sky2_vlan_mode(struct net_device *dev)
+static void sky2_vlan_mode(struct net_device *dev, u32 features)
{
struct sky2_port *sky2 = netdev_priv(dev);
struct sky2_hw *hw = sky2->hw;
u16 port = sky2->port;
- if (dev->features & NETIF_F_HW_VLAN_RX)
+ if (features & NETIF_F_HW_VLAN_RX)
sky2_write32(hw, SK_REG(port, RX_GMF_CTRL_T),
RX_VLAN_STRIP_ON);
else
sky2_write32(hw, SK_REG(port, RX_GMF_CTRL_T),
RX_VLAN_STRIP_OFF);
- dev->vlan_features = dev->features &~ NETIF_F_ALL_VLAN;
- if (dev->features & NETIF_F_HW_VLAN_TX)
+ if (features & NETIF_F_HW_VLAN_TX) {
sky2_write32(hw, SK_REG(port, TX_GMF_CTRL_T),
TX_VLAN_TAG_ON);
- else {
+
+ dev->vlan_features |= SKY2_VLAN_OFFLOADS;
+ } else {
sky2_write32(hw, SK_REG(port, TX_GMF_CTRL_T),
TX_VLAN_TAG_OFF);
/* Can't do transmit offload of vlan without hw vlan */
- dev->vlan_features &= ~(NETIF_F_TSO | NETIF_F_SG
- | NETIF_F_ALL_CSUM);
+ dev->vlan_features &= ~SKY2_VLAN_OFFLOADS;
}
}
@@ -1463,7 +1463,7 @@ static void sky2_rx_start(struct sky2_port *sky2)
rx_set_checksum(sky2);
if (!(hw->flags & SKY2_HW_RSS_BROKEN))
- rx_set_rss(sky2->netdev);
+ rx_set_rss(sky2->netdev, sky2->netdev->features);
/* submit Rx ring */
for (i = 0; i < sky2->rx_pending; i++) {
@@ -1626,7 +1626,8 @@ static void sky2_hw_up(struct sky2_port *sky2)
sky2_prefetch_init(hw, txqaddr[port], sky2->tx_le_map,
sky2->tx_ring_size - 1);
- sky2_vlan_mode(sky2->netdev);
+ sky2_vlan_mode(sky2->netdev, sky2->netdev->features);
+ netdev_update_features(sky2->netdev);
sky2_rx_start(sky2);
}
@@ -2261,12 +2262,9 @@ static int sky2_change_mtu(struct net_device *dev, int new_mtu)
hw->chip_id == CHIP_ID_YUKON_FE_P))
return -EINVAL;
- /* TSO, etc on Yukon Ultra and MTU > 1500 not supported */
- if (new_mtu > ETH_DATA_LEN && hw->chip_id == CHIP_ID_YUKON_EC_U)
- dev->features &= ~(NETIF_F_TSO|NETIF_F_SG|NETIF_F_ALL_CSUM);
-
if (!netif_running(dev)) {
dev->mtu = new_mtu;
+ netdev_update_features(dev);
return 0;
}
@@ -2288,6 +2286,7 @@ static int sky2_change_mtu(struct net_device *dev, int new_mtu)
sky2_rx_clean(sky2);
dev->mtu = new_mtu;
+ netdev_update_features(dev);
mode = DATA_BLIND_VAL(DATA_BLIND_DEF) |
GM_SMOD_VLAN_ENA | IPG_DATA_VAL(IPG_DATA_DEF);
@@ -2535,8 +2534,11 @@ static void sky2_rx_checksum(struct sky2_port *sky2, u32 status)
"%s: receive checksum problem (status = %#x)\n",
sky2->netdev->name, status);
- /* Disable checksum offload */
- sky2->flags &= ~SKY2_FLAG_RX_CHECKSUM;
+ /* Disable checksum offload
+ * It will be reenabled on next ndo_set_features, but if it's
+ * really broken, will get disabled again
+ */
+ sky2->netdev->features &= ~NETIF_F_RXCSUM;
sky2_write32(sky2->hw, Q_ADDR(rxqaddr[sky2->port], Q_CSR),
BMU_DIS_RX_CHKSUM);
}
@@ -2591,7 +2593,7 @@ static int sky2_status_intr(struct sky2_hw *hw, int to_do, u16 idx)
/* This chip reports checksum status differently */
if (hw->flags & SKY2_HW_NEW_LE) {
- if ((sky2->flags & SKY2_FLAG_RX_CHECKSUM) &&
+ if ((dev->features & NETIF_F_RXCSUM) &&
(le->css & (CSS_ISIPV4 | CSS_ISIPV6)) &&
(le->css & CSS_TCPUDPCSOK))
skb->ip_summed = CHECKSUM_UNNECESSARY;
@@ -2616,7 +2618,7 @@ static int sky2_status_intr(struct sky2_hw *hw, int to_do, u16 idx)
sky2->rx_tag = length;
/* fall through */
case OP_RXCHKS:
- if (likely(sky2->flags & SKY2_FLAG_RX_CHECKSUM))
+ if (likely(dev->features & NETIF_F_RXCSUM))
sky2_rx_checksum(sky2, status);
break;
@@ -3255,7 +3257,7 @@ static void sky2_reset(struct sky2_hw *hw)
/* Take device down (offline).
* Equivalent to doing dev_stop() but this does not
- * inform upper layers of the transistion.
+ * inform upper layers of the transition.
*/
static void sky2_detach(struct net_device *dev)
{
@@ -3411,10 +3413,10 @@ static int sky2_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
ecmd->phy_address = PHY_ADDR_MARV;
if (sky2_is_copper(hw)) {
ecmd->port = PORT_TP;
- ecmd->speed = sky2->speed;
+ ethtool_cmd_speed_set(ecmd, sky2->speed);
ecmd->supported |= SUPPORTED_Autoneg | SUPPORTED_TP;
} else {
- ecmd->speed = SPEED_1000;
+ ethtool_cmd_speed_set(ecmd, SPEED_1000);
ecmd->port = PORT_FIBRE;
ecmd->supported |= SUPPORTED_Autoneg | SUPPORTED_FIBRE;
}
@@ -3450,8 +3452,9 @@ static int sky2_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
sky2->speed = -1;
} else {
u32 setting;
+ u32 speed = ethtool_cmd_speed(ecmd);
- switch (ecmd->speed) {
+ switch (speed) {
case SPEED_1000:
if (ecmd->duplex == DUPLEX_FULL)
setting = SUPPORTED_1000baseT_Full;
@@ -3484,7 +3487,7 @@ static int sky2_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
if ((setting & supported) == 0)
return -EINVAL;
- sky2->speed = ecmd->speed;
+ sky2->speed = speed;
sky2->duplex = ecmd->duplex;
sky2->flags &= ~SKY2_FLAG_AUTO_SPEED;
}
@@ -3552,28 +3555,6 @@ static const struct sky2_stat {
{ "tx_fifo_underrun", GM_TXE_FIFO_UR },
};
-static u32 sky2_get_rx_csum(struct net_device *dev)
-{
- struct sky2_port *sky2 = netdev_priv(dev);
-
- return !!(sky2->flags & SKY2_FLAG_RX_CHECKSUM);
-}
-
-static int sky2_set_rx_csum(struct net_device *dev, u32 data)
-{
- struct sky2_port *sky2 = netdev_priv(dev);
-
- if (data)
- sky2->flags |= SKY2_FLAG_RX_CHECKSUM;
- else
- sky2->flags &= ~SKY2_FLAG_RX_CHECKSUM;
-
- sky2_write32(sky2->hw, Q_ADDR(rxqaddr[sky2->port], Q_CSR),
- data ? BMU_ENA_RX_CHKSUM : BMU_DIS_RX_CHKSUM);
-
- return 0;
-}
-
static u32 sky2_get_msglevel(struct net_device *netdev)
{
struct sky2_port *sky2 = netdev_priv(netdev);
@@ -3826,23 +3807,24 @@ static void sky2_led(struct sky2_port *sky2, enum led_mode mode)
}
/* blink LED's for finding board */
-static int sky2_phys_id(struct net_device *dev, u32 data)
+static int sky2_set_phys_id(struct net_device *dev,
+ enum ethtool_phys_id_state state)
{
struct sky2_port *sky2 = netdev_priv(dev);
- unsigned int i;
-
- if (data == 0)
- data = UINT_MAX;
- for (i = 0; i < data; i++) {
+ switch (state) {
+ case ETHTOOL_ID_ACTIVE:
+ return 1; /* cycle on/off once per second */
+ case ETHTOOL_ID_INACTIVE:
+ sky2_led(sky2, MO_LED_NORM);
+ break;
+ case ETHTOOL_ID_ON:
sky2_led(sky2, MO_LED_ON);
- if (msleep_interruptible(500))
- break;
+ break;
+ case ETHTOOL_ID_OFF:
sky2_led(sky2, MO_LED_OFF);
- if (msleep_interruptible(500))
- break;
+ break;
}
- sky2_led(sky2, MO_LED_NORM);
return 0;
}
@@ -4083,34 +4065,6 @@ static void sky2_get_regs(struct net_device *dev, struct ethtool_regs *regs,
}
}
-/* In order to do Jumbo packets on these chips, need to turn off the
- * transmit store/forward. Therefore checksum offload won't work.
- */
-static int no_tx_offload(struct net_device *dev)
-{
- const struct sky2_port *sky2 = netdev_priv(dev);
- const struct sky2_hw *hw = sky2->hw;
-
- return dev->mtu > ETH_DATA_LEN && hw->chip_id == CHIP_ID_YUKON_EC_U;
-}
-
-static int sky2_set_tx_csum(struct net_device *dev, u32 data)
-{
- if (data && no_tx_offload(dev))
- return -EINVAL;
-
- return ethtool_op_set_tx_csum(dev, data);
-}
-
-
-static int sky2_set_tso(struct net_device *dev, u32 data)
-{
- if (data && no_tx_offload(dev))
- return -EINVAL;
-
- return ethtool_op_set_tso(dev, data);
-}
-
static int sky2_get_eeprom_len(struct net_device *dev)
{
struct sky2_port *sky2 = netdev_priv(dev);
@@ -4213,31 +4167,36 @@ static int sky2_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom
return sky2_vpd_write(sky2->hw, cap, data, eeprom->offset, eeprom->len);
}
-static int sky2_set_flags(struct net_device *dev, u32 data)
+static u32 sky2_fix_features(struct net_device *dev, u32 features)
{
- struct sky2_port *sky2 = netdev_priv(dev);
- unsigned long old_feat = dev->features;
- u32 supported = 0;
- int rc;
+ const struct sky2_port *sky2 = netdev_priv(dev);
+ const struct sky2_hw *hw = sky2->hw;
- if (!(sky2->hw->flags & SKY2_HW_RSS_BROKEN))
- supported |= ETH_FLAG_RXHASH;
+ /* In order to do Jumbo packets on these chips, need to turn off the
+ * transmit store/forward. Therefore checksum offload won't work.
+ */
+ if (dev->mtu > ETH_DATA_LEN && hw->chip_id == CHIP_ID_YUKON_EC_U)
+ features &= ~(NETIF_F_TSO|NETIF_F_SG|NETIF_F_ALL_CSUM);
- if (!(sky2->hw->flags & SKY2_HW_VLAN_BROKEN))
- supported |= ETH_FLAG_RXVLAN | ETH_FLAG_TXVLAN;
+ return features;
+}
- printk(KERN_DEBUG "sky2 set_flags: supported %x data %x\n",
- supported, data);
+static int sky2_set_features(struct net_device *dev, u32 features)
+{
+ struct sky2_port *sky2 = netdev_priv(dev);
+ u32 changed = dev->features ^ features;
- rc = ethtool_op_set_flags(dev, data, supported);
- if (rc)
- return rc;
+ if (changed & NETIF_F_RXCSUM) {
+ u32 on = features & NETIF_F_RXCSUM;
+ sky2_write32(sky2->hw, Q_ADDR(rxqaddr[sky2->port], Q_CSR),
+ on ? BMU_ENA_RX_CHKSUM : BMU_DIS_RX_CHKSUM);
+ }
- if ((old_feat ^ dev->features) & NETIF_F_RXHASH)
- rx_set_rss(dev);
+ if (changed & NETIF_F_RXHASH)
+ rx_set_rss(dev, features);
- if ((old_feat ^ dev->features) & NETIF_F_ALL_VLAN)
- sky2_vlan_mode(dev);
+ if (changed & (NETIF_F_HW_VLAN_TX|NETIF_F_HW_VLAN_RX))
+ sky2_vlan_mode(dev, features);
return 0;
}
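
sky2_fix_features()/sky2_set_features() replace the per-offload ethtool hooks: fix_features masks out combinations the hardware cannot honour, and set_features applies whatever survives. A stripped-down sketch of the same shape, using the u32 feature type of this kernel generation (hypothetical foo driver; foo_hw_rx_csum() is a stand-in for the register write):

#include <linux/netdevice.h>
#include <linux/if_ether.h>

static void foo_hw_rx_csum(struct net_device *dev, bool on)
{
        /* hypothetical: toggle the RX checksum enable bit in hardware here */
}

static u32 foo_fix_features(struct net_device *dev, u32 features)
{
        /* Example constraint: no TX offloads with jumbo frames. */
        if (dev->mtu > ETH_DATA_LEN)
                features &= ~(NETIF_F_TSO | NETIF_F_SG | NETIF_F_ALL_CSUM);
        return features;
}

static int foo_set_features(struct net_device *dev, u32 features)
{
        u32 changed = dev->features ^ features;

        if (changed & NETIF_F_RXCSUM)
                foo_hw_rx_csum(dev, features & NETIF_F_RXCSUM);
        return 0;
}

static const struct net_device_ops foo_netdev_ops = {
        .ndo_fix_features = foo_fix_features,
        .ndo_set_features = foo_set_features,
};
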
@@ -4257,11 +4216,6 @@ static const struct ethtool_ops sky2_ethtool_ops = {
.get_eeprom_len = sky2_get_eeprom_len,
.get_eeprom = sky2_get_eeprom,
.set_eeprom = sky2_set_eeprom,
- .set_sg = ethtool_op_set_sg,
- .set_tx_csum = sky2_set_tx_csum,
- .set_tso = sky2_set_tso,
- .get_rx_csum = sky2_get_rx_csum,
- .set_rx_csum = sky2_set_rx_csum,
.get_strings = sky2_get_strings,
.get_coalesce = sky2_get_coalesce,
.set_coalesce = sky2_set_coalesce,
@@ -4269,11 +4223,9 @@ static const struct ethtool_ops sky2_ethtool_ops = {
.set_ringparam = sky2_set_ringparam,
.get_pauseparam = sky2_get_pauseparam,
.set_pauseparam = sky2_set_pauseparam,
- .phys_id = sky2_phys_id,
+ .set_phys_id = sky2_set_phys_id,
.get_sset_count = sky2_get_sset_count,
.get_ethtool_stats = sky2_get_ethtool_stats,
- .set_flags = sky2_set_flags,
- .get_flags = ethtool_op_get_flags,
};
#ifdef CONFIG_SKY2_DEBUG
@@ -4553,6 +4505,8 @@ static const struct net_device_ops sky2_netdev_ops[2] = {
.ndo_set_mac_address = sky2_set_mac_address,
.ndo_set_multicast_list = sky2_set_multicast,
.ndo_change_mtu = sky2_change_mtu,
+ .ndo_fix_features = sky2_fix_features,
+ .ndo_set_features = sky2_set_features,
.ndo_tx_timeout = sky2_tx_timeout,
.ndo_get_stats64 = sky2_get_stats,
#ifdef CONFIG_NET_POLL_CONTROLLER
@@ -4568,6 +4522,8 @@ static const struct net_device_ops sky2_netdev_ops[2] = {
.ndo_set_mac_address = sky2_set_mac_address,
.ndo_set_multicast_list = sky2_set_multicast,
.ndo_change_mtu = sky2_change_mtu,
+ .ndo_fix_features = sky2_fix_features,
+ .ndo_set_features = sky2_set_features,
.ndo_tx_timeout = sky2_tx_timeout,
.ndo_get_stats64 = sky2_get_stats,
},
@@ -4600,7 +4556,7 @@ static __devinit struct net_device *sky2_init_netdev(struct sky2_hw *hw,
/* Auto speed and flow control */
sky2->flags = SKY2_FLAG_AUTO_SPEED | SKY2_FLAG_AUTO_PAUSE;
if (hw->chip_id != CHIP_ID_YUKON_XL)
- sky2->flags |= SKY2_FLAG_RX_CHECKSUM;
+ dev->hw_features |= NETIF_F_RXCSUM;
sky2->flow_mode = FC_BOTH;
@@ -4619,18 +4575,21 @@ static __devinit struct net_device *sky2_init_netdev(struct sky2_hw *hw,
sky2->port = port;
- dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG
- | NETIF_F_TSO | NETIF_F_GRO;
+ dev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_SG | NETIF_F_TSO;
if (highmem)
dev->features |= NETIF_F_HIGHDMA;
/* Enable receive hashing unless hardware is known broken */
if (!(hw->flags & SKY2_HW_RSS_BROKEN))
- dev->features |= NETIF_F_RXHASH;
+ dev->hw_features |= NETIF_F_RXHASH;
+
+ if (!(hw->flags & SKY2_HW_VLAN_BROKEN)) {
+ dev->hw_features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
+ dev->vlan_features |= SKY2_VLAN_OFFLOADS;
+ }
- if (!(hw->flags & SKY2_HW_VLAN_BROKEN))
- dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
+ dev->features |= dev->hw_features;
/* read the mac address */
memcpy_fromio(dev->dev_addr, hw->regs + B2_MAC_1 + port * 8, ETH_ALEN);
diff --git a/drivers/net/sky2.h b/drivers/net/sky2.h
index 6861b0e8db9a..318c9ae7bf91 100644
--- a/drivers/net/sky2.h
+++ b/drivers/net/sky2.h
@@ -1194,7 +1194,7 @@ enum {
PHY_ST_PRE_SUP = 1<<6, /* Bit 6: Preamble Suppression */
PHY_ST_AN_OVER = 1<<5, /* Bit 5: Auto-Negotiation Over */
- PHY_ST_REM_FLT = 1<<4, /* Bit 4: Remote Fault Condition Occured */
+ PHY_ST_REM_FLT = 1<<4, /* Bit 4: Remote Fault Condition Occurred */
PHY_ST_AN_CAP = 1<<3, /* Bit 3: Auto-Negotiation Capability */
PHY_ST_LSYNC = 1<<2, /* Bit 2: Link Synchronized */
PHY_ST_JAB_DET = 1<<1, /* Bit 1: Jabber Detected */
@@ -1725,8 +1725,8 @@ enum {
GM_GPSR_LINK_UP = 1<<12, /* Bit 12: Link Up Status */
GM_GPSR_PAUSE = 1<<11, /* Bit 11: Pause State */
GM_GPSR_TX_ACTIVE = 1<<10, /* Bit 10: Tx in Progress */
- GM_GPSR_EXC_COL = 1<<9, /* Bit 9: Excessive Collisions Occured */
- GM_GPSR_LAT_COL = 1<<8, /* Bit 8: Late Collisions Occured */
+ GM_GPSR_EXC_COL = 1<<9, /* Bit 9: Excessive Collisions Occurred */
+ GM_GPSR_LAT_COL = 1<<8, /* Bit 8: Late Collisions Occurred */
GM_GPSR_PHY_ST_CH = 1<<5, /* Bit 5: PHY Status Change */
GM_GPSR_GIG_SPEED = 1<<4, /* Bit 4: Gigabit Speed (1 = 1000 Mbps) */
@@ -2254,7 +2254,6 @@ struct sky2_port {
u8 wol; /* WAKE_ bits */
u8 duplex; /* DUPLEX_HALF, DUPLEX_FULL */
u16 flags;
-#define SKY2_FLAG_RX_CHECKSUM 0x0001
#define SKY2_FLAG_AUTO_SPEED 0x0002
#define SKY2_FLAG_AUTO_PAUSE 0x0004
diff --git a/drivers/net/slip.c b/drivers/net/slip.c
index 86cbb9ea2f26..8ec1a9a0bb9a 100644
--- a/drivers/net/slip.c
+++ b/drivers/net/slip.c
@@ -853,7 +853,9 @@ static int slip_open(struct tty_struct *tty)
/* Done. We have linked the TTY line to a channel. */
rtnl_unlock();
tty->receive_room = 65536; /* We don't flow control */
- return sl->dev->base_addr;
+
+ /* TTY layer expects 0 on success */
+ return 0;
err_free_bufs:
sl_free_bufs(sl);
diff --git a/drivers/net/smc-mca.c b/drivers/net/smc-mca.c
index d07c39cb4daf..0f29f261fcfe 100644
--- a/drivers/net/smc-mca.c
+++ b/drivers/net/smc-mca.c
@@ -156,7 +156,7 @@ static const struct {
{ 14, 15 }
};
-static short smc_mca_adapter_ids[] __initdata = {
+static const short smc_mca_adapter_ids[] __devinitconst = {
0x61c8,
0x61c9,
0x6fc0,
@@ -168,7 +168,7 @@ static short smc_mca_adapter_ids[] __initdata = {
0x0000
};
-static char *smc_mca_adapter_names[] __initdata = {
+static const char *const smc_mca_adapter_names[] __devinitconst = {
"SMC Ethercard PLUS Elite/A BNC/AUI (WD8013EP/A)",
"SMC Ethercard PLUS Elite/A UTP/AUI (WD8013WP/A)",
"WD Ethercard PLUS/A (WD8003E/A or WD8003ET/A)",
@@ -199,7 +199,7 @@ static const struct net_device_ops ultramca_netdev_ops = {
#endif
};
-static int __init ultramca_probe(struct device *gen_dev)
+static int __devinit ultramca_probe(struct device *gen_dev)
{
unsigned short ioaddr;
struct net_device *dev;
diff --git a/drivers/net/smc911x.c b/drivers/net/smc911x.c
index 66831f378396..053863aefb12 100644
--- a/drivers/net/smc911x.c
+++ b/drivers/net/smc911x.c
@@ -1488,9 +1488,9 @@ smc911x_ethtool_getsettings(struct net_device *dev, struct ethtool_cmd *cmd)
SUPPORTED_TP | SUPPORTED_AUI;
if (lp->ctl_rspeed == 10)
- cmd->speed = SPEED_10;
+ ethtool_cmd_speed_set(cmd, SPEED_10);
else if (lp->ctl_rspeed == 100)
- cmd->speed = SPEED_100;
+ ethtool_cmd_speed_set(cmd, SPEED_100);
cmd->autoneg = AUTONEG_DISABLE;
if (lp->mii.phy_id==1)
diff --git a/drivers/net/smc91x.c b/drivers/net/smc91x.c
index 43654a3bb0ec..dc4805f473e3 100644
--- a/drivers/net/smc91x.c
+++ b/drivers/net/smc91x.c
@@ -1565,9 +1565,9 @@ smc_ethtool_getsettings(struct net_device *dev, struct ethtool_cmd *cmd)
SUPPORTED_TP | SUPPORTED_AUI;
if (lp->ctl_rspeed == 10)
- cmd->speed = SPEED_10;
+ ethtool_cmd_speed_set(cmd, SPEED_10);
else if (lp->ctl_rspeed == 100)
- cmd->speed = SPEED_100;
+ ethtool_cmd_speed_set(cmd, SPEED_100);
cmd->autoneg = AUTONEG_DISABLE;
cmd->transceiver = XCVR_INTERNAL;
diff --git a/drivers/net/smc91x.h b/drivers/net/smc91x.h
index 68d48ab6eacf..5f53fbbf67be 100644
--- a/drivers/net/smc91x.h
+++ b/drivers/net/smc91x.h
@@ -921,7 +921,7 @@ static const char * chip_ids[ 16 ] = {
* Hack Alert: Some setups just can't write 8 or 16 bits reliably when not
* aligned to a 32 bit boundary. I tell you that does exist!
* Fortunately the affected register accesses can be easily worked around
- * since we can write zeroes to the preceeding 16 bits without adverse
+ * since we can write zeroes to the preceding 16 bits without adverse
* effects and use a 32-bit access.
*
* Enforce it on any 32-bit capable setup for now.
diff --git a/drivers/net/smsc911x.c b/drivers/net/smsc911x.c
index 1566259c1f27..c6d47d10590c 100644
--- a/drivers/net/smsc911x.c
+++ b/drivers/net/smsc911x.c
@@ -29,6 +29,8 @@
*
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/crc32.h>
#include <linux/delay.h>
#include <linux/errno.h>
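
The pr_fmt define added here makes every later pr_*() call in the file carry the module name automatically. A tiny illustration, assuming a hypothetical module built as "foo":

/* Must appear before the printk headers are pulled in. */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>

static void foo_announce(void)
{
        pr_info("Driver version %s\n", "1.0");  /* logs "foo: Driver version 1.0" */
}
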
@@ -69,6 +71,17 @@ static int debug = 3;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
+struct smsc911x_data;
+
+struct smsc911x_ops {
+ u32 (*reg_read)(struct smsc911x_data *pdata, u32 reg);
+ void (*reg_write)(struct smsc911x_data *pdata, u32 reg, u32 val);
+ void (*rx_readfifo)(struct smsc911x_data *pdata,
+ unsigned int *buf, unsigned int wordcount);
+ void (*tx_writefifo)(struct smsc911x_data *pdata,
+ unsigned int *buf, unsigned int wordcount);
+};
+
struct smsc911x_data {
void __iomem *ioaddr;
@@ -116,8 +129,14 @@ struct smsc911x_data {
unsigned int clear_bits_mask;
unsigned int hashhi;
unsigned int hashlo;
+
+ /* register access functions */
+ const struct smsc911x_ops *ops;
};
+/* Easy access to information */
+#define __smsc_shift(pdata, reg) ((reg) << ((pdata)->config.shift))
+
static inline u32 __smsc911x_reg_read(struct smsc911x_data *pdata, u32 reg)
{
if (pdata->config.flags & SMSC911X_USE_32BIT)
@@ -131,13 +150,29 @@ static inline u32 __smsc911x_reg_read(struct smsc911x_data *pdata, u32 reg)
return 0;
}
+static inline u32
+__smsc911x_reg_read_shift(struct smsc911x_data *pdata, u32 reg)
+{
+ if (pdata->config.flags & SMSC911X_USE_32BIT)
+ return readl(pdata->ioaddr + __smsc_shift(pdata, reg));
+
+ if (pdata->config.flags & SMSC911X_USE_16BIT)
+ return (readw(pdata->ioaddr +
+ __smsc_shift(pdata, reg)) & 0xFFFF) |
+ ((readw(pdata->ioaddr +
+ __smsc_shift(pdata, reg + 2)) & 0xFFFF) << 16);
+
+ BUG();
+ return 0;
+}
+
static inline u32 smsc911x_reg_read(struct smsc911x_data *pdata, u32 reg)
{
u32 data;
unsigned long flags;
spin_lock_irqsave(&pdata->dev_lock, flags);
- data = __smsc911x_reg_read(pdata, reg);
+ data = pdata->ops->reg_read(pdata, reg);
spin_unlock_irqrestore(&pdata->dev_lock, flags);
return data;
@@ -160,13 +195,32 @@ static inline void __smsc911x_reg_write(struct smsc911x_data *pdata, u32 reg,
BUG();
}
+static inline void
+__smsc911x_reg_write_shift(struct smsc911x_data *pdata, u32 reg, u32 val)
+{
+ if (pdata->config.flags & SMSC911X_USE_32BIT) {
+ writel(val, pdata->ioaddr + __smsc_shift(pdata, reg));
+ return;
+ }
+
+ if (pdata->config.flags & SMSC911X_USE_16BIT) {
+ writew(val & 0xFFFF,
+ pdata->ioaddr + __smsc_shift(pdata, reg));
+ writew((val >> 16) & 0xFFFF,
+ pdata->ioaddr + __smsc_shift(pdata, reg + 2));
+ return;
+ }
+
+ BUG();
+}
+
static inline void smsc911x_reg_write(struct smsc911x_data *pdata, u32 reg,
u32 val)
{
unsigned long flags;
spin_lock_irqsave(&pdata->dev_lock, flags);
- __smsc911x_reg_write(pdata, reg, val);
+ pdata->ops->reg_write(pdata, reg, val);
spin_unlock_irqrestore(&pdata->dev_lock, flags);
}
@@ -202,6 +256,40 @@ out:
spin_unlock_irqrestore(&pdata->dev_lock, flags);
}
+/* Writes a packet to the TX_DATA_FIFO - shifted version */
+static inline void
+smsc911x_tx_writefifo_shift(struct smsc911x_data *pdata, unsigned int *buf,
+ unsigned int wordcount)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&pdata->dev_lock, flags);
+
+ if (pdata->config.flags & SMSC911X_SWAP_FIFO) {
+ while (wordcount--)
+ __smsc911x_reg_write_shift(pdata, TX_DATA_FIFO,
+ swab32(*buf++));
+ goto out;
+ }
+
+ if (pdata->config.flags & SMSC911X_USE_32BIT) {
+ writesl(pdata->ioaddr + __smsc_shift(pdata,
+ TX_DATA_FIFO), buf, wordcount);
+ goto out;
+ }
+
+ if (pdata->config.flags & SMSC911X_USE_16BIT) {
+ while (wordcount--)
+ __smsc911x_reg_write_shift(pdata,
+ TX_DATA_FIFO, *buf++);
+ goto out;
+ }
+
+ BUG();
+out:
+ spin_unlock_irqrestore(&pdata->dev_lock, flags);
+}
+
/* Reads a packet out of the RX_DATA_FIFO */
static inline void
smsc911x_rx_readfifo(struct smsc911x_data *pdata, unsigned int *buf,
@@ -234,6 +322,40 @@ out:
spin_unlock_irqrestore(&pdata->dev_lock, flags);
}
+/* Reads a packet out of the RX_DATA_FIFO - shifted version */
+static inline void
+smsc911x_rx_readfifo_shift(struct smsc911x_data *pdata, unsigned int *buf,
+ unsigned int wordcount)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&pdata->dev_lock, flags);
+
+ if (pdata->config.flags & SMSC911X_SWAP_FIFO) {
+ while (wordcount--)
+ *buf++ = swab32(__smsc911x_reg_read_shift(pdata,
+ RX_DATA_FIFO));
+ goto out;
+ }
+
+ if (pdata->config.flags & SMSC911X_USE_32BIT) {
+ readsl(pdata->ioaddr + __smsc_shift(pdata,
+ RX_DATA_FIFO), buf, wordcount);
+ goto out;
+ }
+
+ if (pdata->config.flags & SMSC911X_USE_16BIT) {
+ while (wordcount--)
+ *buf++ = __smsc911x_reg_read_shift(pdata,
+ RX_DATA_FIFO);
+ goto out;
+ }
+
+ BUG();
+out:
+ spin_unlock_irqrestore(&pdata->dev_lock, flags);
+}
+
/* waits for MAC not busy, with timeout. Only called by smsc911x_mac_read
* and smsc911x_mac_write, so assumes mac_lock is held */
static int smsc911x_mac_complete(struct smsc911x_data *pdata)
@@ -248,8 +370,8 @@ static int smsc911x_mac_complete(struct smsc911x_data *pdata)
if (!(val & MAC_CSR_CMD_CSR_BUSY_))
return 0;
}
- SMSC_WARNING(HW, "Timed out waiting for MAC not BUSY. "
- "MAC_CSR_CMD: 0x%08X", val);
+ SMSC_WARN(pdata, hw, "Timed out waiting for MAC not BUSY. "
+ "MAC_CSR_CMD: 0x%08X", val);
return -EIO;
}
@@ -262,7 +384,7 @@ static u32 smsc911x_mac_read(struct smsc911x_data *pdata, unsigned int offset)
temp = smsc911x_reg_read(pdata, MAC_CSR_CMD);
if (unlikely(temp & MAC_CSR_CMD_CSR_BUSY_)) {
- SMSC_WARNING(HW, "MAC busy at entry");
+ SMSC_WARN(pdata, hw, "MAC busy at entry");
return 0xFFFFFFFF;
}
@@ -277,7 +399,7 @@ static u32 smsc911x_mac_read(struct smsc911x_data *pdata, unsigned int offset)
if (likely(smsc911x_mac_complete(pdata) == 0))
return smsc911x_reg_read(pdata, MAC_CSR_DATA);
- SMSC_WARNING(HW, "MAC busy after read");
+ SMSC_WARN(pdata, hw, "MAC busy after read");
return 0xFFFFFFFF;
}
@@ -291,8 +413,8 @@ static void smsc911x_mac_write(struct smsc911x_data *pdata,
temp = smsc911x_reg_read(pdata, MAC_CSR_CMD);
if (unlikely(temp & MAC_CSR_CMD_CSR_BUSY_)) {
- SMSC_WARNING(HW,
- "smsc911x_mac_write failed, MAC busy at entry");
+ SMSC_WARN(pdata, hw,
+ "smsc911x_mac_write failed, MAC busy at entry");
return;
}
@@ -310,8 +432,7 @@ static void smsc911x_mac_write(struct smsc911x_data *pdata,
if (likely(smsc911x_mac_complete(pdata) == 0))
return;
- SMSC_WARNING(HW,
- "smsc911x_mac_write failed, MAC busy after write");
+ SMSC_WARN(pdata, hw, "smsc911x_mac_write failed, MAC busy after write");
}
/* Get a phy register */
@@ -326,8 +447,7 @@ static int smsc911x_mii_read(struct mii_bus *bus, int phyaddr, int regidx)
/* Confirm MII not busy */
if (unlikely(smsc911x_mac_read(pdata, MII_ACC) & MII_ACC_MII_BUSY_)) {
- SMSC_WARNING(HW,
- "MII is busy in smsc911x_mii_read???");
+ SMSC_WARN(pdata, hw, "MII is busy in smsc911x_mii_read???");
reg = -EIO;
goto out;
}
@@ -343,7 +463,7 @@ static int smsc911x_mii_read(struct mii_bus *bus, int phyaddr, int regidx)
goto out;
}
- SMSC_WARNING(HW, "Timed out waiting for MII read to finish");
+ SMSC_WARN(pdata, hw, "Timed out waiting for MII read to finish");
reg = -EIO;
out:
@@ -364,8 +484,7 @@ static int smsc911x_mii_write(struct mii_bus *bus, int phyaddr, int regidx,
/* Confirm MII not busy */
if (unlikely(smsc911x_mac_read(pdata, MII_ACC) & MII_ACC_MII_BUSY_)) {
- SMSC_WARNING(HW,
- "MII is busy in smsc911x_mii_write???");
+ SMSC_WARN(pdata, hw, "MII is busy in smsc911x_mii_write???");
reg = -EIO;
goto out;
}
@@ -385,7 +504,7 @@ static int smsc911x_mii_write(struct mii_bus *bus, int phyaddr, int regidx,
goto out;
}
- SMSC_WARNING(HW, "Timed out waiting for MII write to finish");
+ SMSC_WARN(pdata, hw, "Timed out waiting for MII write to finish");
reg = -EIO;
out:
@@ -426,18 +545,20 @@ static void smsc911x_phy_initialise_external(struct smsc911x_data *pdata)
unsigned int hwcfg = smsc911x_reg_read(pdata, HW_CFG);
if (pdata->config.flags & SMSC911X_FORCE_INTERNAL_PHY) {
- SMSC_TRACE(HW, "Forcing internal PHY");
+ SMSC_TRACE(pdata, hw, "Forcing internal PHY");
pdata->using_extphy = 0;
} else if (pdata->config.flags & SMSC911X_FORCE_EXTERNAL_PHY) {
- SMSC_TRACE(HW, "Forcing external PHY");
+ SMSC_TRACE(pdata, hw, "Forcing external PHY");
smsc911x_phy_enable_external(pdata);
pdata->using_extphy = 1;
} else if (hwcfg & HW_CFG_EXT_PHY_DET_) {
- SMSC_TRACE(HW, "HW_CFG EXT_PHY_DET set, using external PHY");
+ SMSC_TRACE(pdata, hw,
+ "HW_CFG EXT_PHY_DET set, using external PHY");
smsc911x_phy_enable_external(pdata);
pdata->using_extphy = 1;
} else {
- SMSC_TRACE(HW, "HW_CFG EXT_PHY_DET clear, using internal PHY");
+ SMSC_TRACE(pdata, hw,
+ "HW_CFG EXT_PHY_DET clear, using internal PHY");
pdata->using_extphy = 0;
}
}
@@ -499,7 +620,7 @@ static int smsc911x_phy_check_loopbackpkt(struct smsc911x_data *pdata)
wrsz += (u32)((ulong)pdata->loopback_tx_pkt & 0x3);
wrsz >>= 2;
- smsc911x_tx_writefifo(pdata, (unsigned int *)bufp, wrsz);
+ pdata->ops->tx_writefifo(pdata, (unsigned int *)bufp, wrsz);
/* Wait till transmit is done */
i = 60;
@@ -509,13 +630,13 @@ static int smsc911x_phy_check_loopbackpkt(struct smsc911x_data *pdata)
} while ((i--) && (!status));
if (!status) {
- SMSC_WARNING(HW, "Failed to transmit "
- "during loopback test");
+ SMSC_WARN(pdata, hw,
+ "Failed to transmit during loopback test");
continue;
}
if (status & TX_STS_ES_) {
- SMSC_WARNING(HW, "Transmit encountered "
- "errors during loopback test");
+ SMSC_WARN(pdata, hw,
+ "Transmit encountered errors during loopback test");
continue;
}
@@ -527,13 +648,13 @@ static int smsc911x_phy_check_loopbackpkt(struct smsc911x_data *pdata)
} while ((i--) && (!status));
if (!status) {
- SMSC_WARNING(HW,
- "Failed to receive during loopback test");
+ SMSC_WARN(pdata, hw,
+ "Failed to receive during loopback test");
continue;
}
if (status & RX_STS_ES_) {
- SMSC_WARNING(HW, "Receive encountered "
- "errors during loopback test");
+ SMSC_WARN(pdata, hw,
+ "Receive encountered errors during loopback test");
continue;
}
@@ -543,12 +664,12 @@ static int smsc911x_phy_check_loopbackpkt(struct smsc911x_data *pdata)
rdsz += (u32)((ulong)pdata->loopback_rx_pkt & 0x3);
rdsz >>= 2;
- smsc911x_rx_readfifo(pdata, (unsigned int *)bufp, rdsz);
+ pdata->ops->rx_readfifo(pdata, (unsigned int *)bufp, rdsz);
if (pktlength != (MIN_PACKET_SIZE + 4)) {
- SMSC_WARNING(HW, "Unexpected packet size "
- "during loop back test, size=%d, will retry",
- pktlength);
+ SMSC_WARN(pdata, hw, "Unexpected packet size "
+ "during loop back test, size=%d, will retry",
+ pktlength);
} else {
unsigned int j;
int mismatch = 0;
@@ -560,12 +681,12 @@ static int smsc911x_phy_check_loopbackpkt(struct smsc911x_data *pdata)
}
}
if (!mismatch) {
- SMSC_TRACE(HW, "Successfully verified "
+ SMSC_TRACE(pdata, hw, "Successfully verified "
"loopback packet");
return 0;
} else {
- SMSC_WARNING(HW, "Data mismatch "
- "during loop back test, will retry");
+ SMSC_WARN(pdata, hw, "Data mismatch "
+ "during loop back test, will retry");
}
}
}
@@ -582,7 +703,7 @@ static int smsc911x_phy_reset(struct smsc911x_data *pdata)
BUG_ON(!phy_dev);
BUG_ON(!phy_dev->bus);
- SMSC_TRACE(HW, "Performing PHY BCR Reset");
+ SMSC_TRACE(pdata, hw, "Performing PHY BCR Reset");
smsc911x_mii_write(phy_dev->bus, phy_dev->addr, MII_BMCR, BMCR_RESET);
do {
msleep(1);
@@ -591,7 +712,7 @@ static int smsc911x_phy_reset(struct smsc911x_data *pdata)
} while ((i--) && (temp & BMCR_RESET));
if (temp & BMCR_RESET) {
- SMSC_WARNING(HW, "PHY reset failed to complete.");
+ SMSC_WARN(pdata, hw, "PHY reset failed to complete");
return -EIO;
}
/* Extra delay required because the phy may not be completed with
@@ -695,11 +816,11 @@ static void smsc911x_phy_update_flowcontrol(struct smsc911x_data *pdata)
else
afc &= ~0xF;
- SMSC_TRACE(HW, "rx pause %s, tx pause %s",
- (cap & FLOW_CTRL_RX ? "enabled" : "disabled"),
- (cap & FLOW_CTRL_TX ? "enabled" : "disabled"));
+ SMSC_TRACE(pdata, hw, "rx pause %s, tx pause %s",
+ (cap & FLOW_CTRL_RX ? "enabled" : "disabled"),
+ (cap & FLOW_CTRL_TX ? "enabled" : "disabled"));
} else {
- SMSC_TRACE(HW, "half duplex");
+ SMSC_TRACE(pdata, hw, "half duplex");
flow = 0;
afc |= 0xF;
}
@@ -722,17 +843,17 @@ static void smsc911x_phy_adjust_link(struct net_device *dev)
if (phy_dev->duplex != pdata->last_duplex) {
unsigned int mac_cr;
- SMSC_TRACE(HW, "duplex state has changed");
+ SMSC_TRACE(pdata, hw, "duplex state has changed");
spin_lock_irqsave(&pdata->mac_lock, flags);
mac_cr = smsc911x_mac_read(pdata, MAC_CR);
if (phy_dev->duplex) {
- SMSC_TRACE(HW,
- "configuring for full duplex mode");
+ SMSC_TRACE(pdata, hw,
+ "configuring for full duplex mode");
mac_cr |= MAC_CR_FDPX_;
} else {
- SMSC_TRACE(HW,
- "configuring for half duplex mode");
+ SMSC_TRACE(pdata, hw,
+ "configuring for half duplex mode");
mac_cr &= ~MAC_CR_FDPX_;
}
smsc911x_mac_write(pdata, MAC_CR, mac_cr);
@@ -744,9 +865,9 @@ static void smsc911x_phy_adjust_link(struct net_device *dev)
carrier = netif_carrier_ok(dev);
if (carrier != pdata->last_carrier) {
- SMSC_TRACE(HW, "carrier state has changed");
+ SMSC_TRACE(pdata, hw, "carrier state has changed");
if (carrier) {
- SMSC_TRACE(HW, "configuring for carrier OK");
+ SMSC_TRACE(pdata, hw, "configuring for carrier OK");
if ((pdata->gpio_orig_setting & GPIO_CFG_LED1_EN_) &&
(!pdata->using_extphy)) {
/* Restore original GPIO configuration */
@@ -755,7 +876,7 @@ static void smsc911x_phy_adjust_link(struct net_device *dev)
pdata->gpio_setting);
}
} else {
- SMSC_TRACE(HW, "configuring for no carrier");
+ SMSC_TRACE(pdata, hw, "configuring for no carrier");
/* Check global setting that LED1
* usage is 10/100 indicator */
pdata->gpio_setting = smsc911x_reg_read(pdata,
@@ -787,25 +908,25 @@ static int smsc911x_mii_probe(struct net_device *dev)
/* find the first phy */
phydev = phy_find_first(pdata->mii_bus);
if (!phydev) {
- pr_err("%s: no PHY found\n", dev->name);
+ netdev_err(dev, "no PHY found\n");
return -ENODEV;
}
- SMSC_TRACE(PROBE, "PHY: addr %d, phy_id 0x%08X",
- phydev->addr, phydev->phy_id);
+ SMSC_TRACE(pdata, probe, "PHY: addr %d, phy_id 0x%08X",
+ phydev->addr, phydev->phy_id);
ret = phy_connect_direct(dev, phydev,
&smsc911x_phy_adjust_link, 0,
pdata->config.phy_interface);
if (ret) {
- pr_err("%s: Could not attach to PHY\n", dev->name);
+ netdev_err(dev, "Could not attach to PHY\n");
return ret;
}
- pr_info("%s: attached PHY driver [%s] (mii_bus:phy_addr=%s, irq=%d)\n",
- dev->name, phydev->drv->name,
- dev_name(&phydev->dev), phydev->irq);
+ netdev_info(dev,
+ "attached PHY driver [%s] (mii_bus:phy_addr=%s, irq=%d)\n",
+ phydev->drv->name, dev_name(&phydev->dev), phydev->irq);
/* mask with MAC supported features */
phydev->supported &= (PHY_BASIC_FEATURES | SUPPORTED_Pause |
@@ -818,13 +939,13 @@ static int smsc911x_mii_probe(struct net_device *dev)
#ifdef USE_PHY_WORK_AROUND
if (smsc911x_phy_loopbacktest(dev) < 0) {
- SMSC_WARNING(HW, "Failed Loop Back Test");
+ SMSC_WARN(pdata, hw, "Failed Loop Back Test");
return -ENODEV;
}
- SMSC_TRACE(HW, "Passed Loop Back Test");
+ SMSC_TRACE(pdata, hw, "Passed Loop Back Test");
#endif /* USE_PHY_WORK_AROUND */
- SMSC_TRACE(HW, "phy initialised successfully");
+ SMSC_TRACE(pdata, hw, "phy initialised successfully");
return 0;
}
@@ -860,8 +981,8 @@ static int __devinit smsc911x_mii_init(struct platform_device *pdev,
smsc911x_phy_initialise_external(pdata);
break;
default:
- SMSC_TRACE(HW, "External PHY is not supported, "
- "using internal PHY");
+ SMSC_TRACE(pdata, hw, "External PHY is not supported, "
+ "using internal PHY");
pdata->using_extphy = 0;
break;
}
@@ -872,12 +993,12 @@ static int __devinit smsc911x_mii_init(struct platform_device *pdev,
}
if (mdiobus_register(pdata->mii_bus)) {
- SMSC_WARNING(PROBE, "Error registering mii bus");
+ SMSC_WARN(pdata, probe, "Error registering mii bus");
goto err_out_free_bus_2;
}
if (smsc911x_mii_probe(dev) < 0) {
- SMSC_WARNING(PROBE, "Error registering mii bus");
+ SMSC_WARN(pdata, probe, "Error registering mii bus");
goto err_out_unregister_bus_3;
}
@@ -913,8 +1034,7 @@ static void smsc911x_tx_update_txcounters(struct net_device *dev)
* does not reference a hardware defined reserved bit
* but rather a driver defined one.
*/
- SMSC_WARNING(HW,
- "Packet tag reserved bit is high");
+ SMSC_WARN(pdata, hw, "Packet tag reserved bit is high");
} else {
if (unlikely(tx_stat & TX_STS_ES_)) {
dev->stats.tx_errors++;
@@ -977,8 +1097,8 @@ smsc911x_rx_fastforward(struct smsc911x_data *pdata, unsigned int pktbytes)
} while ((val & RX_DP_CTRL_RX_FFWD_) && --timeout);
if (unlikely(timeout == 0))
- SMSC_WARNING(HW, "Timed out waiting for "
- "RX FFWD to finish, RX_DP_CTRL: 0x%08X", val);
+ SMSC_WARN(pdata, hw, "Timed out waiting for "
+ "RX FFWD to finish, RX_DP_CTRL: 0x%08X", val);
} else {
unsigned int temp;
while (pktwords--)
@@ -1021,8 +1141,8 @@ static int smsc911x_poll(struct napi_struct *napi, int budget)
smsc911x_rx_counterrors(dev, rxstat);
if (unlikely(rxstat & RX_STS_ES_)) {
- SMSC_WARNING(RX_ERR,
- "Discarding packet with error bit set");
+ SMSC_WARN(pdata, rx_err,
+ "Discarding packet with error bit set");
/* Packet has an error, discard it and continue with
* the next */
smsc911x_rx_fastforward(pdata, pktwords);
@@ -1032,8 +1152,8 @@ static int smsc911x_poll(struct napi_struct *napi, int budget)
skb = netdev_alloc_skb(dev, pktlength + NET_IP_ALIGN);
if (unlikely(!skb)) {
- SMSC_WARNING(RX_ERR,
- "Unable to allocate skb for rx packet");
+ SMSC_WARN(pdata, rx_err,
+ "Unable to allocate skb for rx packet");
/* Drop the packet and stop this polling iteration */
smsc911x_rx_fastforward(pdata, pktwords);
dev->stats.rx_dropped++;
@@ -1046,8 +1166,8 @@ static int smsc911x_poll(struct napi_struct *napi, int budget)
/* Align IP on 16B boundary */
skb_reserve(skb, NET_IP_ALIGN);
skb_put(skb, pktlength - 4);
- smsc911x_rx_readfifo(pdata, (unsigned int *)skb->head,
- pktwords);
+ pdata->ops->rx_readfifo(pdata,
+ (unsigned int *)skb->head, pktwords);
skb->protocol = eth_type_trans(skb, dev);
skb_checksum_none_assert(skb);
netif_receive_skb(skb);
@@ -1083,8 +1203,8 @@ static void smsc911x_rx_multicast_update(struct smsc911x_data *pdata)
smsc911x_mac_write(pdata, MAC_CR, mac_cr);
smsc911x_mac_write(pdata, HASHH, pdata->hashhi);
smsc911x_mac_write(pdata, HASHL, pdata->hashlo);
- SMSC_TRACE(HW, "maccr 0x%08X, HASHH 0x%08X, HASHL 0x%08X",
- mac_cr, pdata->hashhi, pdata->hashlo);
+ SMSC_TRACE(pdata, hw, "maccr 0x%08X, HASHH 0x%08X, HASHL 0x%08X",
+ mac_cr, pdata->hashhi, pdata->hashlo);
}
static void smsc911x_rx_multicast_update_workaround(struct smsc911x_data *pdata)
@@ -1102,7 +1222,7 @@ static void smsc911x_rx_multicast_update_workaround(struct smsc911x_data *pdata)
/* Check Rx has stopped */
if (smsc911x_mac_read(pdata, MAC_CR) & MAC_CR_RXEN_)
- SMSC_WARNING(DRV, "Rx not stopped");
+ SMSC_WARN(pdata, drv, "Rx not stopped");
/* Perform the update - safe to do now Rx has stopped */
smsc911x_rx_multicast_update(pdata);
@@ -1131,7 +1251,7 @@ static int smsc911x_soft_reset(struct smsc911x_data *pdata)
} while ((--timeout) && (temp & HW_CFG_SRST_));
if (unlikely(temp & HW_CFG_SRST_)) {
- SMSC_WARNING(DRV, "Failed to complete reset");
+ SMSC_WARN(pdata, drv, "Failed to complete reset");
return -EIO;
}
return 0;
@@ -1160,18 +1280,18 @@ static int smsc911x_open(struct net_device *dev)
/* if the phy is not yet registered, retry later*/
if (!pdata->phy_dev) {
- SMSC_WARNING(HW, "phy_dev is NULL");
+ SMSC_WARN(pdata, hw, "phy_dev is NULL");
return -EAGAIN;
}
if (!is_valid_ether_addr(dev->dev_addr)) {
- SMSC_WARNING(HW, "dev_addr is not a valid MAC address");
+ SMSC_WARN(pdata, hw, "dev_addr is not a valid MAC address");
return -EADDRNOTAVAIL;
}
/* Reset the LAN911x */
if (smsc911x_soft_reset(pdata)) {
- SMSC_WARNING(HW, "soft reset failed");
+ SMSC_WARN(pdata, hw, "soft reset failed");
return -EIO;
}
@@ -1191,8 +1311,8 @@ static int smsc911x_open(struct net_device *dev)
}
if (unlikely(timeout == 0))
- SMSC_WARNING(IFUP,
- "Timed out waiting for EEPROM busy bit to clear");
+ SMSC_WARN(pdata, ifup,
+ "Timed out waiting for EEPROM busy bit to clear");
smsc911x_reg_write(pdata, GPIO_CFG, 0x70070000);
@@ -1210,22 +1330,22 @@ static int smsc911x_open(struct net_device *dev)
intcfg = ((10 << 24) | INT_CFG_IRQ_EN_);
if (pdata->config.irq_polarity) {
- SMSC_TRACE(IFUP, "irq polarity: active high");
+ SMSC_TRACE(pdata, ifup, "irq polarity: active high");
intcfg |= INT_CFG_IRQ_POL_;
} else {
- SMSC_TRACE(IFUP, "irq polarity: active low");
+ SMSC_TRACE(pdata, ifup, "irq polarity: active low");
}
if (pdata->config.irq_type) {
- SMSC_TRACE(IFUP, "irq type: push-pull");
+ SMSC_TRACE(pdata, ifup, "irq type: push-pull");
intcfg |= INT_CFG_IRQ_TYPE_;
} else {
- SMSC_TRACE(IFUP, "irq type: open drain");
+ SMSC_TRACE(pdata, ifup, "irq type: open drain");
}
smsc911x_reg_write(pdata, INT_CFG, intcfg);
- SMSC_TRACE(IFUP, "Testing irq handler using IRQ %d", dev->irq);
+ SMSC_TRACE(pdata, ifup, "Testing irq handler using IRQ %d", dev->irq);
pdata->software_irq_signal = 0;
smp_wmb();
@@ -1241,14 +1361,15 @@ static int smsc911x_open(struct net_device *dev)
}
if (!pdata->software_irq_signal) {
- dev_warn(&dev->dev, "ISR failed signaling test (IRQ %d)\n",
- dev->irq);
+ netdev_warn(dev, "ISR failed signaling test (IRQ %d)\n",
+ dev->irq);
return -ENODEV;
}
- SMSC_TRACE(IFUP, "IRQ handler passed test using IRQ %d", dev->irq);
+ SMSC_TRACE(pdata, ifup, "IRQ handler passed test using IRQ %d",
+ dev->irq);
- dev_info(&dev->dev, "SMSC911x/921x identified at %#08lx, IRQ: %d\n",
- (unsigned long)pdata->ioaddr, dev->irq);
+ netdev_info(dev, "SMSC911x/921x identified at %#08lx, IRQ: %d\n",
+ (unsigned long)pdata->ioaddr, dev->irq);
/* Reset the last known duplex and carrier */
pdata->last_duplex = -1;
@@ -1313,7 +1434,7 @@ static int smsc911x_stop(struct net_device *dev)
if (pdata->phy_dev)
phy_stop(pdata->phy_dev);
- SMSC_TRACE(IFDOWN, "Interface stopped");
+ SMSC_TRACE(pdata, ifdown, "Interface stopped");
return 0;
}
@@ -1331,8 +1452,8 @@ static int smsc911x_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
freespace = smsc911x_reg_read(pdata, TX_FIFO_INF) & TX_FIFO_INF_TDFREE_;
if (unlikely(freespace < TX_FIFO_LOW_THRESHOLD))
- SMSC_WARNING(TX_ERR,
- "Tx data fifo low, space available: %d", freespace);
+ SMSC_WARN(pdata, tx_err,
+ "Tx data fifo low, space available: %d", freespace);
/* Word alignment adjustment */
tx_cmd_a = (u32)((ulong)skb->data & 0x03) << 16;
@@ -1350,7 +1471,7 @@ static int smsc911x_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
wrsz += (u32)((ulong)skb->data & 0x3);
wrsz >>= 2;
- smsc911x_tx_writefifo(pdata, (unsigned int *)bufp, wrsz);
+ pdata->ops->tx_writefifo(pdata, (unsigned int *)bufp, wrsz);
freespace -= (skb->len + 32);
dev_kfree_skb(skb);
@@ -1432,7 +1553,7 @@ static void smsc911x_set_multicast_list(struct net_device *dev)
* receiving data */
if (!pdata->multicast_update_pending) {
unsigned int temp;
- SMSC_TRACE(HW, "scheduling mcast update");
+ SMSC_TRACE(pdata, hw, "scheduling mcast update");
pdata->multicast_update_pending = 1;
/* Request the hardware to stop, then perform the
@@ -1474,7 +1595,7 @@ static irqreturn_t smsc911x_irqhandler(int irq, void *dev_id)
if (unlikely(intsts & inten & INT_STS_RXSTOP_INT_)) {
/* Called when there is a multicast update scheduled and
* it is now safe to complete the update */
- SMSC_TRACE(INTR, "RX Stop interrupt");
+ SMSC_TRACE(pdata, intr, "RX Stop interrupt");
smsc911x_reg_write(pdata, INT_STS, INT_STS_RXSTOP_INT_);
if (pdata->multicast_update_pending)
smsc911x_rx_multicast_update_workaround(pdata);
@@ -1491,7 +1612,7 @@ static irqreturn_t smsc911x_irqhandler(int irq, void *dev_id)
}
if (unlikely(intsts & inten & INT_STS_RXE_)) {
- SMSC_TRACE(INTR, "RX Error interrupt");
+ SMSC_TRACE(pdata, intr, "RX Error interrupt");
smsc911x_reg_write(pdata, INT_STS, INT_STS_RXE_);
serviced = IRQ_HANDLED;
}
@@ -1505,8 +1626,7 @@ static irqreturn_t smsc911x_irqhandler(int irq, void *dev_id)
/* Schedule a NAPI poll */
__napi_schedule(&pdata->napi);
} else {
- SMSC_WARNING(RX_ERR,
- "napi_schedule_prep failed");
+ SMSC_WARN(pdata, rx_err, "napi_schedule_prep failed");
}
serviced = IRQ_HANDLED;
}
@@ -1543,7 +1663,7 @@ static int smsc911x_set_mac_address(struct net_device *dev, void *p)
smsc911x_set_hw_mac_address(pdata, dev->dev_addr);
spin_unlock_irq(&pdata->mac_lock);
- dev_info(&dev->dev, "MAC Address: %pM\n", dev->dev_addr);
+ netdev_info(dev, "MAC Address: %pM\n", dev->dev_addr);
return 0;
}
@@ -1649,9 +1769,9 @@ static int smsc911x_eeprom_send_cmd(struct smsc911x_data *pdata, u32 op)
int timeout = 100;
u32 e2cmd;
- SMSC_TRACE(DRV, "op 0x%08x", op);
+ SMSC_TRACE(pdata, drv, "op 0x%08x", op);
if (smsc911x_reg_read(pdata, E2P_CMD) & E2P_CMD_EPC_BUSY_) {
- SMSC_WARNING(DRV, "Busy at start");
+ SMSC_WARN(pdata, drv, "Busy at start");
return -EBUSY;
}
@@ -1664,12 +1784,12 @@ static int smsc911x_eeprom_send_cmd(struct smsc911x_data *pdata, u32 op)
} while ((e2cmd & E2P_CMD_EPC_BUSY_) && (--timeout));
if (!timeout) {
- SMSC_TRACE(DRV, "TIMED OUT");
+ SMSC_TRACE(pdata, drv, "TIMED OUT");
return -EAGAIN;
}
if (e2cmd & E2P_CMD_EPC_TIMEOUT_) {
- SMSC_TRACE(DRV, "Error occured during eeprom operation");
+ SMSC_TRACE(pdata, drv, "Error occurred during eeprom operation");
return -EINVAL;
}
@@ -1682,7 +1802,7 @@ static int smsc911x_eeprom_read_location(struct smsc911x_data *pdata,
u32 op = E2P_CMD_EPC_CMD_READ_ | address;
int ret;
- SMSC_TRACE(DRV, "address 0x%x", address);
+ SMSC_TRACE(pdata, drv, "address 0x%x", address);
ret = smsc911x_eeprom_send_cmd(pdata, op);
if (!ret)
@@ -1698,7 +1818,7 @@ static int smsc911x_eeprom_write_location(struct smsc911x_data *pdata,
u32 temp;
int ret;
- SMSC_TRACE(DRV, "address 0x%x, data 0x%x", address, data);
+ SMSC_TRACE(pdata, drv, "address 0x%x, data 0x%x", address, data);
ret = smsc911x_eeprom_send_cmd(pdata, op);
if (!ret) {
@@ -1811,25 +1931,26 @@ static int __devinit smsc911x_init(struct net_device *dev)
struct smsc911x_data *pdata = netdev_priv(dev);
unsigned int byte_test;
- SMSC_TRACE(PROBE, "Driver Parameters:");
- SMSC_TRACE(PROBE, "LAN base: 0x%08lX",
- (unsigned long)pdata->ioaddr);
- SMSC_TRACE(PROBE, "IRQ: %d", dev->irq);
- SMSC_TRACE(PROBE, "PHY will be autodetected.");
+ SMSC_TRACE(pdata, probe, "Driver Parameters:");
+ SMSC_TRACE(pdata, probe, "LAN base: 0x%08lX",
+ (unsigned long)pdata->ioaddr);
+ SMSC_TRACE(pdata, probe, "IRQ: %d", dev->irq);
+ SMSC_TRACE(pdata, probe, "PHY will be autodetected.");
spin_lock_init(&pdata->dev_lock);
+ spin_lock_init(&pdata->mac_lock);
if (pdata->ioaddr == 0) {
- SMSC_WARNING(PROBE, "pdata->ioaddr: 0x00000000");
+ SMSC_WARN(pdata, probe, "pdata->ioaddr: 0x00000000");
return -ENODEV;
}
/* Check byte ordering */
byte_test = smsc911x_reg_read(pdata, BYTE_TEST);
- SMSC_TRACE(PROBE, "BYTE_TEST: 0x%08X", byte_test);
+ SMSC_TRACE(pdata, probe, "BYTE_TEST: 0x%08X", byte_test);
if (byte_test == 0x43218765) {
- SMSC_TRACE(PROBE, "BYTE_TEST looks swapped, "
- "applying WORD_SWAP");
+ SMSC_TRACE(pdata, probe, "BYTE_TEST looks swapped, "
+ "applying WORD_SWAP");
smsc911x_reg_write(pdata, WORD_SWAP, 0xffffffff);
/* 1 dummy read of BYTE_TEST is needed after a write to
@@ -1840,12 +1961,13 @@ static int __devinit smsc911x_init(struct net_device *dev)
}
if (byte_test != 0x87654321) {
- SMSC_WARNING(DRV, "BYTE_TEST: 0x%08X", byte_test);
+ SMSC_WARN(pdata, drv, "BYTE_TEST: 0x%08X", byte_test);
if (((byte_test >> 16) & 0xFFFF) == (byte_test & 0xFFFF)) {
- SMSC_WARNING(PROBE,
- "top 16 bits equal to bottom 16 bits");
- SMSC_TRACE(PROBE, "This may mean the chip is set "
- "for 32 bit while the bus is reading 16 bit");
+ SMSC_WARN(pdata, probe,
+ "top 16 bits equal to bottom 16 bits");
+ SMSC_TRACE(pdata, probe,
+ "This may mean the chip is set "
+ "for 32 bit while the bus is reading 16 bit");
}
return -ENODEV;
}
@@ -1880,23 +2002,27 @@ static int __devinit smsc911x_init(struct net_device *dev)
break;
default:
- SMSC_WARNING(PROBE, "LAN911x not identified, idrev: 0x%08X",
- pdata->idrev);
+ SMSC_WARN(pdata, probe, "LAN911x not identified, idrev: 0x%08X",
+ pdata->idrev);
return -ENODEV;
}
- SMSC_TRACE(PROBE, "LAN911x identified, idrev: 0x%08X, generation: %d",
- pdata->idrev, pdata->generation);
+ SMSC_TRACE(pdata, probe,
+ "LAN911x identified, idrev: 0x%08X, generation: %d",
+ pdata->idrev, pdata->generation);
if (pdata->generation == 0)
- SMSC_WARNING(PROBE,
- "This driver is not intended for this chip revision");
+ SMSC_WARN(pdata, probe,
+ "This driver is not intended for this chip revision");
/* workaround for platforms without an eeprom, where the mac address
* is stored elsewhere and set by the bootloader. This saves the
* mac address before resetting the device */
- if (pdata->config.flags & SMSC911X_SAVE_MAC_ADDRESS)
+ if (pdata->config.flags & SMSC911X_SAVE_MAC_ADDRESS) {
+ spin_lock_irq(&pdata->mac_lock);
smsc911x_read_mac_address(dev);
+ spin_unlock_irq(&pdata->mac_lock);
+ }
/* Reset the LAN911x */
if (smsc911x_soft_reset(pdata))
@@ -1927,7 +2053,7 @@ static int __devexit smsc911x_drv_remove(struct platform_device *pdev)
BUG_ON(!pdata->ioaddr);
BUG_ON(!pdata->phy_dev);
- SMSC_TRACE(IFDOWN, "Stopping driver.");
+ SMSC_TRACE(pdata, ifdown, "Stopping driver");
phy_disconnect(pdata->phy_dev);
pdata->phy_dev = NULL;
@@ -1951,6 +2077,22 @@ static int __devexit smsc911x_drv_remove(struct platform_device *pdev)
return 0;
}
+/* standard register access */
+static const struct smsc911x_ops standard_smsc911x_ops = {
+ .reg_read = __smsc911x_reg_read,
+ .reg_write = __smsc911x_reg_write,
+ .rx_readfifo = smsc911x_rx_readfifo,
+ .tx_writefifo = smsc911x_tx_writefifo,
+};
+
+/* shifted register access */
+static const struct smsc911x_ops shifted_smsc911x_ops = {
+ .reg_read = __smsc911x_reg_read_shift,
+ .reg_write = __smsc911x_reg_write_shift,
+ .rx_readfifo = smsc911x_rx_readfifo_shift,
+ .tx_writefifo = smsc911x_tx_writefifo_shift,
+};
+
static int __devinit smsc911x_drv_probe(struct platform_device *pdev)
{
struct net_device *dev;
@@ -1961,11 +2103,11 @@ static int __devinit smsc911x_drv_probe(struct platform_device *pdev)
int res_size, irq_flags;
int retval;
- pr_info("%s: Driver version %s.\n", SMSC_CHIPNAME, SMSC_DRV_VERSION);
+ pr_info("Driver version %s\n", SMSC_DRV_VERSION);
/* platform data specifies irq & dynamic bus configuration */
if (!pdev->dev.platform_data) {
- pr_warning("%s: platform_data not provided\n", SMSC_CHIPNAME);
+ pr_warn("platform_data not provided\n");
retval = -ENODEV;
goto out_0;
}
@@ -1975,8 +2117,7 @@ static int __devinit smsc911x_drv_probe(struct platform_device *pdev)
if (!res)
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!res) {
- pr_warning("%s: Could not allocate resource.\n",
- SMSC_CHIPNAME);
+ pr_warn("Could not allocate resource\n");
retval = -ENODEV;
goto out_0;
}
@@ -1984,8 +2125,7 @@ static int __devinit smsc911x_drv_probe(struct platform_device *pdev)
irq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
if (!irq_res) {
- pr_warning("%s: Could not allocate irq resource.\n",
- SMSC_CHIPNAME);
+ pr_warn("Could not allocate irq resource\n");
retval = -ENODEV;
goto out_0;
}
@@ -1997,7 +2137,7 @@ static int __devinit smsc911x_drv_probe(struct platform_device *pdev)
dev = alloc_etherdev(sizeof(struct smsc911x_data));
if (!dev) {
- pr_warning("%s: Could not allocate device.\n", SMSC_CHIPNAME);
+ pr_warn("Could not allocate device\n");
retval = -ENOMEM;
goto out_release_io_1;
}
@@ -2017,12 +2157,17 @@ static int __devinit smsc911x_drv_probe(struct platform_device *pdev)
pdata->msg_enable = ((1 << debug) - 1);
if (pdata->ioaddr == NULL) {
- SMSC_WARNING(PROBE,
- "Error smsc911x base address invalid");
+ SMSC_WARN(pdata, probe, "Error smsc911x base address invalid");
retval = -ENOMEM;
goto out_free_netdev_2;
}
+ /* assume standard, non-shifted, access to HW registers */
+ pdata->ops = &standard_smsc911x_ops;
+ /* apply the right access if shifting is needed */
+ if (config->shift)
+ pdata->ops = &shifted_smsc911x_ops;
+
retval = smsc911x_init(dev);
if (retval < 0)
goto out_unmap_io_3;
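
With the two ops tables above and the probe-time assignment in this hunk, every register and FIFO access is dispatched through a function-pointer table, so the shifted-bus variants are chosen once instead of branching on every access. A condensed sketch of the same pattern (hypothetical foo names, only reg_read shown):

#include <linux/io.h>
#include <linux/types.h>

struct foo_data;

struct foo_ops {
        u32 (*reg_read)(struct foo_data *pd, u32 reg);
};

struct foo_data {
        void __iomem *ioaddr;
        unsigned int shift;             /* address shift required by the bus */
        const struct foo_ops *ops;
};

static u32 foo_reg_read(struct foo_data *pd, u32 reg)
{
        return readl(pd->ioaddr + reg);
}

static u32 foo_reg_read_shift(struct foo_data *pd, u32 reg)
{
        return readl(pd->ioaddr + (reg << pd->shift));
}

static const struct foo_ops foo_std_ops   = { .reg_read = foo_reg_read };
static const struct foo_ops foo_shift_ops = { .reg_read = foo_reg_read_shift };

/* probe-time selection, mirroring the hunk above */
static void foo_pick_ops(struct foo_data *pd)
{
        pd->ops = pd->shift ? &foo_shift_ops : &foo_std_ops;
}
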
@@ -2043,8 +2188,8 @@ static int __devinit smsc911x_drv_probe(struct platform_device *pdev)
retval = request_irq(dev->irq, smsc911x_irqhandler,
irq_flags | IRQF_SHARED, dev->name, dev);
if (retval) {
- SMSC_WARNING(PROBE,
- "Unable to claim requested irq: %d", dev->irq);
+ SMSC_WARN(pdata, probe,
+ "Unable to claim requested irq: %d", dev->irq);
goto out_unmap_io_3;
}
@@ -2052,19 +2197,16 @@ static int __devinit smsc911x_drv_probe(struct platform_device *pdev)
retval = register_netdev(dev);
if (retval) {
- SMSC_WARNING(PROBE,
- "Error %i registering device", retval);
+ SMSC_WARN(pdata, probe, "Error %i registering device", retval);
goto out_unset_drvdata_4;
} else {
- SMSC_TRACE(PROBE, "Network interface: \"%s\"", dev->name);
+ SMSC_TRACE(pdata, probe,
+ "Network interface: \"%s\"", dev->name);
}
- spin_lock_init(&pdata->mac_lock);
-
retval = smsc911x_mii_init(pdev, dev);
if (retval) {
- SMSC_WARNING(PROBE,
- "Error %i initialising mii", retval);
+ SMSC_WARN(pdata, probe, "Error %i initialising mii", retval);
goto out_unregister_netdev_5;
}
@@ -2073,10 +2215,12 @@ static int __devinit smsc911x_drv_probe(struct platform_device *pdev)
/* Check if mac address has been specified when bringing interface up */
if (is_valid_ether_addr(dev->dev_addr)) {
smsc911x_set_hw_mac_address(pdata, dev->dev_addr);
- SMSC_TRACE(PROBE, "MAC Address is specified by configuration");
+ SMSC_TRACE(pdata, probe,
+ "MAC Address is specified by configuration");
} else if (is_valid_ether_addr(pdata->config.mac)) {
memcpy(dev->dev_addr, pdata->config.mac, 6);
- SMSC_TRACE(PROBE, "MAC Address specified by platform data");
+ SMSC_TRACE(pdata, probe,
+ "MAC Address specified by platform data");
} else {
/* Try reading mac address from device. if EEPROM is present
* it will already have been set */
@@ -2084,20 +2228,20 @@ static int __devinit smsc911x_drv_probe(struct platform_device *pdev)
if (is_valid_ether_addr(dev->dev_addr)) {
/* eeprom values are valid so use them */
- SMSC_TRACE(PROBE,
- "Mac Address is read from LAN911x EEPROM");
+ SMSC_TRACE(pdata, probe,
+ "Mac Address is read from LAN911x EEPROM");
} else {
/* eeprom values are invalid, generate random MAC */
random_ether_addr(dev->dev_addr);
smsc911x_set_hw_mac_address(pdata, dev->dev_addr);
- SMSC_TRACE(PROBE,
- "MAC Address is set to random_ether_addr");
+ SMSC_TRACE(pdata, probe,
+ "MAC Address is set to random_ether_addr");
}
}
spin_unlock_irq(&pdata->mac_lock);
- dev_info(&dev->dev, "MAC Address: %pM\n", dev->dev_addr);
+ netdev_info(dev, "MAC Address: %pM\n", dev->dev_addr);
return 0;
diff --git a/drivers/net/smsc911x.h b/drivers/net/smsc911x.h
index 50f712e99e96..8d67aacf8867 100644
--- a/drivers/net/smsc911x.h
+++ b/drivers/net/smsc911x.h
@@ -33,25 +33,21 @@
* can be successfully looped back */
#define USE_PHY_WORK_AROUND
-#define DPRINTK(nlevel, klevel, fmt, args...) \
- ((void)((NETIF_MSG_##nlevel & pdata->msg_enable) && \
- printk(KERN_##klevel "%s: %s: " fmt "\n", \
- pdata->dev->name, __func__, ## args)))
-
#if USE_DEBUG >= 1
-#define SMSC_WARNING(nlevel, fmt, args...) \
- DPRINTK(nlevel, WARNING, fmt, ## args)
+#define SMSC_WARN(pdata, nlevel, fmt, args...) \
+ netif_warn(pdata, nlevel, (pdata)->dev, \
+ "%s: " fmt "\n", __func__, ##args)
#else
-#define SMSC_WARNING(nlevel, fmt, args...) \
- ({ do {} while (0); 0; })
+#define SMSC_WARN(pdata, nlevel, fmt, args...) \
+ no_printk(fmt "\n", ##args)
#endif
#if USE_DEBUG >= 2
-#define SMSC_TRACE(nlevel, fmt, args...) \
- DPRINTK(nlevel, INFO, fmt, ## args)
+#define SMSC_TRACE(pdata, nlevel, fmt, args...) \
+ netif_info(pdata, nlevel, pdata->dev, fmt "\n", ##args)
#else
-#define SMSC_TRACE(nlevel, fmt, args...) \
- ({ do {} while (0); 0; })
+#define SMSC_TRACE(pdata, nlevel, fmt, args...) \
+ no_printk(fmt "\n", ##args)
#endif
#ifdef CONFIG_DEBUG_SPINLOCK
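
The SMSC_WARN/SMSC_TRACE rewrite above layers the driver's messages on the generic netif_* logging macros, which gate output on the NETIF_MSG_* bits in msg_enable. A small usage sketch with a hypothetical foo_priv:

#include <linux/netdevice.h>

struct foo_priv {
        struct net_device *dev;
        u32 msg_enable;         /* NETIF_MSG_* bitmask, typically set via ethtool */
};

static void foo_report(struct foo_priv *priv)
{
        /* Emitted only when NETIF_MSG_HW is set in priv->msg_enable. */
        netif_warn(priv, hw, priv->dev, "%s: MAC still busy\n", __func__);

        /* Emitted only when NETIF_MSG_PROBE is set. */
        netif_info(priv, probe, priv->dev, "probe complete\n");
}
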
diff --git a/drivers/net/smsc9420.c b/drivers/net/smsc9420.c
index b09ee1c319e8..4c92ad8be765 100644
--- a/drivers/net/smsc9420.c
+++ b/drivers/net/smsc9420.c
@@ -364,7 +364,7 @@ static int smsc9420_eeprom_send_cmd(struct smsc9420_pdata *pd, u32 op)
}
if (e2cmd & E2P_CMD_EPC_TIMEOUT_) {
- smsc_info(HW, "Error occured during eeprom operation");
+ smsc_info(HW, "Error occurred during eeprom operation");
return -EINVAL;
}
diff --git a/drivers/net/spider_net.c b/drivers/net/spider_net.c
index cb6bcca9d541..949f124e1278 100644
--- a/drivers/net/spider_net.c
+++ b/drivers/net/spider_net.c
@@ -994,15 +994,13 @@ spider_net_pass_skb_up(struct spider_net_descr *descr,
skb->protocol = eth_type_trans(skb, netdev);
/* checksum offload */
- if (card->options.rx_csum) {
+ skb_checksum_none_assert(skb);
+ if (netdev->features & NETIF_F_RXCSUM) {
if ( ( (data_status & SPIDER_NET_DATA_STATUS_CKSUM_MASK) ==
SPIDER_NET_DATA_STATUS_CKSUM_MASK) &&
!(data_error & SPIDER_NET_DATA_ERR_CKSUM_MASK))
skb->ip_summed = CHECKSUM_UNNECESSARY;
- else
- skb_checksum_none_assert(skb);
- } else
- skb_checksum_none_assert(skb);
+ }
if (data_status & SPIDER_NET_VLAN_PACKET) {
/* further enhancements: HW-accel VLAN
@@ -2322,14 +2320,15 @@ spider_net_setup_netdev(struct spider_net_card *card)
card->aneg_timer.function = spider_net_link_phy;
card->aneg_timer.data = (unsigned long) card;
- card->options.rx_csum = SPIDER_NET_RX_CSUM_DEFAULT;
-
netif_napi_add(netdev, &card->napi,
spider_net_poll, SPIDER_NET_NAPI_WEIGHT);
spider_net_setup_netdev_ops(netdev);
- netdev->features = NETIF_F_IP_CSUM | NETIF_F_LLTX;
+ netdev->hw_features = NETIF_F_RXCSUM | NETIF_F_IP_CSUM;
+ if (SPIDER_NET_RX_CSUM_DEFAULT)
+ netdev->features |= NETIF_F_RXCSUM;
+ netdev->features |= NETIF_F_IP_CSUM | NETIF_F_LLTX;
/* some time: NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX |
* NETIF_F_HW_VLAN_FILTER */
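With rx_csum removed from the private options, the RX path keys off netdev->features directly. A minimal sketch of the pattern (hw_csum_ok standing in for the spider_net data_status/data_error test; foo_* names are illustrative):

static void foo_rx_checksum(struct net_device *netdev, struct sk_buff *skb,
			    bool hw_csum_ok)
{
	skb_checksum_none_assert(skb);		/* default: let the stack verify */
	if ((netdev->features & NETIF_F_RXCSUM) && hw_csum_ok)
		skb->ip_summed = CHECKSUM_UNNECESSARY;
}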
diff --git a/drivers/net/spider_net.h b/drivers/net/spider_net.h
index 05f74cbdd617..020f64a8fcf7 100644
--- a/drivers/net/spider_net.h
+++ b/drivers/net/spider_net.h
@@ -429,12 +429,6 @@ struct spider_net_descr_chain {
 * 701b8000 would be correct, but every packet gets that flag */
#define SPIDER_NET_DESTROY_RX_FLAGS 0x700b8000
-/* this will be bigger some time */
-struct spider_net_options {
- int rx_csum; /* for rx: if 0 ip_summed=NONE,
- if 1 and hw has verified, ip_summed=UNNECESSARY */
-};
-
#define SPIDER_NET_DEFAULT_MSG ( NETIF_MSG_DRV | \
NETIF_MSG_PROBE | \
NETIF_MSG_LINK | \
@@ -487,7 +481,6 @@ struct spider_net_card {
/* for ethtool */
int msg_enable;
struct spider_net_extra_stats spider_stats;
- struct spider_net_options options;
/* Must be last item in struct */
struct spider_net_descr darray[0];
diff --git a/drivers/net/spider_net_ethtool.c b/drivers/net/spider_net_ethtool.c
index 5bae728c3820..9c288cd7d171 100644
--- a/drivers/net/spider_net_ethtool.c
+++ b/drivers/net/spider_net_ethtool.c
@@ -58,7 +58,7 @@ spider_net_ethtool_get_settings(struct net_device *netdev,
cmd->advertising = (ADVERTISED_1000baseT_Full |
ADVERTISED_FIBRE);
cmd->port = PORT_FIBRE;
- cmd->speed = card->phy.speed;
+ ethtool_cmd_speed_set(cmd, card->phy.speed);
cmd->duplex = DUPLEX_FULL;
return 0;
@@ -115,24 +115,6 @@ spider_net_ethtool_nway_reset(struct net_device *netdev)
return 0;
}
-static u32
-spider_net_ethtool_get_rx_csum(struct net_device *netdev)
-{
- struct spider_net_card *card = netdev_priv(netdev);
-
- return card->options.rx_csum;
-}
-
-static int
-spider_net_ethtool_set_rx_csum(struct net_device *netdev, u32 n)
-{
- struct spider_net_card *card = netdev_priv(netdev);
-
- card->options.rx_csum = n;
- return 0;
-}
-
-
static void
spider_net_ethtool_get_ringparam(struct net_device *netdev,
struct ethtool_ringparam *ering)
@@ -189,9 +171,6 @@ const struct ethtool_ops spider_net_ethtool_ops = {
.set_msglevel = spider_net_ethtool_set_msglevel,
.get_link = ethtool_op_get_link,
.nway_reset = spider_net_ethtool_nway_reset,
- .get_rx_csum = spider_net_ethtool_get_rx_csum,
- .set_rx_csum = spider_net_ethtool_set_rx_csum,
- .set_tx_csum = ethtool_op_set_tx_csum,
.get_ringparam = spider_net_ethtool_get_ringparam,
.get_strings = spider_net_get_strings,
.get_sset_count = spider_net_get_sset_count,
diff --git a/drivers/net/stmmac/dwmac1000_core.c b/drivers/net/stmmac/dwmac1000_core.c
index 6ae4c3f4c63c..f20455cbfbbc 100644
--- a/drivers/net/stmmac/dwmac1000_core.c
+++ b/drivers/net/stmmac/dwmac1000_core.c
@@ -178,10 +178,11 @@ static void dwmac1000_pmt(void __iomem *ioaddr, unsigned long mode)
{
unsigned int pmt = 0;
- if (mode == WAKE_MAGIC) {
+ if (mode & WAKE_MAGIC) {
CHIP_DBG(KERN_DEBUG "GMAC: WOL Magic frame\n");
pmt |= power_down | magic_pkt_en;
- } else if (mode == WAKE_UCAST) {
+ }
+ if (mode & WAKE_UCAST) {
CHIP_DBG(KERN_DEBUG "GMAC: WOL on global unicast\n");
pmt |= global_unicast;
}
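Since the requested WoL options form a bitmask, several WAKE_* modes can be asked for at once; testing each bit independently, as above, keeps them from being mutually exclusive. A sketch of the same idea with hypothetical register bit names (FOO_PMT_* are not real defines):

static u32 foo_pmt_bits(unsigned long wolopts)
{
	u32 pmt = 0;

	if (wolopts & WAKE_MAGIC)
		pmt |= FOO_PMT_MAGIC_PKT_EN;	/* hypothetical bit */
	if (wolopts & WAKE_UCAST)
		pmt |= FOO_PMT_GLOBAL_UNICAST;	/* hypothetical bit */
	return pmt;
}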
diff --git a/drivers/net/stmmac/dwmac_lib.c b/drivers/net/stmmac/dwmac_lib.c
index d65fab1ba790..e25093510b0c 100644
--- a/drivers/net/stmmac/dwmac_lib.c
+++ b/drivers/net/stmmac/dwmac_lib.c
@@ -26,9 +26,9 @@
#undef DWMAC_DMA_DEBUG
#ifdef DWMAC_DMA_DEBUG
-#define DBG(fmt, args...) printk(fmt, ## args)
+#define DWMAC_LIB_DBG(fmt, args...) printk(fmt, ## args)
#else
-#define DBG(fmt, args...) do { } while (0)
+#define DWMAC_LIB_DBG(fmt, args...) do { } while (0)
#endif
/* CSR1 enables the transmit DMA to check for new descriptor */
@@ -152,7 +152,7 @@ int dwmac_dma_interrupt(void __iomem *ioaddr,
/* read the status register (CSR5) */
u32 intr_status = readl(ioaddr + DMA_STATUS);
- DBG(INFO, "%s: [CSR5: 0x%08x]\n", __func__, intr_status);
+ DWMAC_LIB_DBG(KERN_INFO "%s: [CSR5: 0x%08x]\n", __func__, intr_status);
#ifdef DWMAC_DMA_DEBUG
/* It displays the DMA process states (CSR5 register) */
show_tx_process_state(intr_status);
@@ -160,43 +160,43 @@ int dwmac_dma_interrupt(void __iomem *ioaddr,
#endif
/* ABNORMAL interrupts */
if (unlikely(intr_status & DMA_STATUS_AIS)) {
- DBG(INFO, "CSR5[15] DMA ABNORMAL IRQ: ");
+ DWMAC_LIB_DBG(KERN_INFO "CSR5[15] DMA ABNORMAL IRQ: ");
if (unlikely(intr_status & DMA_STATUS_UNF)) {
- DBG(INFO, "transmit underflow\n");
+ DWMAC_LIB_DBG(KERN_INFO "transmit underflow\n");
ret = tx_hard_error_bump_tc;
x->tx_undeflow_irq++;
}
if (unlikely(intr_status & DMA_STATUS_TJT)) {
- DBG(INFO, "transmit jabber\n");
+ DWMAC_LIB_DBG(KERN_INFO "transmit jabber\n");
x->tx_jabber_irq++;
}
if (unlikely(intr_status & DMA_STATUS_OVF)) {
- DBG(INFO, "recv overflow\n");
+ DWMAC_LIB_DBG(KERN_INFO "recv overflow\n");
x->rx_overflow_irq++;
}
if (unlikely(intr_status & DMA_STATUS_RU)) {
- DBG(INFO, "receive buffer unavailable\n");
+ DWMAC_LIB_DBG(KERN_INFO "receive buffer unavailable\n");
x->rx_buf_unav_irq++;
}
if (unlikely(intr_status & DMA_STATUS_RPS)) {
- DBG(INFO, "receive process stopped\n");
+ DWMAC_LIB_DBG(KERN_INFO "receive process stopped\n");
x->rx_process_stopped_irq++;
}
if (unlikely(intr_status & DMA_STATUS_RWT)) {
- DBG(INFO, "receive watchdog\n");
+ DWMAC_LIB_DBG(KERN_INFO "receive watchdog\n");
x->rx_watchdog_irq++;
}
if (unlikely(intr_status & DMA_STATUS_ETI)) {
- DBG(INFO, "transmit early interrupt\n");
+ DWMAC_LIB_DBG(KERN_INFO "transmit early interrupt\n");
x->tx_early_irq++;
}
if (unlikely(intr_status & DMA_STATUS_TPS)) {
- DBG(INFO, "transmit process stopped\n");
+ DWMAC_LIB_DBG(KERN_INFO "transmit process stopped\n");
x->tx_process_stopped_irq++;
ret = tx_hard_error;
}
if (unlikely(intr_status & DMA_STATUS_FBI)) {
- DBG(INFO, "fatal bus error\n");
+ DWMAC_LIB_DBG(KERN_INFO "fatal bus error\n");
x->fatal_bus_error_irq++;
ret = tx_hard_error;
}
@@ -215,7 +215,7 @@ int dwmac_dma_interrupt(void __iomem *ioaddr,
/* Clear the interrupt by writing a logic 1 to the CSR5[15-0] */
writel((intr_status & 0x1ffff), ioaddr + DMA_STATUS);
- DBG(INFO, "\n\n");
+ DWMAC_LIB_DBG(KERN_INFO "\n\n");
return ret;
}
diff --git a/drivers/net/stmmac/norm_desc.c b/drivers/net/stmmac/norm_desc.c
index cd0cc76f7a1c..029c2a2cf524 100644
--- a/drivers/net/stmmac/norm_desc.c
+++ b/drivers/net/stmmac/norm_desc.c
@@ -67,7 +67,7 @@ static int ndesc_get_tx_len(struct dma_desc *p)
/* This function verifies if each incoming frame has some errors
* and, if required, updates the multicast statistics.
- * In case of success, it returns csum_none becasue the device
+ * In case of success, it returns csum_none because the device
* is not able to compute the csum in HW. */
static int ndesc_get_rx_status(void *data, struct stmmac_extra_stats *x,
struct dma_desc *p)
diff --git a/drivers/net/stmmac/stmmac.h b/drivers/net/stmmac/stmmac.h
index 5f06c4706abe..2b076b313622 100644
--- a/drivers/net/stmmac/stmmac.h
+++ b/drivers/net/stmmac/stmmac.h
@@ -21,7 +21,6 @@
*******************************************************************************/
#define DRV_MODULE_VERSION "Nov_2010"
-#include <linux/platform_device.h>
#include <linux/stmmac.h>
#include "common.h"
diff --git a/drivers/net/stmmac/stmmac_ethtool.c b/drivers/net/stmmac/stmmac_ethtool.c
index fd719edc7f7c..ae5213a8c4cd 100644
--- a/drivers/net/stmmac/stmmac_ethtool.c
+++ b/drivers/net/stmmac/stmmac_ethtool.c
@@ -197,13 +197,6 @@ static void stmmac_ethtool_gregs(struct net_device *dev,
}
}
-static u32 stmmac_ethtool_get_rx_csum(struct net_device *dev)
-{
- struct stmmac_priv *priv = netdev_priv(dev);
-
- return priv->rx_coe;
-}
-
static void
stmmac_get_pauseparam(struct net_device *netdev,
struct ethtool_pauseparam *pause)
@@ -241,20 +234,11 @@ stmmac_set_pauseparam(struct net_device *netdev,
new_pause |= FLOW_TX;
priv->flow_ctrl = new_pause;
+ phy->autoneg = pause->autoneg;
if (phy->autoneg) {
- if (netif_running(netdev)) {
- struct ethtool_cmd cmd;
- /* auto-negotiation automatically restarted */
- cmd.cmd = ETHTOOL_NWAY_RST;
- cmd.supported = phy->supported;
- cmd.advertising = phy->advertising;
- cmd.autoneg = phy->autoneg;
- cmd.speed = phy->speed;
- cmd.duplex = phy->duplex;
- cmd.phy_address = phy->addr;
- ret = phy_ethtool_sset(phy, &cmd);
- }
+ if (netif_running(netdev))
+ ret = phy_start_aneg(phy);
} else
priv->hw->mac->flow_ctrl(priv->ioaddr, phy->duplex,
priv->flow_ctrl, priv->pause);
@@ -315,7 +299,7 @@ static void stmmac_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
spin_lock_irq(&priv->lock);
if (device_can_wakeup(priv->device)) {
- wol->supported = WAKE_MAGIC;
+ wol->supported = WAKE_MAGIC | WAKE_UCAST;
wol->wolopts = priv->wolopts;
}
spin_unlock_irq(&priv->lock);
@@ -324,7 +308,7 @@ static void stmmac_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
static int stmmac_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
struct stmmac_priv *priv = netdev_priv(dev);
- u32 support = WAKE_MAGIC;
+ u32 support = WAKE_MAGIC | WAKE_UCAST;
if (!device_can_wakeup(priv->device))
return -EINVAL;
@@ -358,11 +342,6 @@ static struct ethtool_ops stmmac_ethtool_ops = {
.get_regs = stmmac_ethtool_gregs,
.get_regs_len = stmmac_ethtool_get_regs_len,
.get_link = ethtool_op_get_link,
- .get_rx_csum = stmmac_ethtool_get_rx_csum,
- .get_tx_csum = ethtool_op_get_tx_csum,
- .set_tx_csum = ethtool_op_set_tx_ipv6_csum,
- .get_sg = ethtool_op_get_sg,
- .set_sg = ethtool_op_set_sg,
.get_pauseparam = stmmac_get_pauseparam,
.set_pauseparam = stmmac_set_pauseparam,
.get_ethtool_stats = stmmac_get_ethtool_stats,
@@ -370,8 +349,6 @@ static struct ethtool_ops stmmac_ethtool_ops = {
.get_wol = stmmac_get_wol,
.set_wol = stmmac_set_wol,
.get_sset_count = stmmac_get_sset_count,
- .get_tso = ethtool_op_get_tso,
- .set_tso = ethtool_op_set_tso,
};
void stmmac_set_ethtool_ops(struct net_device *netdev)
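phy_start_aneg() restarts autonegotiation using the advertisement already stored in the phy_device, so the hand-built ethtool_cmd previously passed to phy_ethtool_sset() is unnecessary. A sketch of the resulting pattern (foo_* names are illustrative):

static int foo_set_pause_autoneg(struct net_device *dev,
				 struct phy_device *phy,
				 const struct ethtool_pauseparam *pause)
{
	phy->autoneg = pause->autoneg;
	if (pause->autoneg && netif_running(dev))
		return phy_start_aneg(phy);	/* restart autonegotiation */
	return 0;
}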
diff --git a/drivers/net/stmmac/stmmac_main.c b/drivers/net/stmmac/stmmac_main.c
index 0e5f03135b50..e15c4a0bb96d 100644
--- a/drivers/net/stmmac/stmmac_main.c
+++ b/drivers/net/stmmac/stmmac_main.c
@@ -116,9 +116,6 @@ static int tc = TC_DEFAULT;
module_param(tc, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(tc, "DMA threshold control value");
-#define RX_NO_COALESCE 1 /* Always interrupt on completion */
-#define TX_NO_COALESCE -1 /* No moderation by default */
-
/* Pay attention to tune this parameter; take care of both
 * hardware capability and network stability/performance impact.
* Many tests showed that ~4ms latency seems to be good enough. */
@@ -139,7 +136,6 @@ static const u32 default_msg_level = (NETIF_MSG_DRV | NETIF_MSG_PROBE |
NETIF_MSG_IFDOWN | NETIF_MSG_TIMER);
static irqreturn_t stmmac_interrupt(int irq, void *dev_id);
-static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev);
/**
* stmmac_verify_args - verify the driver parameters.
@@ -750,7 +746,6 @@ static void stmmac_dma_interrupt(struct stmmac_priv *priv)
priv->hw->dma->dma_mode(priv->ioaddr, tc, SF_DMA_MODE);
priv->xstats.threshold = tc;
}
- stmmac_tx_err(priv);
} else if (unlikely(status == tx_hard_error))
stmmac_tx_err(priv);
}
@@ -781,21 +776,6 @@ static int stmmac_open(struct net_device *dev)
stmmac_verify_args();
- ret = stmmac_init_phy(dev);
- if (unlikely(ret)) {
- pr_err("%s: Cannot attach to PHY (error: %d)\n", __func__, ret);
- return ret;
- }
-
- /* Request the IRQ lines */
- ret = request_irq(dev->irq, stmmac_interrupt,
- IRQF_SHARED, dev->name, dev);
- if (unlikely(ret < 0)) {
- pr_err("%s: ERROR: allocating the IRQ %d (error: %d)\n",
- __func__, dev->irq, ret);
- return ret;
- }
-
#ifdef CONFIG_STMMAC_TIMER
priv->tm = kzalloc(sizeof(struct stmmac_timer *), GFP_KERNEL);
if (unlikely(priv->tm == NULL)) {
@@ -814,6 +794,11 @@ static int stmmac_open(struct net_device *dev)
} else
priv->tm->enable = 1;
#endif
+ ret = stmmac_init_phy(dev);
+ if (unlikely(ret)) {
+ pr_err("%s: Cannot attach to PHY (error: %d)\n", __func__, ret);
+ goto open_error;
+ }
/* Create and initialize the TX/RX descriptors chains. */
priv->dma_tx_size = STMMAC_ALIGN(dma_txsize);
@@ -822,12 +807,11 @@ static int stmmac_open(struct net_device *dev)
init_dma_desc_rings(dev);
/* DMA initialization and SW reset */
- if (unlikely(priv->hw->dma->init(priv->ioaddr, priv->plat->pbl,
- priv->dma_tx_phy,
- priv->dma_rx_phy) < 0)) {
-
+ ret = priv->hw->dma->init(priv->ioaddr, priv->plat->pbl,
+ priv->dma_tx_phy, priv->dma_rx_phy);
+ if (ret < 0) {
pr_err("%s: DMA initialization failed\n", __func__);
- return -1;
+ goto open_error;
}
/* Copy the MAC addr into the HW */
@@ -843,11 +827,21 @@ static int stmmac_open(struct net_device *dev)
pr_info("stmmac: Rx Checksum Offload Engine supported\n");
if (priv->plat->tx_coe)
pr_info("\tTX Checksum insertion supported\n");
+ netdev_update_features(dev);
/* Initialise the MMC (if present) to disable all interrupts. */
writel(0xffffffff, priv->ioaddr + MMC_HIGH_INTR_MASK);
writel(0xffffffff, priv->ioaddr + MMC_LOW_INTR_MASK);
+ /* Request the IRQ lines */
+ ret = request_irq(dev->irq, stmmac_interrupt,
+ IRQF_SHARED, dev->name, dev);
+ if (unlikely(ret < 0)) {
+ pr_err("%s: ERROR: allocating the IRQ %d (error: %d)\n",
+ __func__, dev->irq, ret);
+ goto open_error;
+ }
+
/* Enable the MAC Rx/Tx */
stmmac_enable_mac(priv->ioaddr);
@@ -878,7 +872,17 @@ static int stmmac_open(struct net_device *dev)
napi_enable(&priv->napi);
skb_queue_head_init(&priv->rx_recycle);
netif_start_queue(dev);
+
return 0;
+
+open_error:
+#ifdef CONFIG_STMMAC_TIMER
+ kfree(priv->tm);
+#endif
+ if (priv->phydev)
+ phy_disconnect(priv->phydev);
+
+ return ret;
}
/**
@@ -927,46 +931,6 @@ static int stmmac_release(struct net_device *dev)
return 0;
}
-/*
- * To perform emulated hardware segmentation on skb.
- */
-static int stmmac_sw_tso(struct stmmac_priv *priv, struct sk_buff *skb)
-{
- struct sk_buff *segs, *curr_skb;
- int gso_segs = skb_shinfo(skb)->gso_segs;
-
- /* Estimate the number of fragments in the worst case */
- if (unlikely(stmmac_tx_avail(priv) < gso_segs)) {
- netif_stop_queue(priv->dev);
- TX_DBG(KERN_ERR "%s: TSO BUG! Tx Ring full when queue awake\n",
- __func__);
- if (stmmac_tx_avail(priv) < gso_segs)
- return NETDEV_TX_BUSY;
-
- netif_wake_queue(priv->dev);
- }
- TX_DBG("\tstmmac_sw_tso: segmenting: skb %p (len %d)\n",
- skb, skb->len);
-
- segs = skb_gso_segment(skb, priv->dev->features & ~NETIF_F_TSO);
- if (IS_ERR(segs))
- goto sw_tso_end;
-
- do {
- curr_skb = segs;
- segs = segs->next;
- TX_DBG("\t\tcurrent skb->len: %d, *curr %p,"
- "*next %p\n", curr_skb->len, curr_skb, segs);
- curr_skb->next = NULL;
- stmmac_xmit(curr_skb, priv->dev);
- } while (segs);
-
-sw_tso_end:
- dev_kfree_skb(skb);
-
- return NETDEV_TX_OK;
-}
-
static unsigned int stmmac_handle_jumbo_frames(struct sk_buff *skb,
struct net_device *dev,
int csum_insertion)
@@ -1044,16 +1008,7 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
!skb_is_gso(skb) ? "isn't" : "is");
#endif
- if (unlikely(skb_is_gso(skb)))
- return stmmac_sw_tso(priv, skb);
-
- if (likely((skb->ip_summed == CHECKSUM_PARTIAL))) {
- if (unlikely((!priv->plat->tx_coe) ||
- (priv->no_csum_insertion)))
- skb_checksum_help(skb);
- else
- csum_insertion = 1;
- }
+ csum_insertion = (skb->ip_summed == CHECKSUM_PARTIAL);
desc = priv->dma_tx + entry;
first = desc;
@@ -1373,18 +1328,29 @@ static int stmmac_change_mtu(struct net_device *dev, int new_mtu)
return -EINVAL;
}
+ dev->mtu = new_mtu;
+ netdev_update_features(dev);
+
+ return 0;
+}
+
+static u32 stmmac_fix_features(struct net_device *dev, u32 features)
+{
+ struct stmmac_priv *priv = netdev_priv(dev);
+
+ if (!priv->rx_coe)
+ features &= ~NETIF_F_RXCSUM;
+ if (!priv->plat->tx_coe)
+ features &= ~NETIF_F_ALL_CSUM;
+
/* Some GMAC devices have a bugged Jumbo frame support that
* needs to have the Tx COE disabled for oversized frames
* (due to limited buffer sizes). In this case we disable
 * the TX csum insertion in the TDES and not use SF. */
- if ((priv->plat->bugged_jumbo) && (priv->dev->mtu > ETH_DATA_LEN))
- priv->no_csum_insertion = 1;
- else
- priv->no_csum_insertion = 0;
+ if (priv->plat->bugged_jumbo && (dev->mtu > ETH_DATA_LEN))
+ features &= ~NETIF_F_ALL_CSUM;
- dev->mtu = new_mtu;
-
- return 0;
+ return features;
}
static irqreturn_t stmmac_interrupt(int irq, void *dev_id)
@@ -1464,6 +1430,7 @@ static const struct net_device_ops stmmac_netdev_ops = {
.ndo_start_xmit = stmmac_xmit,
.ndo_stop = stmmac_release,
.ndo_change_mtu = stmmac_change_mtu,
+ .ndo_fix_features = stmmac_fix_features,
.ndo_set_multicast_list = stmmac_multicast_list,
.ndo_tx_timeout = stmmac_tx_timeout,
.ndo_do_ioctl = stmmac_ioctl,
@@ -1494,8 +1461,8 @@ static int stmmac_probe(struct net_device *dev)
dev->netdev_ops = &stmmac_netdev_ops;
stmmac_set_ethtool_ops(dev);
- dev->features |= NETIF_F_SG | NETIF_F_HIGHDMA |
- NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
+ dev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
+ dev->features |= dev->hw_features | NETIF_F_HIGHDMA;
dev->watchdog_timeo = msecs_to_jiffies(watchdog);
#ifdef STMMAC_VLAN_TAG_USED
/* Both mac100 and gmac support receive VLAN tag detection */
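The ndo_fix_features hook lets the core ask the driver which offloads are valid for the current configuration, instead of the driver caching a no_csum_insertion flag; after dev->mtu or related state changes, netdev_update_features() re-runs it, as in stmmac_change_mtu() above. A minimal sketch under assumed names (foo_priv and its rx_csum_hw field are illustrative):

static u32 foo_fix_features(struct net_device *dev, u32 features)
{
	struct foo_priv *priv = netdev_priv(dev);

	if (!priv->rx_csum_hw)			/* chip cannot verify RX csums */
		features &= ~NETIF_F_RXCSUM;
	if (dev->mtu > ETH_DATA_LEN)		/* e.g. bugged jumbo TX csum */
		features &= ~NETIF_F_ALL_CSUM;

	return features;
}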
diff --git a/drivers/net/sunbmac.h b/drivers/net/sunbmac.h
index 8db88945b889..4943e975a731 100644
--- a/drivers/net/sunbmac.h
+++ b/drivers/net/sunbmac.h
@@ -185,7 +185,7 @@
#define BIGMAC_RXCFG_ENABLE 0x00000001 /* Enable the receiver */
#define BIGMAC_RXCFG_FIFO 0x0000000e /* Default rx fthresh... */
#define BIGMAC_RXCFG_PSTRIP 0x00000020 /* Pad byte strip enable */
-#define BIGMAC_RXCFG_PMISC 0x00000040 /* Enable promiscous mode */
+#define BIGMAC_RXCFG_PMISC 0x00000040 /* Enable promiscuous mode */
#define BIGMAC_RXCFG_DERR 0x00000080 /* Disable error checking */
#define BIGMAC_RXCFG_DCRCS 0x00000100 /* Disable CRC stripping */
#define BIGMAC_RXCFG_ME 0x00000200 /* Receive packets addressed to me */
diff --git a/drivers/net/sungem.c b/drivers/net/sungem.c
index c1a344829b54..ab5930099267 100644
--- a/drivers/net/sungem.c
+++ b/drivers/net/sungem.c
@@ -1150,7 +1150,7 @@ static void gem_pcs_reinit_adv(struct gem *gp)
val &= ~(PCS_CFG_ENABLE | PCS_CFG_TO);
writel(val, gp->regs + PCS_CFG);
- /* Advertise all capabilities except assymetric
+ /* Advertise all capabilities except asymmetric
* pause.
*/
val = readl(gp->regs + PCS_MIIADV);
@@ -1294,7 +1294,7 @@ static void gem_begin_auto_negotiation(struct gem *gp, struct ethtool_cmd *ep)
autoneg = 1;
} else {
autoneg = 0;
- speed = ep->speed;
+ speed = ethtool_cmd_speed(ep);
duplex = ep->duplex;
}
@@ -2642,7 +2642,7 @@ static int gem_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
/* Return current PHY settings */
spin_lock_irq(&gp->lock);
cmd->autoneg = gp->want_autoneg;
- cmd->speed = gp->phy_mii.speed;
+ ethtool_cmd_speed_set(cmd, gp->phy_mii.speed);
cmd->duplex = gp->phy_mii.duplex;
cmd->advertising = gp->phy_mii.advertising;
@@ -2659,7 +2659,7 @@ static int gem_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full |
SUPPORTED_Autoneg);
cmd->advertising = cmd->supported;
- cmd->speed = 0;
+ ethtool_cmd_speed_set(cmd, 0);
cmd->duplex = cmd->port = cmd->phy_address =
cmd->transceiver = cmd->autoneg = 0;
@@ -2673,7 +2673,7 @@ static int gem_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
cmd->advertising = cmd->supported;
cmd->transceiver = XCVR_INTERNAL;
if (gp->lstate == link_up)
- cmd->speed = SPEED_1000;
+ ethtool_cmd_speed_set(cmd, SPEED_1000);
cmd->duplex = DUPLEX_FULL;
cmd->autoneg = 1;
}
@@ -2686,6 +2686,7 @@ static int gem_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
static int gem_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
struct gem *gp = netdev_priv(dev);
+ u32 speed = ethtool_cmd_speed(cmd);
/* Verify the settings we care about. */
if (cmd->autoneg != AUTONEG_ENABLE &&
@@ -2697,9 +2698,9 @@ static int gem_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
return -EINVAL;
if (cmd->autoneg == AUTONEG_DISABLE &&
- ((cmd->speed != SPEED_1000 &&
- cmd->speed != SPEED_100 &&
- cmd->speed != SPEED_10) ||
+ ((speed != SPEED_1000 &&
+ speed != SPEED_100 &&
+ speed != SPEED_10) ||
(cmd->duplex != DUPLEX_HALF &&
cmd->duplex != DUPLEX_FULL)))
return -EINVAL;
@@ -3146,7 +3147,8 @@ static int __devinit gem_init_one(struct pci_dev *pdev,
gp->phy_mii.def ? gp->phy_mii.def->name : "no");
/* GEM can do it all... */
- dev->features |= NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_LLTX;
+ dev->hw_features = NETIF_F_SG | NETIF_F_HW_CSUM;
+ dev->features |= dev->hw_features | NETIF_F_RXCSUM | NETIF_F_LLTX;
if (pci_using_dac)
dev->features |= NETIF_F_HIGHDMA;
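ethtool_cmd_speed()/ethtool_cmd_speed_set() exist because speeds above 65535 Mb/s no longer fit the 16-bit cmd->speed field and are split across speed and speed_hi; drivers therefore go through the accessors instead of touching cmd->speed directly. A short sketch (foo_* names are illustrative):

static void foo_fill_speed(struct ethtool_cmd *cmd, u32 speed)
{
	ethtool_cmd_speed_set(cmd, speed);	/* writes speed and speed_hi */
}

static u32 foo_read_speed(const struct ethtool_cmd *cmd)
{
	return ethtool_cmd_speed(cmd);		/* recombines the two halves */
}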
diff --git a/drivers/net/sunhme.c b/drivers/net/sunhme.c
index eb4f59fb01e9..d381a0f9ee18 100644
--- a/drivers/net/sunhme.c
+++ b/drivers/net/sunhme.c
@@ -1383,7 +1383,7 @@ force_link:
if (ep == NULL || ep->autoneg == AUTONEG_ENABLE) {
hp->sw_bmcr = BMCR_SPEED100;
} else {
- if (ep->speed == SPEED_100)
+ if (ethtool_cmd_speed(ep) == SPEED_100)
hp->sw_bmcr = BMCR_SPEED100;
else
hp->sw_bmcr = 0;
@@ -2401,6 +2401,7 @@ static void happy_meal_set_multicast(struct net_device *dev)
static int hme_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
struct happy_meal *hp = netdev_priv(dev);
+ u32 speed;
cmd->supported =
(SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full |
@@ -2420,10 +2421,9 @@ static int hme_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
if (hp->sw_bmcr & BMCR_ANENABLE) {
cmd->autoneg = AUTONEG_ENABLE;
- cmd->speed =
- (hp->sw_lpa & (LPA_100HALF | LPA_100FULL)) ?
- SPEED_100 : SPEED_10;
- if (cmd->speed == SPEED_100)
+ speed = ((hp->sw_lpa & (LPA_100HALF | LPA_100FULL)) ?
+ SPEED_100 : SPEED_10);
+ if (speed == SPEED_100)
cmd->duplex =
(hp->sw_lpa & (LPA_100FULL)) ?
DUPLEX_FULL : DUPLEX_HALF;
@@ -2433,13 +2433,12 @@ static int hme_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
DUPLEX_FULL : DUPLEX_HALF;
} else {
cmd->autoneg = AUTONEG_DISABLE;
- cmd->speed =
- (hp->sw_bmcr & BMCR_SPEED100) ?
- SPEED_100 : SPEED_10;
+ speed = (hp->sw_bmcr & BMCR_SPEED100) ? SPEED_100 : SPEED_10;
cmd->duplex =
(hp->sw_bmcr & BMCR_FULLDPLX) ?
DUPLEX_FULL : DUPLEX_HALF;
}
+ ethtool_cmd_speed_set(cmd, speed);
return 0;
}
@@ -2452,8 +2451,8 @@ static int hme_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
cmd->autoneg != AUTONEG_DISABLE)
return -EINVAL;
if (cmd->autoneg == AUTONEG_DISABLE &&
- ((cmd->speed != SPEED_100 &&
- cmd->speed != SPEED_10) ||
+ ((ethtool_cmd_speed(cmd) != SPEED_100 &&
+ ethtool_cmd_speed(cmd) != SPEED_10) ||
(cmd->duplex != DUPLEX_HALF &&
cmd->duplex != DUPLEX_FULL)))
return -EINVAL;
@@ -2788,7 +2787,8 @@ static int __devinit happy_meal_sbus_probe_one(struct platform_device *op, int i
dev->ethtool_ops = &hme_ethtool_ops;
/* Happy Meal can do it all... */
- dev->features |= NETIF_F_SG | NETIF_F_HW_CSUM;
+ dev->hw_features = NETIF_F_SG | NETIF_F_HW_CSUM;
+ dev->features |= dev->hw_features | NETIF_F_RXCSUM;
dev->irq = op->archdata.irqs[0];
@@ -3113,7 +3113,8 @@ static int __devinit happy_meal_pci_probe(struct pci_dev *pdev,
dev->dma = 0;
/* Happy Meal can do it all... */
- dev->features |= NETIF_F_SG | NETIF_F_HW_CSUM;
+ dev->hw_features = NETIF_F_SG | NETIF_F_HW_CSUM;
+ dev->features |= dev->hw_features | NETIF_F_RXCSUM;
#if defined(CONFIG_SBUS) && defined(CONFIG_PCI)
/* Hook up PCI register/descriptor accessors. */
diff --git a/drivers/net/sunhme.h b/drivers/net/sunhme.h
index 756b5bf3aa89..64f278360d89 100644
--- a/drivers/net/sunhme.h
+++ b/drivers/net/sunhme.h
@@ -223,7 +223,7 @@
/* BigMac receive config register. */
#define BIGMAC_RXCFG_ENABLE 0x00000001 /* Enable the receiver */
#define BIGMAC_RXCFG_PSTRIP 0x00000020 /* Pad byte strip enable */
-#define BIGMAC_RXCFG_PMISC 0x00000040 /* Enable promiscous mode */
+#define BIGMAC_RXCFG_PMISC 0x00000040 /* Enable promiscuous mode */
#define BIGMAC_RXCFG_DERR 0x00000080 /* Disable error checking */
#define BIGMAC_RXCFG_DCRCS 0x00000100 /* Disable CRC stripping */
#define BIGMAC_RXCFG_REJME 0x00000200 /* Reject packets addressed to me */
diff --git a/drivers/net/tc35815.c b/drivers/net/tc35815.c
index b6eec8cea209..7ca51cebcddd 100644
--- a/drivers/net/tc35815.c
+++ b/drivers/net/tc35815.c
@@ -119,13 +119,13 @@ struct tc35815_regs {
/*
* Bit assignments
*/
-/* DMA_Ctl bit asign ------------------------------------------------------- */
+/* DMA_Ctl bit assign ------------------------------------------------------- */
#define DMA_RxAlign 0x00c00000 /* 1:Reception Alignment */
#define DMA_RxAlign_1 0x00400000
#define DMA_RxAlign_2 0x00800000
#define DMA_RxAlign_3 0x00c00000
#define DMA_M66EnStat 0x00080000 /* 1:66MHz Enable State */
-#define DMA_IntMask 0x00040000 /* 1:Interupt mask */
+#define DMA_IntMask 0x00040000 /* 1:Interrupt mask */
#define DMA_SWIntReq 0x00020000 /* 1:Software Interrupt request */
#define DMA_TxWakeUp 0x00010000 /* 1:Transmit Wake Up */
#define DMA_RxBigE 0x00008000 /* 1:Receive Big Endian */
@@ -134,11 +134,11 @@ struct tc35815_regs {
#define DMA_PowrMgmnt 0x00001000 /* 1:Power Management */
#define DMA_DmBurst_Mask 0x000001fc /* DMA Burst size */
-/* RxFragSize bit asign ---------------------------------------------------- */
+/* RxFragSize bit assign ---------------------------------------------------- */
#define RxFrag_EnPack 0x00008000 /* 1:Enable Packing */
#define RxFrag_MinFragMask 0x00000ffc /* Minimum Fragment */
-/* MAC_Ctl bit asign ------------------------------------------------------- */
+/* MAC_Ctl bit assign ------------------------------------------------------- */
#define MAC_Link10 0x00008000 /* 1:Link Status 10Mbits */
#define MAC_EnMissRoll 0x00002000 /* 1:Enable Missed Roll */
#define MAC_MissRoll 0x00000400 /* 1:Missed Roll */
@@ -152,7 +152,7 @@ struct tc35815_regs {
#define MAC_HaltImm 0x00000002 /* 1:Halt Immediate */
#define MAC_HaltReq 0x00000001 /* 1:Halt request */
-/* PROM_Ctl bit asign ------------------------------------------------------ */
+/* PROM_Ctl bit assign ------------------------------------------------------ */
#define PROM_Busy 0x00008000 /* 1:Busy (Start Operation) */
#define PROM_Read 0x00004000 /*10:Read operation */
#define PROM_Write 0x00002000 /*01:Write operation */
@@ -162,7 +162,7 @@ struct tc35815_regs {
#define PROM_Addr_Ena 0x00000030 /*11xxxx:PROM Write enable */
/*00xxxx: disable */
-/* CAM_Ctl bit asign ------------------------------------------------------- */
+/* CAM_Ctl bit assign ------------------------------------------------------- */
#define CAM_CompEn 0x00000010 /* 1:CAM Compare Enable */
#define CAM_NegCAM 0x00000008 /* 1:Reject packets CAM recognizes,*/
/* accept other */
@@ -170,7 +170,7 @@ struct tc35815_regs {
#define CAM_GroupAcc 0x00000002 /* 1:Multicast accept */
#define CAM_StationAcc 0x00000001 /* 1:unicast accept */
-/* CAM_Ena bit asign ------------------------------------------------------- */
+/* CAM_Ena bit assign ------------------------------------------------------- */
#define CAM_ENTRY_MAX 21 /* CAM Data entry max count */
#define CAM_Ena_Mask ((1<<CAM_ENTRY_MAX)-1) /* CAM Enable bits (Max 21bits) */
#define CAM_Ena_Bit(index) (1 << (index))
@@ -178,7 +178,7 @@ struct tc35815_regs {
#define CAM_ENTRY_SOURCE 1
#define CAM_ENTRY_MACCTL 20
-/* Tx_Ctl bit asign -------------------------------------------------------- */
+/* Tx_Ctl bit assign -------------------------------------------------------- */
#define Tx_En 0x00000001 /* 1:Transmit enable */
#define Tx_TxHalt 0x00000002 /* 1:Transmit Halt Request */
#define Tx_NoPad 0x00000004 /* 1:Suppress Padding */
@@ -192,7 +192,7 @@ struct tc35815_regs {
#define Tx_EnTxPar 0x00002000 /* 1:Enable Transmit Parity */
#define Tx_EnComp 0x00004000 /* 1:Enable Completion */
-/* Tx_Stat bit asign ------------------------------------------------------- */
+/* Tx_Stat bit assign ------------------------------------------------------- */
#define Tx_TxColl_MASK 0x0000000F /* Tx Collision Count */
#define Tx_ExColl 0x00000010 /* Excessive Collision */
#define Tx_TXDefer 0x00000020 /* Transmit Deferred */
@@ -208,7 +208,7 @@ struct tc35815_regs {
#define Tx_Halted 0x00008000 /* Tx Halted */
#define Tx_SQErr 0x00010000 /* Signal Quality Error(SQE) */
-/* Rx_Ctl bit asign -------------------------------------------------------- */
+/* Rx_Ctl bit assign -------------------------------------------------------- */
#define Rx_EnGood 0x00004000 /* 1:Enable Good */
#define Rx_EnRxPar 0x00002000 /* 1:Enable Receive Parity */
#define Rx_EnLongErr 0x00000800 /* 1:Enable Long Error */
@@ -222,7 +222,7 @@ struct tc35815_regs {
#define Rx_RxHalt 0x00000002 /* 1:Receive Halt Request */
#define Rx_RxEn 0x00000001 /* 1:Receive Interrupt Enable */
-/* Rx_Stat bit asign ------------------------------------------------------- */
+/* Rx_Stat bit assign ------------------------------------------------------- */
#define Rx_Halted 0x00008000 /* Rx Halted */
#define Rx_Good 0x00004000 /* Rx Good */
#define Rx_RxPar 0x00002000 /* Rx Parity Error */
@@ -238,7 +238,7 @@ struct tc35815_regs {
#define Rx_Stat_Mask 0x0000FFF0 /* Rx All Status Mask */
-/* Int_En bit asign -------------------------------------------------------- */
+/* Int_En bit assign -------------------------------------------------------- */
#define Int_NRAbtEn 0x00000800 /* 1:Non-recoverable Abort Enable */
#define Int_TxCtlCmpEn 0x00000400 /* 1:Transmit Ctl Complete Enable */
#define Int_DmParErrEn 0x00000200 /* 1:DMA Parity Error Enable */
@@ -253,7 +253,7 @@ struct tc35815_regs {
#define Int_FDAExEn 0x00000001 /* 1:Free Descriptor Area */
/* Exhausted Enable */
-/* Int_Src bit asign ------------------------------------------------------- */
+/* Int_Src bit assign ------------------------------------------------------- */
#define Int_NRabt 0x00004000 /* 1:Non Recoverable error */
#define Int_DmParErrStat 0x00002000 /* 1:DMA Parity Error & Clear */
#define Int_BLEx 0x00001000 /* 1:Buffer List Empty & Clear */
@@ -270,8 +270,8 @@ struct tc35815_regs {
#define Int_IntMacRx 0x00000002 /* 1:Rx controller & Clear */
#define Int_IntMacTx 0x00000001 /* 1:Tx controller & Clear */
-/* MD_CA bit asign --------------------------------------------------------- */
-#define MD_CA_PreSup 0x00001000 /* 1:Preamble Supress */
+/* MD_CA bit assign --------------------------------------------------------- */
+#define MD_CA_PreSup 0x00001000 /* 1:Preamble Suppress */
#define MD_CA_Busy 0x00000800 /* 1:Busy (Start Operation) */
#define MD_CA_Wr 0x00000400 /* 1:Write 0:Read */
@@ -296,7 +296,7 @@ struct BDesc {
#define FD_ALIGN 16
-/* Frame Descripter bit asign ---------------------------------------------- */
+/* Frame Descripter bit assign ---------------------------------------------- */
#define FD_FDLength_MASK 0x0000FFFF /* Length MASK */
#define FD_BDCnt_MASK 0x001F0000 /* BD count MASK in FD */
#define FD_FrmOpt_MASK 0x7C000000 /* Frame option MASK */
@@ -309,8 +309,8 @@ struct BDesc {
#define FD_Next_EOL 0x00000001 /* FD EOL indicator */
#define FD_BDCnt_SHIFT 16
-/* Buffer Descripter bit asign --------------------------------------------- */
-#define BD_BuffLength_MASK 0x0000FFFF /* Recieve Data Size */
+/* Buffer Descripter bit assign --------------------------------------------- */
+#define BD_BuffLength_MASK 0x0000FFFF /* Receive Data Size */
#define BD_RxBDID_MASK 0x00FF0000 /* BD ID Number MASK */
#define BD_RxBDSeqN_MASK 0x7F000000 /* Rx BD Sequence Number */
#define BD_CownsBD 0x80000000 /* BD Controller owner bit */
@@ -339,7 +339,7 @@ struct BDesc {
#define TX_THRESHOLD 1024
/* used threshold with packet max byte for low pci transfer ability.*/
#define TX_THRESHOLD_MAX 1536
-/* setting threshold max value when overrun error occured this count. */
+/* setting threshold max value when overrun error occurred this count. */
#define TX_THRESHOLD_KEEP_LIMIT 10
/* 16 + RX_BUF_NUM * 8 + RX_FD_NUM * 16 + TX_FD_NUM * 32 <= PAGE_SIZE*FD_PAGE_NUM */
diff --git a/drivers/net/tehuti.c b/drivers/net/tehuti.c
index 3397618d4d96..80fbee0d40af 100644
--- a/drivers/net/tehuti.c
+++ b/drivers/net/tehuti.c
@@ -645,7 +645,7 @@ static int bdx_ioctl_priv(struct net_device *ndev, struct ifreq *ifr, int cmd)
if (cmd != SIOCDEVPRIVATE) {
error = copy_from_user(data, ifr->ifr_data, sizeof(data));
if (error) {
- pr_err("cant copy from user\n");
+ pr_err("can't copy from user\n");
RET(-EFAULT);
}
DBG("%d 0x%x 0x%x\n", data[0], data[1], data[2]);
@@ -999,7 +999,7 @@ static inline void bdx_rxdb_free_elem(struct rxdb *db, int n)
*
* RxD fifo is smaller than RxF fifo by design. Upon high load, RxD will be
* filled and packets will be dropped by nic without getting into host or
- * cousing interrupt. Anyway, in that condition, host has no chance to proccess
+ * causing interrupt. Anyway, in that condition, host has no chance to process
* all packets, but dropping in nic is cheaper, since it takes 0 cpu cycles
*/
@@ -1200,8 +1200,8 @@ static void bdx_recycle_skb(struct bdx_priv *priv, struct rxd_desc *rxdd)
RET();
}
-/* bdx_rx_receive - recieves full packets from RXD fifo and pass them to OS
- * NOTE: a special treatment is given to non-continous descriptors
+/* bdx_rx_receive - receives full packets from RXD fifo and pass them to OS
+ * NOTE: a special treatment is given to non-continuous descriptors
 * that start near the end, wrap around and continue at the beginning. A second
* part is copied right after the first, and then descriptor is interpreted as
* normal. fifo has an extra space to allow such operations
@@ -1584,9 +1584,9 @@ err_mem:
}
/*
- * bdx_tx_space - calculates avalable space in TX fifo
+ * bdx_tx_space - calculates available space in TX fifo
* @priv - NIC private structure
- * Returns avaliable space in TX fifo in bytes
+ * Returns available space in TX fifo in bytes
*/
static inline int bdx_tx_space(struct bdx_priv *priv)
{
@@ -2017,9 +2017,11 @@ bdx_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
ndev->irq = pdev->irq;
ndev->features = NETIF_F_IP_CSUM | NETIF_F_SG | NETIF_F_TSO
| NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX |
- NETIF_F_HW_VLAN_FILTER
+ NETIF_F_HW_VLAN_FILTER | NETIF_F_RXCSUM
/*| NETIF_F_FRAGLIST */
;
+ ndev->hw_features = NETIF_F_IP_CSUM | NETIF_F_SG |
+ NETIF_F_TSO | NETIF_F_HW_VLAN_TX;
if (pci_using_dac)
ndev->features |= NETIF_F_HIGHDMA;
@@ -2149,7 +2151,7 @@ static int bdx_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
ecmd->supported = (SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE);
ecmd->advertising = (ADVERTISED_10000baseT_Full | ADVERTISED_FIBRE);
- ecmd->speed = SPEED_10000;
+ ethtool_cmd_speed_set(ecmd, SPEED_10000);
ecmd->duplex = DUPLEX_FULL;
ecmd->port = PORT_FIBRE;
ecmd->transceiver = XCVR_EXTERNAL; /* what does it mean? */
@@ -2188,24 +2190,6 @@ bdx_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo)
}
/*
- * bdx_get_rx_csum - report whether receive checksums are turned on or off
- * @netdev
- */
-static u32 bdx_get_rx_csum(struct net_device *netdev)
-{
- return 1; /* always on */
-}
-
-/*
- * bdx_get_tx_csum - report whether transmit checksums are turned on or off
- * @netdev
- */
-static u32 bdx_get_tx_csum(struct net_device *netdev)
-{
- return (netdev->features & NETIF_F_IP_CSUM) != 0;
-}
-
-/*
* bdx_get_coalesce - get interrupt coalescing parameters
* @netdev
* @ecoal
@@ -2424,10 +2408,6 @@ static void bdx_set_ethtool_ops(struct net_device *netdev)
.set_coalesce = bdx_set_coalesce,
.get_ringparam = bdx_get_ringparam,
.set_ringparam = bdx_set_ringparam,
- .get_rx_csum = bdx_get_rx_csum,
- .get_tx_csum = bdx_get_tx_csum,
- .get_sg = ethtool_op_get_sg,
- .get_tso = ethtool_op_get_tso,
.get_strings = bdx_get_strings,
.get_sset_count = bdx_get_sset_count,
.get_ethtool_stats = bdx_get_ethtool_stats,
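Offloads the user may toggle through ethtool go into hw_features, while offloads that are always on (here RX checksumming, which this NIC cannot disable) stay only in features. A sketch of that split with illustrative flag choices, not the exact tehuti set:

static void foo_init_features(struct net_device *ndev)
{
	ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO;
	/* fixed features: always reported, never toggled by the user */
	ndev->features = ndev->hw_features | NETIF_F_RXCSUM | NETIF_F_HIGHDMA;
}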
diff --git a/drivers/net/tehuti.h b/drivers/net/tehuti.h
index b6ba8601e2b5..c5642fefc9e7 100644
--- a/drivers/net/tehuti.h
+++ b/drivers/net/tehuti.h
@@ -502,7 +502,7 @@ struct txd_desc {
#define GMAC_RX_FILTER_ACRC 0x0010 /* accept crc error */
#define GMAC_RX_FILTER_AM 0x0008 /* accept multicast */
#define GMAC_RX_FILTER_AB 0x0004 /* accept broadcast */
-#define GMAC_RX_FILTER_PRM 0x0001 /* [0:1] promiscous mode */
+#define GMAC_RX_FILTER_PRM 0x0001 /* [0:1] promiscuous mode */
#define MAX_FRAME_AB_VAL 0x3fff /* 13:0 */
diff --git a/drivers/net/tg3.c b/drivers/net/tg3.c
index 73c942d85f07..d5a1f9e3794c 100644
--- a/drivers/net/tg3.c
+++ b/drivers/net/tg3.c
@@ -62,12 +62,36 @@
#include "tg3.h"
+/* Functions & macros to verify TG3_FLAGS types */
+
+static inline int _tg3_flag(enum TG3_FLAGS flag, unsigned long *bits)
+{
+ return test_bit(flag, bits);
+}
+
+static inline void _tg3_flag_set(enum TG3_FLAGS flag, unsigned long *bits)
+{
+ set_bit(flag, bits);
+}
+
+static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits)
+{
+ clear_bit(flag, bits);
+}
+
+#define tg3_flag(tp, flag) \
+ _tg3_flag(TG3_FLAG_##flag, (tp)->tg3_flags)
+#define tg3_flag_set(tp, flag) \
+ _tg3_flag_set(TG3_FLAG_##flag, (tp)->tg3_flags)
+#define tg3_flag_clear(tp, flag) \
+ _tg3_flag_clear(TG3_FLAG_##flag, (tp)->tg3_flags)
+
#define DRV_MODULE_NAME "tg3"
#define TG3_MAJ_NUM 3
-#define TG3_MIN_NUM 117
+#define TG3_MIN_NUM 118
#define DRV_MODULE_VERSION \
__stringify(TG3_MAJ_NUM) "." __stringify(TG3_MIN_NUM)
-#define DRV_MODULE_RELDATE "January 25, 2011"
+#define DRV_MODULE_RELDATE "April 22, 2011"
#define TG3_DEF_MAC_MODE 0
#define TG3_DEF_RX_MODE 0
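The tg3_flag() helpers introduced above treat tg3_flags as a single unsigned-long bitmap accessed with test_bit()/set_bit()/clear_bit(), so a flag is named once instead of being tied to a particular tg3_flags/tg3_flags2/tg3_flags3 word. Illustrative use only (foo_* is not a real tg3 function; the flag names come from the TG3_FLAG_* enum this patch relies on):

static bool foo_uses_tagged_status(struct tg3 *tp)
{
	if (tg3_flag(tp, ENABLE_ASF))	/* was tp->tg3_flags & TG3_FLAG_ENABLE_ASF */
		tg3_flag_set(tp, TAGGED_STATUS);
	else
		tg3_flag_clear(tp, TAGGED_STATUS);

	return tg3_flag(tp, TAGGED_STATUS);
}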
@@ -85,26 +109,25 @@
/* length of time before we decide the hardware is borked,
* and dev->tx_timeout() should be called to fix the problem
*/
+
#define TG3_TX_TIMEOUT (5 * HZ)
/* hardware minimum and maximum for a single frame's data payload */
#define TG3_MIN_MTU 60
#define TG3_MAX_MTU(tp) \
- ((tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) ? 9000 : 1500)
+ (tg3_flag(tp, JUMBO_CAPABLE) ? 9000 : 1500)
/* These numbers seem to be hard coded in the NIC firmware somehow.
* You can't change the ring sizes, but you can change where you place
* them in the NIC onboard memory.
*/
#define TG3_RX_STD_RING_SIZE(tp) \
- ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 || \
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719) ? \
- RX_STD_MAX_SIZE_5717 : 512)
+ (tg3_flag(tp, LRG_PROD_RING_CAP) ? \
+ TG3_RX_STD_MAX_SIZE_5717 : TG3_RX_STD_MAX_SIZE_5700)
#define TG3_DEF_RX_RING_PENDING 200
#define TG3_RX_JMB_RING_SIZE(tp) \
- ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 || \
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719) ? \
- 1024 : 256)
+ (tg3_flag(tp, LRG_PROD_RING_CAP) ? \
+ TG3_RX_JMB_MAX_SIZE_5717 : TG3_RX_JMB_MAX_SIZE_5700)
#define TG3_DEF_RX_JUMBO_RING_PENDING 100
#define TG3_RSS_INDIR_TBL_SIZE 128
@@ -167,11 +190,6 @@
#define TG3_RAW_IP_ALIGN 2
-/* number of ETHTOOL_GSTATS u64's */
-#define TG3_NUM_STATS (sizeof(struct tg3_ethtool_stats)/sizeof(u64))
-
-#define TG3_NUM_TEST 6
-
#define TG3_FW_UPDATE_TIMEOUT_SEC 5
#define FIRMWARE_TG3 "tigon/tg3.bin"
@@ -266,6 +284,7 @@ static DEFINE_PCI_DEVICE_TABLE(tg3_pci_tbl) = {
{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57791)},
{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57795)},
{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5719)},
+ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5720)},
{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
@@ -280,7 +299,7 @@ MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);
static const struct {
const char string[ETH_GSTRING_LEN];
-} ethtool_stats_keys[TG3_NUM_STATS] = {
+} ethtool_stats_keys[] = {
{ "rx_octets" },
{ "rx_fragments" },
{ "rx_ucast_packets" },
@@ -345,6 +364,7 @@ static const struct {
{ "dma_write_prioq_full" },
{ "rxbds_empty" },
{ "rx_discards" },
+ { "mbuf_lwm_thresh_hit" },
{ "rx_errors" },
{ "rx_threshold_hit" },
@@ -359,9 +379,12 @@ static const struct {
{ "nic_tx_threshold_hit" }
};
+#define TG3_NUM_STATS ARRAY_SIZE(ethtool_stats_keys)
+
+
static const struct {
const char string[ETH_GSTRING_LEN];
-} ethtool_test_keys[TG3_NUM_TEST] = {
+} ethtool_test_keys[] = {
{ "nvram test (online) " },
{ "link test (online) " },
{ "register test (offline)" },
@@ -370,6 +393,9 @@ static const struct {
{ "interrupt test (offline)" },
};
+#define TG3_NUM_TEST ARRAY_SIZE(ethtool_test_keys)
+
+
static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
{
writel(val, tp->regs + off);
@@ -467,8 +493,7 @@ static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
*/
static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
{
- if ((tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) ||
- (tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND))
+ if (tg3_flag(tp, PCIX_TARGET_HWBUG) || tg3_flag(tp, ICH_WORKAROUND))
/* Non-posted methods */
tp->write32(tp, off, val);
else {
@@ -488,8 +513,7 @@ static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
{
tp->write32_mbox(tp, off, val);
- if (!(tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER) &&
- !(tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND))
+ if (!tg3_flag(tp, MBOX_WRITE_REORDER) && !tg3_flag(tp, ICH_WORKAROUND))
tp->read32_mbox(tp, off);
}
@@ -497,9 +521,9 @@ static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
{
void __iomem *mbox = tp->regs + off;
writel(val, mbox);
- if (tp->tg3_flags & TG3_FLAG_TXD_MBOX_HWBUG)
+ if (tg3_flag(tp, TXD_MBOX_HWBUG))
writel(val, mbox);
- if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
+ if (tg3_flag(tp, MBOX_WRITE_REORDER))
readl(mbox);
}
@@ -533,7 +557,7 @@ static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
return;
spin_lock_irqsave(&tp->indirect_lock, flags);
- if (tp->tg3_flags & TG3_FLAG_SRAM_USE_CONFIG) {
+ if (tg3_flag(tp, SRAM_USE_CONFIG)) {
pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
@@ -560,7 +584,7 @@ static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
}
spin_lock_irqsave(&tp->indirect_lock, flags);
- if (tp->tg3_flags & TG3_FLAG_SRAM_USE_CONFIG) {
+ if (tg3_flag(tp, SRAM_USE_CONFIG)) {
pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
@@ -597,7 +621,7 @@ static int tg3_ape_lock(struct tg3 *tp, int locknum)
int ret = 0;
u32 status, req, gnt;
- if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
+ if (!tg3_flag(tp, ENABLE_APE))
return 0;
switch (locknum) {
@@ -643,7 +667,7 @@ static void tg3_ape_unlock(struct tg3 *tp, int locknum)
{
u32 gnt;
- if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
+ if (!tg3_flag(tp, ENABLE_APE))
return;
switch (locknum) {
@@ -687,14 +711,14 @@ static void tg3_enable_ints(struct tg3 *tp)
struct tg3_napi *tnapi = &tp->napi[i];
tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
- if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI)
+ if (tg3_flag(tp, 1SHOT_MSI))
tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
tp->coal_now |= tnapi->coal_now;
}
/* Force an initial interrupt */
- if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) &&
+ if (!tg3_flag(tp, TAGGED_STATUS) &&
(tp->napi[0].hw_status->status & SD_STATUS_UPDATED))
tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
else
@@ -710,9 +734,7 @@ static inline unsigned int tg3_has_work(struct tg3_napi *tnapi)
unsigned int work_exists = 0;
/* check for phy events */
- if (!(tp->tg3_flags &
- (TG3_FLAG_USE_LINKCHG_REG |
- TG3_FLAG_POLL_SERDES))) {
+ if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
if (sblk->status & SD_STATUS_LINK_CHG)
work_exists = 1;
}
@@ -740,8 +762,7 @@ static void tg3_int_reenable(struct tg3_napi *tnapi)
* The last_tag we write above tells the chip which piece of
* work we've completed.
*/
- if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) &&
- tg3_has_work(tnapi))
+ if (!tg3_flag(tp, TAGGED_STATUS) && tg3_has_work(tnapi))
tw32(HOSTCC_MODE, tp->coalesce_mode |
HOSTCC_MODE_ENABLE | tnapi->coal_now);
}
@@ -751,8 +772,7 @@ static void tg3_switch_clocks(struct tg3 *tp)
u32 clock_ctrl;
u32 orig_clock_ctrl;
- if ((tp->tg3_flags & TG3_FLAG_CPMU_PRESENT) ||
- (tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
+ if (tg3_flag(tp, CPMU_PRESENT) || tg3_flag(tp, 5780_CLASS))
return;
clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);
@@ -763,7 +783,7 @@ static void tg3_switch_clocks(struct tg3 *tp)
0x1f);
tp->pci_clock_ctrl = clock_ctrl;
- if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
+ if (tg3_flag(tp, 5705_PLUS)) {
if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
tw32_wait_f(TG3PCI_CLOCK_CTRL,
clock_ctrl | CLOCK_CTRL_625_CORE, 40);
@@ -880,6 +900,104 @@ static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
return ret;
}
+static int tg3_phy_cl45_write(struct tg3 *tp, u32 devad, u32 addr, u32 val)
+{
+ int err;
+
+ err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
+ if (err)
+ goto done;
+
+ err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
+ if (err)
+ goto done;
+
+ err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
+ MII_TG3_MMD_CTRL_DATA_NOINC | devad);
+ if (err)
+ goto done;
+
+ err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, val);
+
+done:
+ return err;
+}
+
+static int tg3_phy_cl45_read(struct tg3 *tp, u32 devad, u32 addr, u32 *val)
+{
+ int err;
+
+ err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
+ if (err)
+ goto done;
+
+ err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
+ if (err)
+ goto done;
+
+ err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
+ MII_TG3_MMD_CTRL_DATA_NOINC | devad);
+ if (err)
+ goto done;
+
+ err = tg3_readphy(tp, MII_TG3_MMD_ADDRESS, val);
+
+done:
+ return err;
+}
+
+static int tg3_phydsp_read(struct tg3 *tp, u32 reg, u32 *val)
+{
+ int err;
+
+ err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
+ if (!err)
+ err = tg3_readphy(tp, MII_TG3_DSP_RW_PORT, val);
+
+ return err;
+}
+
+static int tg3_phydsp_write(struct tg3 *tp, u32 reg, u32 val)
+{
+ int err;
+
+ err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
+ if (!err)
+ err = tg3_writephy(tp, MII_TG3_DSP_RW_PORT, val);
+
+ return err;
+}
+
+static int tg3_phy_auxctl_read(struct tg3 *tp, int reg, u32 *val)
+{
+ int err;
+
+ err = tg3_writephy(tp, MII_TG3_AUX_CTRL,
+ (reg << MII_TG3_AUXCTL_MISC_RDSEL_SHIFT) |
+ MII_TG3_AUXCTL_SHDWSEL_MISC);
+ if (!err)
+ err = tg3_readphy(tp, MII_TG3_AUX_CTRL, val);
+
+ return err;
+}
+
+static int tg3_phy_auxctl_write(struct tg3 *tp, int reg, u32 set)
+{
+ if (reg == MII_TG3_AUXCTL_SHDWSEL_MISC)
+ set |= MII_TG3_AUXCTL_MISC_WREN;
+
+ return tg3_writephy(tp, MII_TG3_AUX_CTRL, set | reg);
+}
+
+#define TG3_PHY_AUXCTL_SMDSP_ENABLE(tp) \
+ tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL, \
+ MII_TG3_AUXCTL_ACTL_SMDSP_ENA | \
+ MII_TG3_AUXCTL_ACTL_TX_6DB)
+
+#define TG3_PHY_AUXCTL_SMDSP_DISABLE(tp) \
+ tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL, \
+ MII_TG3_AUXCTL_ACTL_TX_6DB);
+
static int tg3_bmcr_reset(struct tg3 *tp)
{
u32 phy_control;
@@ -982,7 +1100,7 @@ static void tg3_mdio_config_5785(struct tg3 *tp)
return;
}
- if (!(tp->tg3_flags3 & TG3_FLG3_RGMII_INBAND_DISABLE))
+ if (!tg3_flag(tp, RGMII_INBAND_DISABLE))
val |= MAC_PHYCFG2_EMODE_MASK_MASK |
MAC_PHYCFG2_FMODE_MASK_MASK |
MAC_PHYCFG2_GMODE_MASK_MASK |
@@ -995,10 +1113,10 @@ static void tg3_mdio_config_5785(struct tg3 *tp)
val = tr32(MAC_PHYCFG1);
val &= ~(MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK |
MAC_PHYCFG1_RGMII_EXT_RX_DEC | MAC_PHYCFG1_RGMII_SND_STAT_EN);
- if (!(tp->tg3_flags3 & TG3_FLG3_RGMII_INBAND_DISABLE)) {
- if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_RX_EN)
+ if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
+ if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
val |= MAC_PHYCFG1_RGMII_EXT_RX_DEC;
- if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_TX_EN)
+ if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
val |= MAC_PHYCFG1_RGMII_SND_STAT_EN;
}
val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT |
@@ -1013,13 +1131,13 @@ static void tg3_mdio_config_5785(struct tg3 *tp)
MAC_RGMII_MODE_TX_ENABLE |
MAC_RGMII_MODE_TX_LOWPWR |
MAC_RGMII_MODE_TX_RESET);
- if (!(tp->tg3_flags3 & TG3_FLG3_RGMII_INBAND_DISABLE)) {
- if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_RX_EN)
+ if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
+ if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
val |= MAC_RGMII_MODE_RX_INT_B |
MAC_RGMII_MODE_RX_QUALITY |
MAC_RGMII_MODE_RX_ACTIVITY |
MAC_RGMII_MODE_RX_ENG_DET;
- if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_TX_EN)
+ if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
val |= MAC_RGMII_MODE_TX_ENABLE |
MAC_RGMII_MODE_TX_LOWPWR |
MAC_RGMII_MODE_TX_RESET;
@@ -1033,7 +1151,7 @@ static void tg3_mdio_start(struct tg3 *tp)
tw32_f(MAC_MI_MODE, tp->mi_mode);
udelay(80);
- if ((tp->tg3_flags3 & TG3_FLG3_MDIOBUS_INITED) &&
+ if (tg3_flag(tp, MDIOBUS_INITED) &&
GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
tg3_mdio_config_5785(tp);
}
@@ -1044,8 +1162,7 @@ static int tg3_mdio_init(struct tg3 *tp)
u32 reg;
struct phy_device *phydev;
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719) {
+ if (tg3_flag(tp, 5717_PLUS)) {
u32 is_serdes;
tp->phy_addr = PCI_FUNC(tp->pdev->devfn) + 1;
@@ -1062,8 +1179,7 @@ static int tg3_mdio_init(struct tg3 *tp)
tg3_mdio_start(tp);
- if (!(tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) ||
- (tp->tg3_flags3 & TG3_FLG3_MDIOBUS_INITED))
+ if (!tg3_flag(tp, USE_PHYLIB) || tg3_flag(tp, MDIOBUS_INITED))
return 0;
tp->mdio_bus = mdiobus_alloc();
@@ -1119,11 +1235,11 @@ static int tg3_mdio_init(struct tg3 *tp)
PHY_BRCM_RX_REFCLK_UNUSED |
PHY_BRCM_DIS_TXCRXC_NOENRGY |
PHY_BRCM_AUTO_PWRDWN_ENABLE;
- if (tp->tg3_flags3 & TG3_FLG3_RGMII_INBAND_DISABLE)
+ if (tg3_flag(tp, RGMII_INBAND_DISABLE))
phydev->dev_flags |= PHY_BRCM_STD_IBND_DISABLE;
- if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_RX_EN)
+ if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
phydev->dev_flags |= PHY_BRCM_EXT_IBND_RX_ENABLE;
- if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_TX_EN)
+ if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
phydev->dev_flags |= PHY_BRCM_EXT_IBND_TX_ENABLE;
/* fallthru */
case PHY_ID_RTL8211C:
@@ -1137,7 +1253,7 @@ static int tg3_mdio_init(struct tg3 *tp)
break;
}
- tp->tg3_flags3 |= TG3_FLG3_MDIOBUS_INITED;
+ tg3_flag_set(tp, MDIOBUS_INITED);
if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
tg3_mdio_config_5785(tp);
@@ -1147,59 +1263,13 @@ static int tg3_mdio_init(struct tg3 *tp)
static void tg3_mdio_fini(struct tg3 *tp)
{
- if (tp->tg3_flags3 & TG3_FLG3_MDIOBUS_INITED) {
- tp->tg3_flags3 &= ~TG3_FLG3_MDIOBUS_INITED;
+ if (tg3_flag(tp, MDIOBUS_INITED)) {
+ tg3_flag_clear(tp, MDIOBUS_INITED);
mdiobus_unregister(tp->mdio_bus);
mdiobus_free(tp->mdio_bus);
}
}
-static int tg3_phy_cl45_write(struct tg3 *tp, u32 devad, u32 addr, u32 val)
-{
- int err;
-
- err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
- if (err)
- goto done;
-
- err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
- if (err)
- goto done;
-
- err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
- MII_TG3_MMD_CTRL_DATA_NOINC | devad);
- if (err)
- goto done;
-
- err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, val);
-
-done:
- return err;
-}
-
-static int tg3_phy_cl45_read(struct tg3 *tp, u32 devad, u32 addr, u32 *val)
-{
- int err;
-
- err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
- if (err)
- goto done;
-
- err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
- if (err)
- goto done;
-
- err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
- MII_TG3_MMD_CTRL_DATA_NOINC | devad);
- if (err)
- goto done;
-
- err = tg3_readphy(tp, MII_TG3_MMD_ADDRESS, val);
-
-done:
- return err;
-}
-
/* tp->lock is held. */
static inline void tg3_generate_fw_event(struct tg3 *tp)
{
@@ -1247,8 +1317,7 @@ static void tg3_ump_link_report(struct tg3 *tp)
u32 reg;
u32 val;
- if (!(tp->tg3_flags2 & TG3_FLG2_5780_CLASS) ||
- !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
+ if (!tg3_flag(tp, 5780_CLASS) || !tg3_flag(tp, ENABLE_ASF))
return;
tg3_wait_for_event_ack(tp);
@@ -1308,6 +1377,11 @@ static void tg3_link_report(struct tg3 *tp)
"on" : "off",
(tp->link_config.active_flowctrl & FLOW_CTRL_RX) ?
"on" : "off");
+
+ if (tp->phy_flags & TG3_PHYFLG_EEE_CAP)
+ netdev_info(tp->dev, "EEE is %s\n",
+ tp->setlpicnt ? "enabled" : "disabled");
+
tg3_ump_link_report(tp);
}
}
@@ -1373,13 +1447,12 @@ static void tg3_setup_flow_control(struct tg3 *tp, u32 lcladv, u32 rmtadv)
u32 old_rx_mode = tp->rx_mode;
u32 old_tx_mode = tp->tx_mode;
- if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB)
+ if (tg3_flag(tp, USE_PHYLIB))
autoneg = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]->autoneg;
else
autoneg = tp->link_config.autoneg;
- if (autoneg == AUTONEG_ENABLE &&
- (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG)) {
+ if (autoneg == AUTONEG_ENABLE && tg3_flag(tp, PAUSE_AUTONEG)) {
if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
flowctrl = tg3_resolve_flowctrl_1000X(lcladv, rmtadv);
else
@@ -1576,28 +1649,6 @@ static void tg3_phy_fini(struct tg3 *tp)
}
}
-static int tg3_phydsp_read(struct tg3 *tp, u32 reg, u32 *val)
-{
- int err;
-
- err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
- if (!err)
- err = tg3_readphy(tp, MII_TG3_DSP_RW_PORT, val);
-
- return err;
-}
-
-static int tg3_phydsp_write(struct tg3 *tp, u32 reg, u32 val)
-{
- int err;
-
- err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
- if (!err)
- err = tg3_writephy(tp, MII_TG3_DSP_RW_PORT, val);
-
- return err;
-}
-
static void tg3_phy_fet_toggle_apd(struct tg3 *tp, bool enable)
{
u32 phytest;
@@ -1622,9 +1673,8 @@ static void tg3_phy_toggle_apd(struct tg3 *tp, bool enable)
{
u32 reg;
- if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ||
- ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719) &&
+ if (!tg3_flag(tp, 5705_PLUS) ||
+ (tg3_flag(tp, 5717_PLUS) &&
(tp->phy_flags & TG3_PHYFLG_MII_SERDES)))
return;
@@ -1658,7 +1708,7 @@ static void tg3_phy_toggle_automdix(struct tg3 *tp, int enable)
{
u32 phy;
- if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ||
+ if (!tg3_flag(tp, 5705_PLUS) ||
(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
return;
@@ -1680,31 +1730,33 @@ static void tg3_phy_toggle_automdix(struct tg3 *tp, int enable)
tg3_writephy(tp, MII_TG3_FET_TEST, ephy);
}
} else {
- phy = MII_TG3_AUXCTL_MISC_RDSEL_MISC |
- MII_TG3_AUXCTL_SHDWSEL_MISC;
- if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, phy) &&
- !tg3_readphy(tp, MII_TG3_AUX_CTRL, &phy)) {
+ int ret;
+
+ ret = tg3_phy_auxctl_read(tp,
+ MII_TG3_AUXCTL_SHDWSEL_MISC, &phy);
+ if (!ret) {
if (enable)
phy |= MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
else
phy &= ~MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
- phy |= MII_TG3_AUXCTL_MISC_WREN;
- tg3_writephy(tp, MII_TG3_AUX_CTRL, phy);
+ tg3_phy_auxctl_write(tp,
+ MII_TG3_AUXCTL_SHDWSEL_MISC, phy);
}
}
}
static void tg3_phy_set_wirespeed(struct tg3 *tp)
{
+ int ret;
u32 val;
if (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED)
return;
- if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x7007) &&
- !tg3_readphy(tp, MII_TG3_AUX_CTRL, &val))
- tg3_writephy(tp, MII_TG3_AUX_CTRL,
- (val | (1 << 15) | (1 << 4)));
+ ret = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_MISC, &val);
+ if (!ret)
+ tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_MISC,
+ val | MII_TG3_AUXCTL_MISC_WIRESPD_EN);
}
static void tg3_phy_apply_otp(struct tg3 *tp)
@@ -1716,11 +1768,8 @@ static void tg3_phy_apply_otp(struct tg3 *tp)
otp = tp->phy_otp;
- /* Enable SM_DSP clock and tx 6dB coding. */
- phy = MII_TG3_AUXCTL_SHDWSEL_AUXCTL |
- MII_TG3_AUXCTL_ACTL_SMDSP_ENA |
- MII_TG3_AUXCTL_ACTL_TX_6DB;
- tg3_writephy(tp, MII_TG3_AUX_CTRL, phy);
+ if (TG3_PHY_AUXCTL_SMDSP_ENABLE(tp))
+ return;
phy = ((otp & TG3_OTP_AGCTGT_MASK) >> TG3_OTP_AGCTGT_SHIFT);
phy |= MII_TG3_DSP_TAP1_AGCTGT_DFLT;
@@ -1744,10 +1793,7 @@ static void tg3_phy_apply_otp(struct tg3 *tp)
((otp & TG3_OTP_RCOFF_MASK) >> TG3_OTP_RCOFF_SHIFT);
tg3_phydsp_write(tp, MII_TG3_DSP_EXP97, phy);
- /* Turn off SM_DSP clock. */
- phy = MII_TG3_AUXCTL_SHDWSEL_AUXCTL |
- MII_TG3_AUXCTL_ACTL_TX_6DB;
- tg3_writephy(tp, MII_TG3_AUX_CTRL, phy);
+ TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
}
static void tg3_phy_eee_adjust(struct tg3 *tp, u32 current_link_up)
@@ -1782,18 +1828,11 @@ static void tg3_phy_eee_adjust(struct tg3 *tp, u32 current_link_up)
case ASIC_REV_5717:
case ASIC_REV_5719:
case ASIC_REV_57765:
- /* Enable SM_DSP clock and tx 6dB coding. */
- val = MII_TG3_AUXCTL_SHDWSEL_AUXCTL |
- MII_TG3_AUXCTL_ACTL_SMDSP_ENA |
- MII_TG3_AUXCTL_ACTL_TX_6DB;
- tg3_writephy(tp, MII_TG3_AUX_CTRL, val);
-
- tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, 0x0000);
-
- /* Turn off SM_DSP clock. */
- val = MII_TG3_AUXCTL_SHDWSEL_AUXCTL |
- MII_TG3_AUXCTL_ACTL_TX_6DB;
- tg3_writephy(tp, MII_TG3_AUX_CTRL, val);
+ if (!TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
+ tg3_phydsp_write(tp, MII_TG3_DSP_TAP26,
+ 0x0000);
+ TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
+ }
}
/* Fallthrough */
case TG3_CL45_D7_EEERES_STAT_LP_100TX:
@@ -1945,8 +1984,9 @@ static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
(MII_TG3_CTRL_AS_MASTER |
MII_TG3_CTRL_ENABLE_AS_MASTER));
- /* Enable SM_DSP_CLOCK and 6dB. */
- tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
+ err = TG3_PHY_AUXCTL_SMDSP_ENABLE(tp);
+ if (err)
+ return err;
/* Block the PHY control access. */
tg3_phydsp_write(tp, 0x8005, 0x0800);
@@ -1965,13 +2005,7 @@ static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0000);
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
- /* Set Extended packet length bit for jumbo frames */
- tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4400);
- } else {
- tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
- }
+ TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
tg3_writephy(tp, MII_TG3_CTRL, phy9_orig);
@@ -2047,8 +2081,7 @@ static int tg3_phy_reset(struct tg3 *tp)
}
}
- if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719) &&
+ if (tg3_flag(tp, 5717_PLUS) &&
(tp->phy_flags & TG3_PHYFLG_MII_SERDES))
return 0;
@@ -2060,49 +2093,57 @@ static int tg3_phy_reset(struct tg3 *tp)
tg3_phy_toggle_apd(tp, false);
out:
- if (tp->phy_flags & TG3_PHYFLG_ADC_BUG) {
- tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
+ if ((tp->phy_flags & TG3_PHYFLG_ADC_BUG) &&
+ !TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
tg3_phydsp_write(tp, 0x201f, 0x2aaa);
tg3_phydsp_write(tp, 0x000a, 0x0323);
- tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
+ TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
}
+
if (tp->phy_flags & TG3_PHYFLG_5704_A0_BUG) {
tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
}
+
if (tp->phy_flags & TG3_PHYFLG_BER_BUG) {
- tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
- tg3_phydsp_write(tp, 0x000a, 0x310b);
- tg3_phydsp_write(tp, 0x201f, 0x9506);
- tg3_phydsp_write(tp, 0x401f, 0x14e2);
- tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
+ if (!TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
+ tg3_phydsp_write(tp, 0x000a, 0x310b);
+ tg3_phydsp_write(tp, 0x201f, 0x9506);
+ tg3_phydsp_write(tp, 0x401f, 0x14e2);
+ TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
+ }
} else if (tp->phy_flags & TG3_PHYFLG_JITTER_BUG) {
- tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
- tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
- if (tp->phy_flags & TG3_PHYFLG_ADJUST_TRIM) {
- tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b);
- tg3_writephy(tp, MII_TG3_TEST1,
- MII_TG3_TEST1_TRIM_EN | 0x4);
- } else
- tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);
- tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
+ if (!TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
+ tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
+ if (tp->phy_flags & TG3_PHYFLG_ADJUST_TRIM) {
+ tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b);
+ tg3_writephy(tp, MII_TG3_TEST1,
+ MII_TG3_TEST1_TRIM_EN | 0x4);
+ } else
+ tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);
+
+ TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
+ }
}
+
/* Set Extended packet length bit (bit 14) on all chips that */
/* support jumbo frames */
if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
/* Cannot do read-modify-write on 5401 */
- tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20);
- } else if (tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) {
+ tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
+ } else if (tg3_flag(tp, JUMBO_CAPABLE)) {
/* Set bit 14 with read-modify-write to preserve other bits */
- if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0007) &&
- !tg3_readphy(tp, MII_TG3_AUX_CTRL, &val))
- tg3_writephy(tp, MII_TG3_AUX_CTRL, val | 0x4000);
+ err = tg3_phy_auxctl_read(tp,
+ MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
+ if (!err)
+ tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
+ val | MII_TG3_AUXCTL_ACTL_EXTPKTLEN);
}
/* Set phy register 0x10 bit 0 to high fifo elasticity to support
* jumbo frames transmission.
*/
- if (tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) {
+ if (tg3_flag(tp, JUMBO_CAPABLE)) {
if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &val))
tg3_writephy(tp, MII_TG3_EXT_CTRL,
val | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
@@ -2123,14 +2164,15 @@ static void tg3_frob_aux_power(struct tg3 *tp)
bool need_vaux = false;
/* The GPIOs do something completely different on 57765. */
- if ((tp->tg3_flags2 & TG3_FLG2_IS_NIC) == 0 ||
+ if (!tg3_flag(tp, IS_NIC) ||
GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
return;
if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714 ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) &&
+ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
+ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) &&
tp->pdev_peer != tp->pdev) {
struct net_device *dev_peer;
@@ -2140,17 +2182,16 @@ static void tg3_frob_aux_power(struct tg3 *tp)
if (dev_peer) {
struct tg3 *tp_peer = netdev_priv(dev_peer);
- if (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE)
+ if (tg3_flag(tp_peer, INIT_COMPLETE))
return;
- if ((tp_peer->tg3_flags & TG3_FLAG_WOL_ENABLE) ||
- (tp_peer->tg3_flags & TG3_FLAG_ENABLE_ASF))
+ if (tg3_flag(tp_peer, WOL_ENABLE) ||
+ tg3_flag(tp_peer, ENABLE_ASF))
need_vaux = true;
}
}
- if ((tp->tg3_flags & TG3_FLAG_WOL_ENABLE) ||
- (tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
+ if (tg3_flag(tp, WOL_ENABLE) || tg3_flag(tp, ENABLE_ASF))
need_vaux = true;
if (need_vaux) {
@@ -2304,11 +2345,10 @@ static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power)
tg3_writephy(tp, MII_TG3_EXT_CTRL,
MII_TG3_EXT_CTRL_FORCE_LED_OFF);
- tg3_writephy(tp, MII_TG3_AUX_CTRL,
- MII_TG3_AUXCTL_SHDWSEL_PWRCTL |
- MII_TG3_AUXCTL_PCTL_100TX_LPWR |
- MII_TG3_AUXCTL_PCTL_SPR_ISOLATE |
- MII_TG3_AUXCTL_PCTL_VREG_11V);
+ val = MII_TG3_AUXCTL_PCTL_100TX_LPWR |
+ MII_TG3_AUXCTL_PCTL_SPR_ISOLATE |
+ MII_TG3_AUXCTL_PCTL_VREG_11V;
+ tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, val);
}
/* The PHY should not be powered down on some chips because
@@ -2334,7 +2374,7 @@ static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power)
/* tp->lock is held. */
static int tg3_nvram_lock(struct tg3 *tp)
{
- if (tp->tg3_flags & TG3_FLAG_NVRAM) {
+ if (tg3_flag(tp, NVRAM)) {
int i;
if (tp->nvram_lock_cnt == 0) {
@@ -2357,7 +2397,7 @@ static int tg3_nvram_lock(struct tg3 *tp)
/* tp->lock is held. */
static void tg3_nvram_unlock(struct tg3 *tp)
{
- if (tp->tg3_flags & TG3_FLAG_NVRAM) {
+ if (tg3_flag(tp, NVRAM)) {
if (tp->nvram_lock_cnt > 0)
tp->nvram_lock_cnt--;
if (tp->nvram_lock_cnt == 0)
@@ -2368,8 +2408,7 @@ static void tg3_nvram_unlock(struct tg3 *tp)
/* tp->lock is held. */
static void tg3_enable_nvram_access(struct tg3 *tp)
{
- if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
- !(tp->tg3_flags3 & TG3_FLG3_PROTECTED_NVRAM)) {
+ if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
u32 nvaccess = tr32(NVRAM_ACCESS);
tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
@@ -2379,8 +2418,7 @@ static void tg3_enable_nvram_access(struct tg3 *tp)
/* tp->lock is held. */
static void tg3_disable_nvram_access(struct tg3 *tp)
{
- if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
- !(tp->tg3_flags3 & TG3_FLG3_PROTECTED_NVRAM)) {
+ if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
u32 nvaccess = tr32(NVRAM_ACCESS);
tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
@@ -2450,10 +2488,10 @@ static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
{
- if ((tp->tg3_flags & TG3_FLAG_NVRAM) &&
- (tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) &&
- (tp->tg3_flags2 & TG3_FLG2_FLASH) &&
- !(tp->tg3_flags3 & TG3_FLG3_NO_NVRAM_ADDR_TRANS) &&
+ if (tg3_flag(tp, NVRAM) &&
+ tg3_flag(tp, NVRAM_BUFFERED) &&
+ tg3_flag(tp, FLASH) &&
+ !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
(tp->nvram_jedecnum == JEDEC_ATMEL))
addr = ((addr / tp->nvram_pagesize) <<
@@ -2465,10 +2503,10 @@ static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr)
{
- if ((tp->tg3_flags & TG3_FLAG_NVRAM) &&
- (tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) &&
- (tp->tg3_flags2 & TG3_FLG2_FLASH) &&
- !(tp->tg3_flags3 & TG3_FLG3_NO_NVRAM_ADDR_TRANS) &&
+ if (tg3_flag(tp, NVRAM) &&
+ tg3_flag(tp, NVRAM_BUFFERED) &&
+ tg3_flag(tp, FLASH) &&
+ !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
(tp->nvram_jedecnum == JEDEC_ATMEL))
addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) *
@@ -2488,7 +2526,7 @@ static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
{
int ret;
- if (!(tp->tg3_flags & TG3_FLAG_NVRAM))
+ if (!tg3_flag(tp, NVRAM))
return tg3_nvram_read_using_eeprom(tp, offset, val);
offset = tg3_nvram_phys_addr(tp, offset);
@@ -2580,7 +2618,7 @@ static int tg3_power_up(struct tg3 *tp)
pci_set_power_state(tp->pdev, PCI_D0);
/* Switch out of Vaux if it is a NIC */
- if (tp->tg3_flags2 & TG3_FLG2_IS_NIC)
+ if (tg3_flag(tp, IS_NIC))
tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl, 100);
return 0;
@@ -2594,7 +2632,7 @@ static int tg3_power_down_prepare(struct tg3 *tp)
tg3_enable_register_access(tp);
/* Restore the CLKREQ setting. */
- if (tp->tg3_flags3 & TG3_FLG3_CLKREQ_BUG) {
+ if (tg3_flag(tp, CLKREQ_BUG)) {
u16 lnkctl;
pci_read_config_word(tp->pdev,
@@ -2611,9 +2649,9 @@ static int tg3_power_down_prepare(struct tg3 *tp)
misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);
device_should_wake = device_may_wakeup(&tp->pdev->dev) &&
- (tp->tg3_flags & TG3_FLAG_WOL_ENABLE);
+ tg3_flag(tp, WOL_ENABLE);
- if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
+ if (tg3_flag(tp, USE_PHYLIB)) {
do_low_power = false;
if ((tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) &&
!(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
@@ -2634,9 +2672,8 @@ static int tg3_power_down_prepare(struct tg3 *tp)
ADVERTISED_Autoneg |
ADVERTISED_10baseT_Half;
- if ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) ||
- device_should_wake) {
- if (tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB)
+ if (tg3_flag(tp, ENABLE_ASF) || device_should_wake) {
+ if (tg3_flag(tp, WOL_SPEED_100MB))
advertising |=
ADVERTISED_100baseT_Half |
ADVERTISED_100baseT_Full |
@@ -2681,7 +2718,7 @@ static int tg3_power_down_prepare(struct tg3 *tp)
val = tr32(GRC_VCPU_EXT_CTRL);
tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_DISABLE_WOL);
- } else if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
+ } else if (!tg3_flag(tp, ENABLE_ASF)) {
int i;
u32 val;
@@ -2692,7 +2729,7 @@ static int tg3_power_down_prepare(struct tg3 *tp)
msleep(1);
}
}
- if (tp->tg3_flags & TG3_FLAG_WOL_CAP)
+ if (tg3_flag(tp, WOL_CAP))
tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
WOL_DRV_STATE_SHUTDOWN |
WOL_DRV_WOL |
@@ -2702,8 +2739,13 @@ static int tg3_power_down_prepare(struct tg3 *tp)
u32 mac_mode;
if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
- if (do_low_power) {
- tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x5a);
+ if (do_low_power &&
+ !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
+ tg3_phy_auxctl_write(tp,
+ MII_TG3_AUXCTL_SHDWSEL_PWRCTL,
+ MII_TG3_AUXCTL_PCTL_WOL_EN |
+ MII_TG3_AUXCTL_PCTL_100TX_LPWR |
+ MII_TG3_AUXCTL_PCTL_CL_AB_TXDAC);
udelay(40);
}
@@ -2715,8 +2757,7 @@ static int tg3_power_down_prepare(struct tg3 *tp)
mac_mode |= tp->mac_mode & MAC_MODE_LINK_POLARITY;
if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
ASIC_REV_5700) {
- u32 speed = (tp->tg3_flags &
- TG3_FLAG_WOL_SPEED_100MB) ?
+ u32 speed = tg3_flag(tp, WOL_SPEED_100MB) ?
SPEED_100 : SPEED_10;
if (tg3_5700_link_polarity(tp, speed))
mac_mode |= MAC_MODE_LINK_POLARITY;
@@ -2727,17 +2768,15 @@ static int tg3_power_down_prepare(struct tg3 *tp)
mac_mode = MAC_MODE_PORT_MODE_TBI;
}
- if (!(tp->tg3_flags2 & TG3_FLG2_5750_PLUS))
+ if (!tg3_flag(tp, 5750_PLUS))
tw32(MAC_LED_CTRL, tp->led_ctrl);
mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
- if (((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) &&
- !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) &&
- ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) ||
- (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)))
+ if ((tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS)) &&
+ (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)))
mac_mode |= MAC_MODE_KEEP_FRAME_IN_WOL;
- if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)
+ if (tg3_flag(tp, ENABLE_APE))
mac_mode |= MAC_MODE_APE_TX_EN |
MAC_MODE_APE_RX_EN |
MAC_MODE_TDE_ENABLE;
@@ -2749,7 +2788,7 @@ static int tg3_power_down_prepare(struct tg3 *tp)
udelay(10);
}
- if (!(tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB) &&
+ if (!tg3_flag(tp, WOL_SPEED_100MB) &&
(GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
u32 base_val;
@@ -2760,12 +2799,11 @@ static int tg3_power_down_prepare(struct tg3 *tp)
tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
CLOCK_CTRL_PWRDOWN_PLL133, 40);
- } else if ((tp->tg3_flags2 & TG3_FLG2_5780_CLASS) ||
- (tp->tg3_flags & TG3_FLAG_CPMU_PRESENT) ||
+ } else if (tg3_flag(tp, 5780_CLASS) ||
+ tg3_flag(tp, CPMU_PRESENT) ||
(GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)) {
/* do nothing */
- } else if (!((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
- (tp->tg3_flags & TG3_FLAG_ENABLE_ASF))) {
+ } else if (!(tg3_flag(tp, 5750_PLUS) && tg3_flag(tp, ENABLE_ASF))) {
u32 newbits1, newbits2;
if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
@@ -2774,7 +2812,7 @@ static int tg3_power_down_prepare(struct tg3 *tp)
CLOCK_CTRL_TXCLK_DISABLE |
CLOCK_CTRL_ALTCLK);
newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
- } else if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
+ } else if (tg3_flag(tp, 5705_PLUS)) {
newbits1 = CLOCK_CTRL_625_CORE;
newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
} else {
@@ -2788,7 +2826,7 @@ static int tg3_power_down_prepare(struct tg3 *tp)
tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
40);
- if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
+ if (!tg3_flag(tp, 5705_PLUS)) {
u32 newbits3;
if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
@@ -2805,8 +2843,7 @@ static int tg3_power_down_prepare(struct tg3 *tp)
}
}
- if (!(device_should_wake) &&
- !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
+ if (!(device_should_wake) && !tg3_flag(tp, ENABLE_ASF))
tg3_power_down_phy(tp, do_low_power);
tg3_frob_aux_power(tp);
@@ -2818,7 +2855,7 @@ static int tg3_power_down_prepare(struct tg3 *tp)
val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
tw32(0x7d00, val);
- if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
+ if (!tg3_flag(tp, ENABLE_ASF)) {
int err;
err = tg3_nvram_lock(tp);
@@ -2837,7 +2874,7 @@ static void tg3_power_down(struct tg3 *tp)
{
tg3_power_down_prepare(tp);
- pci_wake_from_d3(tp->pdev, tp->tg3_flags & TG3_FLAG_WOL_ENABLE);
+ pci_wake_from_d3(tp->pdev, tg3_flag(tp, WOL_ENABLE));
pci_set_power_state(tp->pdev, PCI_D3hot);
}
@@ -2901,7 +2938,7 @@ static void tg3_phy_copper_begin(struct tg3 *tp)
new_adv = (ADVERTISE_10HALF | ADVERTISE_10FULL |
ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
- if (tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB)
+ if (tg3_flag(tp, WOL_SPEED_100MB))
new_adv |= (ADVERTISE_100HALF | ADVERTISE_100FULL);
tg3_writephy(tp, MII_ADVERTISE, new_adv);
@@ -2983,11 +3020,7 @@ static void tg3_phy_copper_begin(struct tg3 *tp)
tw32(TG3_CPMU_EEE_MODE,
tr32(TG3_CPMU_EEE_MODE) & ~TG3_CPMU_EEEMD_LPI_ENABLE);
- /* Enable SM_DSP clock and tx 6dB coding. */
- val = MII_TG3_AUXCTL_SHDWSEL_AUXCTL |
- MII_TG3_AUXCTL_ACTL_SMDSP_ENA |
- MII_TG3_AUXCTL_ACTL_TX_6DB;
- tg3_writephy(tp, MII_TG3_AUX_CTRL, val);
+ TG3_PHY_AUXCTL_SMDSP_ENABLE(tp);
switch (GET_ASIC_REV(tp->pci_chip_rev_id)) {
case ASIC_REV_5717:
@@ -3016,10 +3049,7 @@ static void tg3_phy_copper_begin(struct tg3 *tp)
}
tg3_phy_cl45_write(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, val);
- /* Turn off SM_DSP clock. */
- val = MII_TG3_AUXCTL_SHDWSEL_AUXCTL |
- MII_TG3_AUXCTL_ACTL_TX_6DB;
- tg3_writephy(tp, MII_TG3_AUX_CTRL, val);
+ TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
}
if (tp->link_config.autoneg == AUTONEG_DISABLE &&
@@ -3077,7 +3107,7 @@ static int tg3_init_5401phy_dsp(struct tg3 *tp)
/* Turn off tap power management. */
/* Set Extended packet length bit */
- err = tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20);
+ err = tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
err |= tg3_phydsp_write(tp, 0x0012, 0x1804);
err |= tg3_phydsp_write(tp, 0x0013, 0x1204);
@@ -3140,7 +3170,7 @@ static int tg3_adv_1000T_flowctrl_ok(struct tg3 *tp, u32 *lcladv, u32 *rmtadv)
if (curadv != reqadv)
return 0;
- if (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG)
+ if (tg3_flag(tp, PAUSE_AUTONEG))
tg3_readphy(tp, MII_LPA, rmtadv);
} else {
/* Reprogram the advertisement register, even if it
@@ -3183,7 +3213,7 @@ static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
udelay(80);
}
- tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x02);
+ tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, 0);
/* Some third-party PHYs need to be reset on link going
* down.
@@ -3203,7 +3233,7 @@ static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
tg3_readphy(tp, MII_BMSR, &bmsr);
if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
- !(tp->tg3_flags & TG3_FLAG_INIT_COMPLETE))
+ !tg3_flag(tp, INIT_COMPLETE))
bmsr = 0;
if (!(bmsr & BMSR_LSTATUS)) {
@@ -3264,11 +3294,13 @@ static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
current_duplex = DUPLEX_INVALID;
if (tp->phy_flags & TG3_PHYFLG_CAPACITIVE_COUPLING) {
- tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4007);
- tg3_readphy(tp, MII_TG3_AUX_CTRL, &val);
- if (!(val & (1 << 10))) {
- val |= (1 << 10);
- tg3_writephy(tp, MII_TG3_AUX_CTRL, val);
+ err = tg3_phy_auxctl_read(tp,
+ MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
+ &val);
+ if (!err && !(val & (1 << 10))) {
+ tg3_phy_auxctl_write(tp,
+ MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
+ val | (1 << 10));
goto relink;
}
}
@@ -3341,8 +3373,8 @@ relink:
tg3_phy_copper_begin(tp);
tg3_readphy(tp, MII_BMSR, &bmsr);
- if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
- (bmsr & BMSR_LSTATUS))
+ if ((!tg3_readphy(tp, MII_BMSR, &bmsr) && (bmsr & BMSR_LSTATUS)) ||
+ (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
current_link_up = 1;
}
@@ -3385,7 +3417,7 @@ relink:
tg3_phy_eee_adjust(tp, current_link_up);
- if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) {
+ if (tg3_flag(tp, USE_LINKCHG_REG)) {
/* Polled via timer. */
tw32_f(MAC_EVENT, 0);
} else {
@@ -3396,8 +3428,7 @@ relink:
if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 &&
current_link_up == 1 &&
tp->link_config.active_speed == SPEED_1000 &&
- ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) ||
- (tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED))) {
+ (tg3_flag(tp, PCIX_MODE) || tg3_flag(tp, PCI_HIGH_SPEED))) {
udelay(120);
tw32_f(MAC_STATUS,
(MAC_STATUS_SYNC_CHANGED |
@@ -3409,7 +3440,7 @@ relink:
}
/* Prevent send BD corruption. */
- if (tp->tg3_flags3 & TG3_FLG3_CLKREQ_BUG) {
+ if (tg3_flag(tp, CLKREQ_BUG)) {
u16 oldlnkctl, newlnkctl;
pci_read_config_word(tp->pdev,
@@ -3804,7 +3835,7 @@ static void tg3_init_bcm8002(struct tg3 *tp)
int i;
/* Reset when initting first time or we have a link. */
- if ((tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) &&
+ if (tg3_flag(tp, INIT_COMPLETE) &&
!(mac_status & MAC_STATUS_PCS_SYNCED))
return;
@@ -4065,9 +4096,9 @@ static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset)
orig_active_speed = tp->link_config.active_speed;
orig_active_duplex = tp->link_config.active_duplex;
- if (!(tp->tg3_flags2 & TG3_FLG2_HW_AUTONEG) &&
+ if (!tg3_flag(tp, HW_AUTONEG) &&
netif_carrier_ok(tp->dev) &&
- (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE)) {
+ tg3_flag(tp, INIT_COMPLETE)) {
mac_status = tr32(MAC_STATUS);
mac_status &= (MAC_STATUS_PCS_SYNCED |
MAC_STATUS_SIGNAL_DET |
@@ -4098,7 +4129,7 @@ static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset)
current_link_up = 0;
mac_status = tr32(MAC_STATUS);
- if (tp->tg3_flags2 & TG3_FLG2_HW_AUTONEG)
+ if (tg3_flag(tp, HW_AUTONEG))
current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
else
current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);
@@ -4297,7 +4328,7 @@ static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset)
current_duplex = DUPLEX_FULL;
else
current_duplex = DUPLEX_HALF;
- } else if (!(tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
+ } else if (!tg3_flag(tp, 5780_CLASS)) {
/* Link is up via parallel detect */
} else {
current_link_up = 0;
@@ -4394,6 +4425,7 @@ static void tg3_serdes_parallel_detect(struct tg3 *tp)
static int tg3_setup_phy(struct tg3 *tp, int force_reset)
{
+ u32 val;
int err;
if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
@@ -4404,7 +4436,7 @@ static int tg3_setup_phy(struct tg3 *tp, int force_reset)
err = tg3_setup_copper_phy(tp, force_reset);
if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX) {
- u32 val, scale;
+ u32 scale;
val = tr32(TG3_CPMU_CLCK_STAT) & CPMU_CLCK_STAT_MAC_CLCK_MASK;
if (val == CPMU_CLCK_STAT_MAC_CLCK_62_5)
@@ -4419,19 +4451,22 @@ static int tg3_setup_phy(struct tg3 *tp, int force_reset)
tw32(GRC_MISC_CFG, val);
}
+ val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
+ (6 << TX_LENGTHS_IPG_SHIFT);
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
+ val |= tr32(MAC_TX_LENGTHS) &
+ (TX_LENGTHS_JMB_FRM_LEN_MSK |
+ TX_LENGTHS_CNT_DWN_VAL_MSK);
+
if (tp->link_config.active_speed == SPEED_1000 &&
tp->link_config.active_duplex == DUPLEX_HALF)
- tw32(MAC_TX_LENGTHS,
- ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
- (6 << TX_LENGTHS_IPG_SHIFT) |
- (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
+ tw32(MAC_TX_LENGTHS, val |
+ (0xff << TX_LENGTHS_SLOT_TIME_SHIFT));
else
- tw32(MAC_TX_LENGTHS,
- ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
- (6 << TX_LENGTHS_IPG_SHIFT) |
- (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));
+ tw32(MAC_TX_LENGTHS, val |
+ (32 << TX_LENGTHS_SLOT_TIME_SHIFT));
- if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
+ if (!tg3_flag(tp, 5705_PLUS)) {
if (netif_carrier_ok(tp->dev)) {
tw32(HOSTCC_STAT_COAL_TICKS,
tp->coal.stats_block_coalesce_usecs);
@@ -4440,8 +4475,8 @@ static int tg3_setup_phy(struct tg3 *tp, int force_reset)
}
}
- if (tp->tg3_flags & TG3_FLAG_ASPM_WORKAROUND) {
- u32 val = tr32(PCIE_PWR_MGMT_THRESH);
+ if (tg3_flag(tp, ASPM_WORKAROUND)) {
+ val = tr32(PCIE_PWR_MGMT_THRESH);
if (!netif_carrier_ok(tp->dev))
val = (val & ~PCIE_PWR_MGMT_L1_THRESH_MSK) |
tp->pwrmgmt_thresh;
@@ -4458,6 +4493,123 @@ static inline int tg3_irq_sync(struct tg3 *tp)
return tp->irq_sync;
}
+static inline void tg3_rd32_loop(struct tg3 *tp, u32 *dst, u32 off, u32 len)
+{
+ int i;
+
+ dst = (u32 *)((u8 *)dst + off);
+ for (i = 0; i < len; i += sizeof(u32))
+ *dst++ = tr32(off + i);
+}
+
+static void tg3_dump_legacy_regs(struct tg3 *tp, u32 *regs)
+{
+ tg3_rd32_loop(tp, regs, TG3PCI_VENDOR, 0xb0);
+ tg3_rd32_loop(tp, regs, MAILBOX_INTERRUPT_0, 0x200);
+ tg3_rd32_loop(tp, regs, MAC_MODE, 0x4f0);
+ tg3_rd32_loop(tp, regs, SNDDATAI_MODE, 0xe0);
+ tg3_rd32_loop(tp, regs, SNDDATAC_MODE, 0x04);
+ tg3_rd32_loop(tp, regs, SNDBDS_MODE, 0x80);
+ tg3_rd32_loop(tp, regs, SNDBDI_MODE, 0x48);
+ tg3_rd32_loop(tp, regs, SNDBDC_MODE, 0x04);
+ tg3_rd32_loop(tp, regs, RCVLPC_MODE, 0x20);
+ tg3_rd32_loop(tp, regs, RCVLPC_SELLST_BASE, 0x15c);
+ tg3_rd32_loop(tp, regs, RCVDBDI_MODE, 0x0c);
+ tg3_rd32_loop(tp, regs, RCVDBDI_JUMBO_BD, 0x3c);
+ tg3_rd32_loop(tp, regs, RCVDBDI_BD_PROD_IDX_0, 0x44);
+ tg3_rd32_loop(tp, regs, RCVDCC_MODE, 0x04);
+ tg3_rd32_loop(tp, regs, RCVBDI_MODE, 0x20);
+ tg3_rd32_loop(tp, regs, RCVCC_MODE, 0x14);
+ tg3_rd32_loop(tp, regs, RCVLSC_MODE, 0x08);
+ tg3_rd32_loop(tp, regs, MBFREE_MODE, 0x08);
+ tg3_rd32_loop(tp, regs, HOSTCC_MODE, 0x100);
+
+ if (tg3_flag(tp, SUPPORT_MSIX))
+ tg3_rd32_loop(tp, regs, HOSTCC_RXCOL_TICKS_VEC1, 0x180);
+
+ tg3_rd32_loop(tp, regs, MEMARB_MODE, 0x10);
+ tg3_rd32_loop(tp, regs, BUFMGR_MODE, 0x58);
+ tg3_rd32_loop(tp, regs, RDMAC_MODE, 0x08);
+ tg3_rd32_loop(tp, regs, WDMAC_MODE, 0x08);
+ tg3_rd32_loop(tp, regs, RX_CPU_MODE, 0x04);
+ tg3_rd32_loop(tp, regs, RX_CPU_STATE, 0x04);
+ tg3_rd32_loop(tp, regs, RX_CPU_PGMCTR, 0x04);
+ tg3_rd32_loop(tp, regs, RX_CPU_HWBKPT, 0x04);
+
+ if (!tg3_flag(tp, 5705_PLUS)) {
+ tg3_rd32_loop(tp, regs, TX_CPU_MODE, 0x04);
+ tg3_rd32_loop(tp, regs, TX_CPU_STATE, 0x04);
+ tg3_rd32_loop(tp, regs, TX_CPU_PGMCTR, 0x04);
+ }
+
+ tg3_rd32_loop(tp, regs, GRCMBOX_INTERRUPT_0, 0x110);
+ tg3_rd32_loop(tp, regs, FTQ_RESET, 0x120);
+ tg3_rd32_loop(tp, regs, MSGINT_MODE, 0x0c);
+ tg3_rd32_loop(tp, regs, DMAC_MODE, 0x04);
+ tg3_rd32_loop(tp, regs, GRC_MODE, 0x4c);
+
+ if (tg3_flag(tp, NVRAM))
+ tg3_rd32_loop(tp, regs, NVRAM_CMD, 0x24);
+}
+
+static void tg3_dump_state(struct tg3 *tp)
+{
+ int i;
+ u32 *regs;
+
+ regs = kzalloc(TG3_REG_BLK_SIZE, GFP_ATOMIC);
+ if (!regs) {
+ netdev_err(tp->dev, "Failed allocating register dump buffer\n");
+ return;
+ }
+
+ if (tg3_flag(tp, PCI_EXPRESS)) {
+ /* Read up to but not including private PCI registers */
+ for (i = 0; i < TG3_PCIE_TLDLPL_PORT; i += sizeof(u32))
+ regs[i / sizeof(u32)] = tr32(i);
+ } else
+ tg3_dump_legacy_regs(tp, regs);
+
+ for (i = 0; i < TG3_REG_BLK_SIZE / sizeof(u32); i += 4) {
+ if (!regs[i + 0] && !regs[i + 1] &&
+ !regs[i + 2] && !regs[i + 3])
+ continue;
+
+ netdev_err(tp->dev, "0x%08x: 0x%08x, 0x%08x, 0x%08x, 0x%08x\n",
+ i * 4,
+ regs[i + 0], regs[i + 1], regs[i + 2], regs[i + 3]);
+ }
+
+ kfree(regs);
+
+ for (i = 0; i < tp->irq_cnt; i++) {
+ struct tg3_napi *tnapi = &tp->napi[i];
+
+ /* SW status block */
+ netdev_err(tp->dev,
+ "%d: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
+ i,
+ tnapi->hw_status->status,
+ tnapi->hw_status->status_tag,
+ tnapi->hw_status->rx_jumbo_consumer,
+ tnapi->hw_status->rx_consumer,
+ tnapi->hw_status->rx_mini_consumer,
+ tnapi->hw_status->idx[0].rx_producer,
+ tnapi->hw_status->idx[0].tx_consumer);
+
+ netdev_err(tp->dev,
+ "%d: NAPI info [%08x:%08x:(%04x:%04x:%04x):%04x:(%04x:%04x:%04x:%04x)]\n",
+ i,
+ tnapi->last_tag, tnapi->last_irq_tag,
+ tnapi->tx_prod, tnapi->tx_cons, tnapi->tx_pending,
+ tnapi->rx_rcb_ptr,
+ tnapi->prodring.rx_std_prod_idx,
+ tnapi->prodring.rx_std_cons_idx,
+ tnapi->prodring.rx_jmb_prod_idx,
+ tnapi->prodring.rx_jmb_cons_idx);
+ }
+}
+
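Note on the new dump helpers: tg3_rd32_loop() advances dst by off before copying, so each register value lands in the buffer at its own MMIO offset and the address column printed by tg3_dump_state() is the true register offset (rows that are entirely zero are skipped). A minimal usage sketch; only TG3_REG_BLK_SIZE, TG3PCI_VENDOR and the helpers above are real, the function name is made up:

	static void tg3_dump_pci_block_example(struct tg3 *tp)
	{
		u32 *regs = kzalloc(TG3_REG_BLK_SIZE, GFP_ATOMIC);

		if (!regs)
			return;

		tg3_rd32_loop(tp, regs, TG3PCI_VENDOR, 0xb0);
		/* regs[TG3PCI_VENDOR / sizeof(u32)] now holds tr32(TG3PCI_VENDOR) */
		kfree(regs);
	}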
/* This is called whenever we suspect that the system chipset is re-
* ordering the sequence of MMIO to the tx send mailbox. The symptom
* is bogus tx completions. We try to recover by setting the
@@ -4466,7 +4618,7 @@ static inline int tg3_irq_sync(struct tg3 *tp)
*/
static void tg3_tx_recover(struct tg3 *tp)
{
- BUG_ON((tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER) ||
+ BUG_ON(tg3_flag(tp, MBOX_WRITE_REORDER) ||
tp->write32_tx_mbox == tg3_write_indirect_mbox);
netdev_warn(tp->dev,
@@ -4476,7 +4628,7 @@ static void tg3_tx_recover(struct tg3 *tp)
"and include system chipset information.\n");
spin_lock(&tp->lock);
- tp->tg3_flags |= TG3_FLAG_TX_RECOVERY_PENDING;
+ tg3_flag_set(tp, TX_RECOVERY_PENDING);
spin_unlock(&tp->lock);
}
@@ -4500,7 +4652,7 @@ static void tg3_tx(struct tg3_napi *tnapi)
struct netdev_queue *txq;
int index = tnapi - tp->napi;
- if (tp->tg3_flags3 & TG3_FLG3_ENABLE_TSS)
+ if (tg3_flag(tp, ENABLE_TSS))
index--;
txq = netdev_get_tx_queue(tp->dev, index);
@@ -4815,7 +4967,7 @@ static int tg3_rx(struct tg3_napi *tnapi, int budget)
skb = copy_skb;
}
- if ((tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) &&
+ if ((tp->dev->features & NETIF_F_RXCSUM) &&
(desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
(((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
>> RXD_TCPCSUM_SHIFT) == 0xffff))
@@ -4868,7 +5020,7 @@ next_pkt_nopost:
tw32_rx_mbox(tnapi->consmbox, sw_idx);
/* Refill RX ring(s). */
- if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_RSS)) {
+ if (!tg3_flag(tp, ENABLE_RSS)) {
if (work_mask & RXD_OPAQUE_RING_STD) {
tpr->rx_std_prod_idx = std_prod_idx &
tp->rx_std_ring_mask;
@@ -4901,16 +5053,14 @@ next_pkt_nopost:
static void tg3_poll_link(struct tg3 *tp)
{
/* handle link change and other phy events */
- if (!(tp->tg3_flags &
- (TG3_FLAG_USE_LINKCHG_REG |
- TG3_FLAG_POLL_SERDES))) {
+ if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
struct tg3_hw_status *sblk = tp->napi[0].hw_status;
if (sblk->status & SD_STATUS_LINK_CHG) {
sblk->status = SD_STATUS_UPDATED |
(sblk->status & ~SD_STATUS_LINK_CHG);
spin_lock(&tp->lock);
- if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
+ if (tg3_flag(tp, USE_PHYLIB)) {
tw32_f(MAC_STATUS,
(MAC_STATUS_SYNC_CHANGED |
MAC_STATUS_CFG_CHANGED |
@@ -5057,7 +5207,7 @@ static int tg3_poll_work(struct tg3_napi *tnapi, int work_done, int budget)
/* run TX completion thread */
if (tnapi->hw_status->idx[0].tx_consumer != tnapi->tx_cons) {
tg3_tx(tnapi);
- if (unlikely(tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING))
+ if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
return work_done;
}
@@ -5068,7 +5218,7 @@ static int tg3_poll_work(struct tg3_napi *tnapi, int work_done, int budget)
if (*(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
work_done += tg3_rx(tnapi, budget - work_done);
- if ((tp->tg3_flags3 & TG3_FLG3_ENABLE_RSS) && tnapi == &tp->napi[1]) {
+ if (tg3_flag(tp, ENABLE_RSS) && tnapi == &tp->napi[1]) {
struct tg3_rx_prodring_set *dpr = &tp->napi[0].prodring;
int i, err = 0;
u32 std_prod_idx = dpr->rx_std_prod_idx;
@@ -5107,7 +5257,7 @@ static int tg3_poll_msix(struct napi_struct *napi, int budget)
while (1) {
work_done = tg3_poll_work(tnapi, work_done, budget);
- if (unlikely(tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING))
+ if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
goto tx_recovery;
if (unlikely(work_done >= budget))
@@ -5141,6 +5291,40 @@ tx_recovery:
return work_done;
}
+static void tg3_process_error(struct tg3 *tp)
+{
+ u32 val;
+ bool real_error = false;
+
+ if (tg3_flag(tp, ERROR_PROCESSED))
+ return;
+
+ /* Check Flow Attention register */
+ val = tr32(HOSTCC_FLOW_ATTN);
+ if (val & ~HOSTCC_FLOW_ATTN_MBUF_LWM) {
+ netdev_err(tp->dev, "FLOW Attention error. Resetting chip.\n");
+ real_error = true;
+ }
+
+ if (tr32(MSGINT_STATUS) & ~MSGINT_STATUS_MSI_REQ) {
+ netdev_err(tp->dev, "MSI Status error. Resetting chip.\n");
+ real_error = true;
+ }
+
+ if (tr32(RDMAC_STATUS) || tr32(WDMAC_STATUS)) {
+ netdev_err(tp->dev, "DMA Status error. Resetting chip.\n");
+ real_error = true;
+ }
+
+ if (!real_error)
+ return;
+
+ tg3_dump_state(tp);
+
+ tg3_flag_set(tp, ERROR_PROCESSED);
+ schedule_work(&tp->reset_task);
+}
+
static int tg3_poll(struct napi_struct *napi, int budget)
{
struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
@@ -5149,17 +5333,20 @@ static int tg3_poll(struct napi_struct *napi, int budget)
struct tg3_hw_status *sblk = tnapi->hw_status;
while (1) {
+ if (sblk->status & SD_STATUS_ERROR)
+ tg3_process_error(tp);
+
tg3_poll_link(tp);
work_done = tg3_poll_work(tnapi, work_done, budget);
- if (unlikely(tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING))
+ if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
goto tx_recovery;
if (unlikely(work_done >= budget))
break;
- if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) {
+ if (tg3_flag(tp, TAGGED_STATUS)) {
/* tp->last_tag is used in tg3_int_reenable() below
* to tell the hw how much work has been processed,
* so we must read it before checking for more work.
@@ -5326,7 +5513,7 @@ static irqreturn_t tg3_interrupt(int irq, void *dev_id)
* interrupt is ours and will flush the status block.
*/
if (unlikely(!(sblk->status & SD_STATUS_UPDATED))) {
- if ((tp->tg3_flags & TG3_FLAG_CHIP_RESETTING) ||
+ if (tg3_flag(tp, CHIP_RESETTING) ||
(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
handled = 0;
goto out;
@@ -5375,7 +5562,7 @@ static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id)
* interrupt is ours and will flush the status block.
*/
if (unlikely(sblk->status_tag == tnapi->last_irq_tag)) {
- if ((tp->tg3_flags & TG3_FLAG_CHIP_RESETTING) ||
+ if (tg3_flag(tp, CHIP_RESETTING) ||
(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
handled = 0;
goto out;
@@ -5488,14 +5675,14 @@ static void tg3_reset_task(struct work_struct *work)
tg3_full_lock(tp, 1);
- restart_timer = tp->tg3_flags2 & TG3_FLG2_RESTART_TIMER;
- tp->tg3_flags2 &= ~TG3_FLG2_RESTART_TIMER;
+ restart_timer = tg3_flag(tp, RESTART_TIMER);
+ tg3_flag_clear(tp, RESTART_TIMER);
- if (tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING) {
+ if (tg3_flag(tp, TX_RECOVERY_PENDING)) {
tp->write32_tx_mbox = tg3_write32_tx_mbox;
tp->write32_rx_mbox = tg3_write_flush_reg32;
- tp->tg3_flags |= TG3_FLAG_MBOX_WRITE_REORDER;
- tp->tg3_flags &= ~TG3_FLAG_TX_RECOVERY_PENDING;
+ tg3_flag_set(tp, MBOX_WRITE_REORDER);
+ tg3_flag_clear(tp, TX_RECOVERY_PENDING);
}
tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
@@ -5515,21 +5702,13 @@ out:
tg3_phy_start(tp);
}
-static void tg3_dump_short_state(struct tg3 *tp)
-{
- netdev_err(tp->dev, "DEBUG: MAC_TX_STATUS[%08x] MAC_RX_STATUS[%08x]\n",
- tr32(MAC_TX_STATUS), tr32(MAC_RX_STATUS));
- netdev_err(tp->dev, "DEBUG: RDMAC_STATUS[%08x] WDMAC_STATUS[%08x]\n",
- tr32(RDMAC_STATUS), tr32(WDMAC_STATUS));
-}
-
static void tg3_tx_timeout(struct net_device *dev)
{
struct tg3 *tp = netdev_priv(dev);
if (netif_msg_tx_err(tp)) {
netdev_err(dev, "transmit timed out, resetting\n");
- tg3_dump_short_state(tp);
+ tg3_dump_state(tp);
}
schedule_work(&tp->reset_task);
@@ -5548,7 +5727,7 @@ static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
int len)
{
#if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
- if (tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG)
+ if (tg3_flag(tp, 40BIT_DMA_BUG))
return ((u64) mapping + len) > DMA_BIT_MASK(40);
return 0;
#else
@@ -5595,8 +5774,8 @@ static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi,
/* Make sure new skb does not cross any 4G boundaries.
* Drop the packet if it does.
*/
- } else if ((tp->tg3_flags3 & TG3_FLG3_4G_DMA_BNDRY_BUG) &&
- tg3_4g_overflow_test(new_addr, new_skb->len)) {
+ } else if (tg3_flag(tp, 4G_DMA_BNDRY_BUG) &&
+ tg3_4g_overflow_test(new_addr, new_skb->len)) {
pci_unmap_single(tp->pdev, new_addr, new_skb->len,
PCI_DMA_TODEVICE);
ret = -1;
@@ -5663,7 +5842,7 @@ static void tg3_set_txd(struct tg3_napi *tnapi, int entry,
}
/* hard_start_xmit for devices that don't have any bugs and
- * support TG3_FLG2_HW_TSO_2 and TG3_FLG2_HW_TSO_3 only.
+ * support TG3_FLAG_HW_TSO_2 and TG3_FLAG_HW_TSO_3 only.
*/
static netdev_tx_t tg3_start_xmit(struct sk_buff *skb,
struct net_device *dev)
@@ -5677,7 +5856,7 @@ static netdev_tx_t tg3_start_xmit(struct sk_buff *skb,
txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
tnapi = &tp->napi[skb_get_queue_mapping(skb)];
- if (tp->tg3_flags3 & TG3_FLG3_ENABLE_TSS)
+ if (tg3_flag(tp, ENABLE_TSS))
tnapi++;
/* We are running in BH disabled context with netif_tx_lock
@@ -5722,7 +5901,7 @@ static netdev_tx_t tg3_start_xmit(struct sk_buff *skb,
hdrlen = ip_tcp_len + tcp_opt_len;
}
- if (tp->tg3_flags2 & TG3_FLG2_HW_TSO_3) {
+ if (tg3_flag(tp, HW_TSO_3)) {
mss |= (hdrlen & 0xc) << 12;
if (hdrlen & 0x10)
base_flags |= 0x00000010;
@@ -5755,7 +5934,7 @@ static netdev_tx_t tg3_start_xmit(struct sk_buff *skb,
tnapi->tx_buffers[entry].skb = skb;
dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, mapping);
- if ((tp->tg3_flags3 & TG3_FLG3_USE_JUMBO_BDFLAG) &&
+ if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
!mss && skb->len > VLAN_ETH_FRAME_LEN)
base_flags |= TXD_FLAG_JMB_PKT;
@@ -5878,7 +6057,7 @@ tg3_tso_bug_end:
}
/* hard_start_xmit for devices that have the 4G bug and/or 40-bit bug and
- * support TG3_FLG2_HW_TSO_1 or firmware TSO only.
+ * support TG3_FLAG_HW_TSO_1 or firmware TSO only.
*/
static netdev_tx_t tg3_start_xmit_dma_bug(struct sk_buff *skb,
struct net_device *dev)
@@ -5893,7 +6072,7 @@ static netdev_tx_t tg3_start_xmit_dma_bug(struct sk_buff *skb,
txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
tnapi = &tp->napi[skb_get_queue_mapping(skb)];
- if (tp->tg3_flags3 & TG3_FLG3_ENABLE_TSS)
+ if (tg3_flag(tp, ENABLE_TSS))
tnapi++;
/* We are running in BH disabled context with netif_tx_lock
@@ -5944,13 +6123,15 @@ static netdev_tx_t tg3_start_xmit_dma_bug(struct sk_buff *skb,
}
if (unlikely((ETH_HLEN + hdr_len) > 80) &&
- (tp->tg3_flags2 & TG3_FLG2_TSO_BUG))
+ tg3_flag(tp, TSO_BUG))
return tg3_tso_bug(tp, skb);
base_flags |= (TXD_FLAG_CPU_PRE_DMA |
TXD_FLAG_CPU_POST_DMA);
- if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
+ if (tg3_flag(tp, HW_TSO_1) ||
+ tg3_flag(tp, HW_TSO_2) ||
+ tg3_flag(tp, HW_TSO_3)) {
tcp_hdr(skb)->check = 0;
base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
} else
@@ -5959,14 +6140,14 @@ static netdev_tx_t tg3_start_xmit_dma_bug(struct sk_buff *skb,
IPPROTO_TCP,
0);
- if (tp->tg3_flags2 & TG3_FLG2_HW_TSO_3) {
+ if (tg3_flag(tp, HW_TSO_3)) {
mss |= (hdr_len & 0xc) << 12;
if (hdr_len & 0x10)
base_flags |= 0x00000010;
base_flags |= (hdr_len & 0x3e0) << 5;
- } else if (tp->tg3_flags2 & TG3_FLG2_HW_TSO_2)
+ } else if (tg3_flag(tp, HW_TSO_2))
mss |= hdr_len << 9;
- else if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO_1) ||
+ else if (tg3_flag(tp, HW_TSO_1) ||
GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
if (tcp_opt_len || iph->ihl > 5) {
int tsflags;
@@ -5988,7 +6169,7 @@ static netdev_tx_t tg3_start_xmit_dma_bug(struct sk_buff *skb,
base_flags |= (TXD_FLAG_VLAN |
(vlan_tx_tag_get(skb) << 16));
- if ((tp->tg3_flags3 & TG3_FLG3_USE_JUMBO_BDFLAG) &&
+ if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
!mss && skb->len > VLAN_ETH_FRAME_LEN)
base_flags |= TXD_FLAG_JMB_PKT;
@@ -6005,18 +6186,18 @@ static netdev_tx_t tg3_start_xmit_dma_bug(struct sk_buff *skb,
would_hit_hwbug = 0;
- if ((tp->tg3_flags3 & TG3_FLG3_SHORT_DMA_BUG) && len <= 8)
+ if (tg3_flag(tp, SHORT_DMA_BUG) && len <= 8)
would_hit_hwbug = 1;
- if ((tp->tg3_flags3 & TG3_FLG3_4G_DMA_BNDRY_BUG) &&
+ if (tg3_flag(tp, 4G_DMA_BNDRY_BUG) &&
tg3_4g_overflow_test(mapping, len))
would_hit_hwbug = 1;
- if ((tp->tg3_flags3 & TG3_FLG3_40BIT_DMA_LIMIT_BUG) &&
+ if (tg3_flag(tp, 40BIT_DMA_LIMIT_BUG) &&
tg3_40bit_overflow_test(tp, mapping, len))
would_hit_hwbug = 1;
- if (tp->tg3_flags3 & TG3_FLG3_5701_DMA_BUG)
+ if (tg3_flag(tp, 5701_DMA_BUG))
would_hit_hwbug = 1;
tg3_set_txd(tnapi, entry, mapping, len, base_flags,
@@ -6042,19 +6223,21 @@ static netdev_tx_t tg3_start_xmit_dma_bug(struct sk_buff *skb,
if (pci_dma_mapping_error(tp->pdev, mapping))
goto dma_error;
- if ((tp->tg3_flags3 & TG3_FLG3_SHORT_DMA_BUG) &&
+ if (tg3_flag(tp, SHORT_DMA_BUG) &&
len <= 8)
would_hit_hwbug = 1;
- if ((tp->tg3_flags3 & TG3_FLG3_4G_DMA_BNDRY_BUG) &&
+ if (tg3_flag(tp, 4G_DMA_BNDRY_BUG) &&
tg3_4g_overflow_test(mapping, len))
would_hit_hwbug = 1;
- if ((tp->tg3_flags3 & TG3_FLG3_40BIT_DMA_LIMIT_BUG) &&
+ if (tg3_flag(tp, 40BIT_DMA_LIMIT_BUG) &&
tg3_40bit_overflow_test(tp, mapping, len))
would_hit_hwbug = 1;
- if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
+ if (tg3_flag(tp, HW_TSO_1) ||
+ tg3_flag(tp, HW_TSO_2) ||
+ tg3_flag(tp, HW_TSO_3))
tg3_set_txd(tnapi, entry, mapping, len,
base_flags, (i == last)|(mss << 1));
else
@@ -6126,22 +6309,80 @@ dma_error:
return NETDEV_TX_OK;
}
+static void tg3_set_loopback(struct net_device *dev, u32 features)
+{
+ struct tg3 *tp = netdev_priv(dev);
+
+ if (features & NETIF_F_LOOPBACK) {
+ if (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK)
+ return;
+
+ /*
+ * Clear MAC_MODE_HALF_DUPLEX or you won't get packets back in
+ * loopback mode if Half-Duplex mode was negotiated earlier.
+ */
+ tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
+
+ /* Enable internal MAC loopback mode */
+ tp->mac_mode |= MAC_MODE_PORT_INT_LPBACK;
+ spin_lock_bh(&tp->lock);
+ tw32(MAC_MODE, tp->mac_mode);
+ netif_carrier_on(tp->dev);
+ spin_unlock_bh(&tp->lock);
+ netdev_info(dev, "Internal MAC loopback mode enabled.\n");
+ } else {
+ if (!(tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
+ return;
+
+ /* Disable internal MAC loopback mode */
+ tp->mac_mode &= ~MAC_MODE_PORT_INT_LPBACK;
+ spin_lock_bh(&tp->lock);
+ tw32(MAC_MODE, tp->mac_mode);
+ /* Force link status check */
+ tg3_setup_phy(tp, 1);
+ spin_unlock_bh(&tp->lock);
+ netdev_info(dev, "Internal MAC loopback mode disabled.\n");
+ }
+}
+
+static u32 tg3_fix_features(struct net_device *dev, u32 features)
+{
+ struct tg3 *tp = netdev_priv(dev);
+
+ if (dev->mtu > ETH_DATA_LEN && tg3_flag(tp, 5780_CLASS))
+ features &= ~NETIF_F_ALL_TSO;
+
+ return features;
+}
+
+static int tg3_set_features(struct net_device *dev, u32 features)
+{
+ u32 changed = dev->features ^ features;
+
+ if ((changed & NETIF_F_LOOPBACK) && netif_running(dev))
+ tg3_set_loopback(dev, features);
+
+ return 0;
+}
+
static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
int new_mtu)
{
dev->mtu = new_mtu;
if (new_mtu > ETH_DATA_LEN) {
- if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) {
- tp->tg3_flags2 &= ~TG3_FLG2_TSO_CAPABLE;
- ethtool_op_set_tso(dev, 0);
+ if (tg3_flag(tp, 5780_CLASS)) {
+ netdev_update_features(dev);
+ tg3_flag_clear(tp, TSO_CAPABLE);
} else {
- tp->tg3_flags |= TG3_FLAG_JUMBO_RING_ENABLE;
+ tg3_flag_set(tp, JUMBO_RING_ENABLE);
}
} else {
- if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
- tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
- tp->tg3_flags &= ~TG3_FLAG_JUMBO_RING_ENABLE;
+ if (tg3_flag(tp, 5780_CLASS)) {
+ tg3_flag_set(tp, TSO_CAPABLE);
+ netdev_update_features(dev);
+ }
+ tg3_flag_clear(tp, JUMBO_RING_ENABLE);
}
}
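Note: tg3_fix_features()/tg3_set_features() only take effect once they are referenced from the driver's net_device_ops, which happens in a part of the patch outside this excerpt; the assumed wiring is simply:

	/* Assumed hookup, not shown in this excerpt. */
	static const struct net_device_ops tg3_netdev_ops = {
		/* ...existing callbacks elided... */
		.ndo_fix_features	= tg3_fix_features,
		.ndo_set_features	= tg3_set_features,
	};

With that in place, toggling the new NETIF_F_LOOPBACK feature from user space ends up in tg3_set_loopback(), and raising the MTU above ETH_DATA_LEN on a 5780-class chip makes netdev_update_features() strip NETIF_F_ALL_TSO via tg3_fix_features().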
@@ -6195,7 +6436,7 @@ static void tg3_rx_prodring_free(struct tg3 *tp,
tg3_rx_skb_free(tp, &tpr->rx_std_buffers[i],
tp->rx_pkt_map_sz);
- if (tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) {
+ if (tg3_flag(tp, JUMBO_CAPABLE)) {
for (i = tpr->rx_jmb_cons_idx;
i != tpr->rx_jmb_prod_idx;
i = (i + 1) & tp->rx_jmb_ring_mask) {
@@ -6211,8 +6452,7 @@ static void tg3_rx_prodring_free(struct tg3 *tp,
tg3_rx_skb_free(tp, &tpr->rx_std_buffers[i],
tp->rx_pkt_map_sz);
- if ((tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) &&
- !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
+ if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
for (i = 0; i <= tp->rx_jmb_ring_mask; i++)
tg3_rx_skb_free(tp, &tpr->rx_jmb_buffers[i],
TG3_RX_JMB_MAP_SZ);
@@ -6249,7 +6489,7 @@ static int tg3_rx_prodring_alloc(struct tg3 *tp,
memset(tpr->rx_std, 0, TG3_RX_STD_RING_BYTES(tp));
rx_pkt_dma_sz = TG3_RX_STD_DMA_SZ;
- if ((tp->tg3_flags2 & TG3_FLG2_5780_CLASS) &&
+ if (tg3_flag(tp, 5780_CLASS) &&
tp->dev->mtu > ETH_DATA_LEN)
rx_pkt_dma_sz = TG3_RX_JMB_DMA_SZ;
tp->rx_pkt_map_sz = TG3_RX_DMA_TO_MAP_SZ(rx_pkt_dma_sz);
@@ -6282,13 +6522,12 @@ static int tg3_rx_prodring_alloc(struct tg3 *tp,
}
}
- if (!(tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) ||
- (tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
+ if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
goto done;
memset(tpr->rx_jmb, 0, TG3_RX_JMB_RING_BYTES(tp));
- if (!(tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE))
+ if (!tg3_flag(tp, JUMBO_RING_ENABLE))
goto done;
for (i = 0; i <= tp->rx_jmb_ring_mask; i++) {
@@ -6357,8 +6596,7 @@ static int tg3_rx_prodring_init(struct tg3 *tp,
if (!tpr->rx_std)
goto err_out;
- if ((tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) &&
- !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
+ if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
tpr->rx_jmb_buffers = kzalloc(TG3_RX_JMB_BUFF_RING_SIZE(tp),
GFP_KERNEL);
if (!tpr->rx_jmb_buffers)
@@ -6556,8 +6794,8 @@ static int tg3_alloc_consistent(struct tg3 *tp)
/* If multivector TSS is enabled, vector 0 does not handle
* tx interrupts. Don't allocate any resources for it.
*/
- if ((!i && !(tp->tg3_flags3 & TG3_FLG3_ENABLE_TSS)) ||
- (i && (tp->tg3_flags3 & TG3_FLG3_ENABLE_TSS))) {
+ if ((!i && !tg3_flag(tp, ENABLE_TSS)) ||
+ (i && tg3_flag(tp, ENABLE_TSS))) {
tnapi->tx_buffers = kzalloc(sizeof(struct ring_info) *
TG3_TX_RING_SIZE,
GFP_KERNEL);
@@ -6597,7 +6835,7 @@ static int tg3_alloc_consistent(struct tg3 *tp)
* If multivector RSS is enabled, vector 0 does not handle
* rx or tx interrupts. Don't allocate any resources for it.
*/
- if (!i && (tp->tg3_flags3 & TG3_FLG3_ENABLE_RSS))
+ if (!i && tg3_flag(tp, ENABLE_RSS))
continue;
tnapi->rx_rcb = dma_alloc_coherent(&tp->pdev->dev,
@@ -6627,7 +6865,7 @@ static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, int
unsigned int i;
u32 val;
- if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
+ if (tg3_flag(tp, 5705_PLUS)) {
switch (ofs) {
case RCVLSC_MODE:
case DMAC_MODE:
@@ -6737,7 +6975,7 @@ static void tg3_ape_send_event(struct tg3 *tp, u32 event)
u32 apedata;
/* NCSI does not support APE events */
- if (tp->tg3_flags3 & TG3_FLG3_APE_HAS_NCSI)
+ if (tg3_flag(tp, APE_HAS_NCSI))
return;
apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
@@ -6776,7 +7014,7 @@ static void tg3_ape_driver_state_change(struct tg3 *tp, int kind)
u32 event;
u32 apedata;
- if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
+ if (!tg3_flag(tp, ENABLE_APE))
return;
switch (kind) {
@@ -6805,7 +7043,7 @@ static void tg3_ape_driver_state_change(struct tg3 *tp, int kind)
tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG, 0x0);
if (device_may_wakeup(&tp->pdev->dev) &&
- (tp->tg3_flags & TG3_FLAG_WOL_ENABLE)) {
+ tg3_flag(tp, WOL_ENABLE)) {
tg3_ape_write32(tp, TG3_APE_HOST_WOL_SPEED,
TG3_APE_HOST_WOL_SPEED_AUTO);
apedata = TG3_APE_HOST_DRVR_STATE_WOL;
@@ -6834,7 +7072,7 @@ static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
NIC_SRAM_FIRMWARE_MBOX_MAGIC1);
- if (tp->tg3_flags2 & TG3_FLG2_ASF_NEW_HANDSHAKE) {
+ if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
switch (kind) {
case RESET_KIND_INIT:
tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
@@ -6864,7 +7102,7 @@ static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
/* tp->lock is held. */
static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
{
- if (tp->tg3_flags2 & TG3_FLG2_ASF_NEW_HANDSHAKE) {
+ if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
switch (kind) {
case RESET_KIND_INIT:
tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
@@ -6888,7 +7126,7 @@ static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
/* tp->lock is held. */
static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
{
- if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
+ if (tg3_flag(tp, ENABLE_ASF)) {
switch (kind) {
case RESET_KIND_INIT:
tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
@@ -6939,9 +7177,8 @@ static int tg3_poll_fw(struct tg3 *tp)
* of the above loop as an error, but do report the lack of
* running firmware once.
*/
- if (i >= 100000 &&
- !(tp->tg3_flags2 & TG3_FLG2_NO_FWARE_REPORTED)) {
- tp->tg3_flags2 |= TG3_FLG2_NO_FWARE_REPORTED;
+ if (i >= 100000 && !tg3_flag(tp, NO_FWARE_REPORTED)) {
+ tg3_flag_set(tp, NO_FWARE_REPORTED);
netdev_info(tp->dev, "No firmware running\n");
}
@@ -6974,10 +7211,10 @@ static void tg3_restore_pci_state(struct tg3 *tp)
/* Set MAX PCI retry to zero. */
val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
- (tp->tg3_flags & TG3_FLAG_PCIX_MODE))
+ tg3_flag(tp, PCIX_MODE))
val |= PCISTATE_RETRY_SAME_DMA;
/* Allow reads and writes to the APE register and memory space. */
- if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)
+ if (tg3_flag(tp, ENABLE_APE))
val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
PCISTATE_ALLOW_APE_SHMEM_WR |
PCISTATE_ALLOW_APE_PSPACE_WR;
@@ -6986,7 +7223,7 @@ static void tg3_restore_pci_state(struct tg3 *tp)
pci_write_config_word(tp->pdev, PCI_COMMAND, tp->pci_cmd);
if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785) {
- if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)
+ if (tg3_flag(tp, PCI_EXPRESS))
pcie_set_readrq(tp->pdev, tp->pcie_readrq);
else {
pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
@@ -6997,7 +7234,7 @@ static void tg3_restore_pci_state(struct tg3 *tp)
}
/* Make sure PCI-X relaxed ordering bit is clear. */
- if (tp->tg3_flags & TG3_FLAG_PCIX_MODE) {
+ if (tg3_flag(tp, PCIX_MODE)) {
u16 pcix_cmd;
pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
@@ -7007,12 +7244,12 @@ static void tg3_restore_pci_state(struct tg3 *tp)
pcix_cmd);
}
- if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) {
+ if (tg3_flag(tp, 5780_CLASS)) {
/* Chip reset on 5780 will reset MSI enable bit,
* so need to restore it.
*/
- if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
+ if (tg3_flag(tp, USING_MSI)) {
u16 ctrl;
pci_read_config_word(tp->pdev,
@@ -7052,7 +7289,7 @@ static int tg3_chip_reset(struct tg3 *tp)
tg3_save_pci_state(tp);
if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
- (tp->tg3_flags3 & TG3_FLG3_5755_PLUS))
+ tg3_flag(tp, 5755_PLUS))
tw32(GRC_FASTBOOT_PC, 0);
/*
@@ -7071,7 +7308,7 @@ static int tg3_chip_reset(struct tg3 *tp)
* at this time, but the irq handler may still be called due to irq
* sharing or irqpoll.
*/
- tp->tg3_flags |= TG3_FLAG_CHIP_RESETTING;
+ tg3_flag_set(tp, CHIP_RESETTING);
for (i = 0; i < tp->irq_cnt; i++) {
struct tg3_napi *tnapi = &tp->napi[i];
if (tnapi->hw_status) {
@@ -7094,10 +7331,10 @@ static int tg3_chip_reset(struct tg3 *tp)
/* do the reset */
val = GRC_MISC_CFG_CORECLK_RESET;
- if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
+ if (tg3_flag(tp, PCI_EXPRESS)) {
/* Force PCIe 1.0a mode */
if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
- !(tp->tg3_flags3 & TG3_FLG3_5717_PLUS) &&
+ !tg3_flag(tp, 57765_PLUS) &&
tr32(TG3_PCIE_PHY_TSTCTL) ==
(TG3_PCIE_PHY_TSTCTL_PCIE10 | TG3_PCIE_PHY_TSTCTL_PSCRAM))
tw32(TG3_PCIE_PHY_TSTCTL, TG3_PCIE_PHY_TSTCTL_PSCRAM);
@@ -7115,8 +7352,7 @@ static int tg3_chip_reset(struct tg3 *tp)
}
/* Manage gphy power for all CPMU absent PCIe devices. */
- if ((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) &&
- !(tp->tg3_flags & TG3_FLAG_CPMU_PRESENT))
+ if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, CPMU_PRESENT))
val |= GRC_MISC_CFG_KEEP_GPHY_POWER;
tw32(GRC_MISC_CFG, val);
@@ -7149,7 +7385,7 @@ static int tg3_chip_reset(struct tg3 *tp)
udelay(120);
- if ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) && tp->pcie_cap) {
+ if (tg3_flag(tp, PCI_EXPRESS) && tp->pcie_cap) {
u16 val16;
if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A0) {
@@ -7175,7 +7411,7 @@ static int tg3_chip_reset(struct tg3 *tp)
* Older PCIe devices only support the 128 byte
* MPS setting. Enforce the restriction.
*/
- if (!(tp->tg3_flags & TG3_FLAG_CPMU_PRESENT))
+ if (!tg3_flag(tp, CPMU_PRESENT))
val16 &= ~PCI_EXP_DEVCTL_PAYLOAD;
pci_write_config_word(tp->pdev,
tp->pcie_cap + PCI_EXP_DEVCTL,
@@ -7194,10 +7430,11 @@ static int tg3_chip_reset(struct tg3 *tp)
tg3_restore_pci_state(tp);
- tp->tg3_flags &= ~TG3_FLAG_CHIP_RESETTING;
+ tg3_flag_clear(tp, CHIP_RESETTING);
+ tg3_flag_clear(tp, ERROR_PROCESSED);
val = 0;
- if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
+ if (tg3_flag(tp, 5780_CLASS))
val = tr32(MEMARB_MODE);
tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
@@ -7222,7 +7459,7 @@ static int tg3_chip_reset(struct tg3 *tp)
tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
}
- if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)
+ if (tg3_flag(tp, ENABLE_APE))
tp->mac_mode = MAC_MODE_APE_TX_EN |
MAC_MODE_APE_RX_EN |
MAC_MODE_TDE_ENABLE;
@@ -7247,28 +7484,33 @@ static int tg3_chip_reset(struct tg3 *tp)
tg3_mdio_start(tp);
- if ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) &&
+ if (tg3_flag(tp, PCI_EXPRESS) &&
tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
- !(tp->tg3_flags3 & TG3_FLG3_5717_PLUS)) {
+ !tg3_flag(tp, 57765_PLUS)) {
val = tr32(0x7c00);
tw32(0x7c00, val | (1 << 25));
}
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
+ val = tr32(TG3_CPMU_CLCK_ORIDE);
+ tw32(TG3_CPMU_CLCK_ORIDE, val & ~CPMU_CLCK_ORIDE_MAC_ORIDE_EN);
+ }
+
/* Reprobe ASF enable state. */
- tp->tg3_flags &= ~TG3_FLAG_ENABLE_ASF;
- tp->tg3_flags2 &= ~TG3_FLG2_ASF_NEW_HANDSHAKE;
+ tg3_flag_clear(tp, ENABLE_ASF);
+ tg3_flag_clear(tp, ASF_NEW_HANDSHAKE);
tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
if (val == NIC_SRAM_DATA_SIG_MAGIC) {
u32 nic_cfg;
tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
- tp->tg3_flags |= TG3_FLAG_ENABLE_ASF;
+ tg3_flag_set(tp, ENABLE_ASF);
tp->last_event_jiffies = jiffies;
- if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
- tp->tg3_flags2 |= TG3_FLG2_ASF_NEW_HANDSHAKE;
+ if (tg3_flag(tp, 5750_PLUS))
+ tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
}
}
@@ -7278,8 +7520,7 @@ static int tg3_chip_reset(struct tg3 *tp)
/* tp->lock is held. */
static void tg3_stop_fw(struct tg3 *tp)
{
- if ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) &&
- !(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)) {
+ if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
/* Wait for RX cpu to ACK the previous event. */
tg3_wait_for_event_ack(tp);
@@ -7325,8 +7566,7 @@ static int tg3_halt_cpu(struct tg3 *tp, u32 offset)
{
int i;
- BUG_ON(offset == TX_CPU_BASE &&
- (tp->tg3_flags2 & TG3_FLG2_5705_PLUS));
+ BUG_ON(offset == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS));
if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
u32 val = tr32(GRC_VCPU_EXT_CTRL);
@@ -7361,7 +7601,7 @@ static int tg3_halt_cpu(struct tg3 *tp, u32 offset)
}
/* Clear firmware's nvram arbitration. */
- if (tp->tg3_flags & TG3_FLAG_NVRAM)
+ if (tg3_flag(tp, NVRAM))
tw32(NVRAM_SWARB, SWARB_REQ_CLR0);
return 0;
}
@@ -7379,15 +7619,14 @@ static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base, u32 cpu_scratch_b
int err, lock_err, i;
void (*write_op)(struct tg3 *, u32, u32);
- if (cpu_base == TX_CPU_BASE &&
- (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
+ if (cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS)) {
netdev_err(tp->dev,
"%s: Trying to load TX cpu firmware which is 5705\n",
__func__);
return -EINVAL;
}
- if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
+ if (tg3_flag(tp, 5705_PLUS))
write_op = tg3_write_mem;
else
write_op = tg3_write_indirect_reg32;
@@ -7473,8 +7712,6 @@ static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
return 0;
}
-/* 5705 needs a special version of the TSO firmware. */
-
/* tp->lock is held. */
static int tg3_load_tso_firmware(struct tg3 *tp)
{
@@ -7483,7 +7720,9 @@ static int tg3_load_tso_firmware(struct tg3 *tp)
unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
int err, i;
- if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
+ if (tg3_flag(tp, HW_TSO_1) ||
+ tg3_flag(tp, HW_TSO_2) ||
+ tg3_flag(tp, HW_TSO_3))
return 0;
fw_data = (void *)tp->fw->data;
@@ -7552,7 +7791,7 @@ static int tg3_set_mac_addr(struct net_device *dev, void *p)
if (!netif_running(dev))
return 0;
- if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
+ if (tg3_flag(tp, ENABLE_ASF)) {
u32 addr0_high, addr0_low, addr1_high, addr1_low;
addr0_high = tr32(MAC_ADDR_0_HIGH);
@@ -7587,7 +7826,7 @@ static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
(bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
maxlen_flags);
- if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
+ if (!tg3_flag(tp, 5705_PLUS))
tg3_write_mem(tp,
(bdinfo_addr + TG3_BDINFO_NIC_ADDR),
nic_addr);
@@ -7598,7 +7837,7 @@ static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
{
int i;
- if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_TSS)) {
+ if (!tg3_flag(tp, ENABLE_TSS)) {
tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
@@ -7608,7 +7847,7 @@ static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
tw32(HOSTCC_TXCOAL_MAXF_INT, 0);
}
- if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_RSS)) {
+ if (!tg3_flag(tp, ENABLE_RSS)) {
tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
@@ -7618,7 +7857,7 @@ static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
tw32(HOSTCC_RXCOAL_MAXF_INT, 0);
}
- if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
+ if (!tg3_flag(tp, 5705_PLUS)) {
u32 val = ec->stats_block_coalesce_usecs;
tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
@@ -7640,7 +7879,7 @@ static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
reg = HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18;
tw32(reg, ec->rx_max_coalesced_frames_irq);
- if (tp->tg3_flags3 & TG3_FLG3_ENABLE_TSS) {
+ if (tg3_flag(tp, ENABLE_TSS)) {
reg = HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18;
tw32(reg, ec->tx_coalesce_usecs);
reg = HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18;
@@ -7655,7 +7894,7 @@ static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
tw32(HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18, 0);
tw32(HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
- if (tp->tg3_flags3 & TG3_FLG3_ENABLE_TSS) {
+ if (tg3_flag(tp, ENABLE_TSS)) {
tw32(HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18, 0);
tw32(HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18, 0);
tw32(HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
@@ -7671,10 +7910,9 @@ static void tg3_rings_reset(struct tg3 *tp)
struct tg3_napi *tnapi = &tp->napi[0];
/* Disable all transmit rings but the first. */
- if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
+ if (!tg3_flag(tp, 5705_PLUS))
limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 16;
- else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
+ else if (tg3_flag(tp, 5717_PLUS))
limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 4;
else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 2;
@@ -7688,10 +7926,9 @@ static void tg3_rings_reset(struct tg3 *tp)
/* Disable all receive return rings but the first. */
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
+ if (tg3_flag(tp, 5717_PLUS))
limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 17;
- else if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
+ else if (!tg3_flag(tp, 5705_PLUS))
limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 16;
else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
@@ -7708,16 +7945,16 @@ static void tg3_rings_reset(struct tg3 *tp)
tw32_mailbox_f(tp->napi[0].int_mbox, 1);
/* Zero mailbox registers. */
- if (tp->tg3_flags & TG3_FLAG_SUPPORT_MSIX) {
+ if (tg3_flag(tp, SUPPORT_MSIX)) {
for (i = 1; i < tp->irq_max; i++) {
tp->napi[i].tx_prod = 0;
tp->napi[i].tx_cons = 0;
- if (tp->tg3_flags3 & TG3_FLG3_ENABLE_TSS)
+ if (tg3_flag(tp, ENABLE_TSS))
tw32_mailbox(tp->napi[i].prodmbox, 0);
tw32_rx_mbox(tp->napi[i].consmbox, 0);
tw32_mailbox_f(tp->napi[i].int_mbox, 1);
}
- if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_TSS))
+ if (!tg3_flag(tp, ENABLE_TSS))
tw32_mailbox(tp->napi[0].prodmbox, 0);
} else {
tp->napi[0].tx_prod = 0;
@@ -7727,7 +7964,7 @@ static void tg3_rings_reset(struct tg3 *tp)
}
/* Make sure the NIC-based send BD rings are disabled. */
- if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
+ if (!tg3_flag(tp, 5705_PLUS)) {
u32 mbox = MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW;
for (i = 0; i < 16; i++)
tw32_tx_mbox(mbox + i * 8, 0);
@@ -7787,6 +8024,47 @@ static void tg3_rings_reset(struct tg3 *tp)
}
}
+static void tg3_setup_rxbd_thresholds(struct tg3 *tp)
+{
+ u32 val, bdcache_maxcnt, host_rep_thresh, nic_rep_thresh;
+
+ if (!tg3_flag(tp, 5750_PLUS) ||
+ tg3_flag(tp, 5780_CLASS) ||
+ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
+ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
+ bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5700;
+ else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
+ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
+ bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5755;
+ else
+ bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5906;
+
+ nic_rep_thresh = min(bdcache_maxcnt / 2, tp->rx_std_max_post);
+ host_rep_thresh = max_t(u32, tp->rx_pending / 8, 1);
+
+ val = min(nic_rep_thresh, host_rep_thresh);
+ tw32(RCVBDI_STD_THRESH, val);
+
+ if (tg3_flag(tp, 57765_PLUS))
+ tw32(STD_REPLENISH_LWM, bdcache_maxcnt);
+
+ if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
+ return;
+
+ if (!tg3_flag(tp, 5705_PLUS))
+ bdcache_maxcnt = TG3_SRAM_RX_JMB_BDCACHE_SIZE_5700;
+ else
+ bdcache_maxcnt = TG3_SRAM_RX_JMB_BDCACHE_SIZE_5717;
+
+ host_rep_thresh = max_t(u32, tp->rx_jumbo_pending / 8, 1);
+
+ val = min(bdcache_maxcnt / 2, host_rep_thresh);
+ tw32(RCVBDI_JUMBO_THRESH, val);
+
+ if (tg3_flag(tp, 57765_PLUS))
+ tw32(JMB_REPLENISH_LWM, bdcache_maxcnt);
+}
+
/* tp->lock is held. */
static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
{
@@ -7800,7 +8078,7 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);
- if (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE)
+ if (tg3_flag(tp, INIT_COMPLETE))
tg3_abort_hw(tp, 1);
/* Enable MAC control of LPI */
@@ -7820,7 +8098,7 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717)
val |= TG3_CPMU_EEEMD_SND_IDX_DET_EN;
- if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)
+ if (tg3_flag(tp, ENABLE_APE))
val |= TG3_CPMU_EEEMD_APE_TX_DET_EN;
tw32_f(TG3_CPMU_EEE_MODE, val);
@@ -7879,7 +8157,7 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
}
- if (tp->tg3_flags3 & TG3_FLG3_L1PLLPD_EN) {
+ if (tg3_flag(tp, L1PLLPD_EN)) {
u32 grc_mode = tr32(GRC_MODE);
/* Access the lower 1K of PL PCIE block registers. */
@@ -7920,20 +8198,20 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
* other revision. But do not set this on PCI Express
* chips and don't even touch the clocks if the CPMU is present.
*/
- if (!(tp->tg3_flags & TG3_FLAG_CPMU_PRESENT)) {
- if (!(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
+ if (!tg3_flag(tp, CPMU_PRESENT)) {
+ if (!tg3_flag(tp, PCI_EXPRESS))
tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
}
if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
- (tp->tg3_flags & TG3_FLAG_PCIX_MODE)) {
+ tg3_flag(tp, PCIX_MODE)) {
val = tr32(TG3PCI_PCISTATE);
val |= PCISTATE_RETRY_SAME_DMA;
tw32(TG3PCI_PCISTATE, val);
}
- if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) {
+ if (tg3_flag(tp, ENABLE_APE)) {
/* Allow reads and writes to the
* APE register and memory space.
*/
@@ -7960,11 +8238,14 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
if (err)
return err;
- if (tp->tg3_flags3 & TG3_FLG3_5717_PLUS) {
+ if (tg3_flag(tp, 57765_PLUS)) {
val = tr32(TG3PCI_DMA_RW_CTRL) &
~DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0)
val &= ~DMA_RWCTRL_CRDRDR_RDMA_MRRS_MSK;
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57765 &&
+ GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717)
+ val |= DMA_RWCTRL_TAGGED_STAT_WA;
tw32(TG3PCI_DMA_RW_CTRL, val | tp->dma_rwctrl);
} else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 &&
GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5761) {
@@ -7999,7 +8280,7 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
tw32(GRC_MISC_CFG, val);
/* Initialize MBUF/DESC pool. */
- if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
+ if (tg3_flag(tp, 5750_PLUS)) {
/* Do nothing. */
} else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705) {
tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
@@ -8009,7 +8290,7 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
- } else if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) {
+ } else if (tg3_flag(tp, TSO_CAPABLE)) {
int fw_len;
fw_len = tp->fw_len;
@@ -8043,6 +8324,10 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
val = BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE;
if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
val |= BUFMGR_MODE_NO_TX_UNDERRUN;
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
+ tp->pci_chip_rev_id == CHIPREV_ID_5719_A0 ||
+ tp->pci_chip_rev_id == CHIPREV_ID_5720_A0)
+ val |= BUFMGR_MODE_MBLOW_ATTN_ENAB;
tw32(BUFMGR_MODE, val);
for (i = 0; i < 2000; i++) {
if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
@@ -8054,21 +8339,10 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
return -ENODEV;
}
- /* Setup replenish threshold. */
- val = tp->rx_pending / 8;
- if (val == 0)
- val = 1;
- else if (val > tp->rx_std_max_post)
- val = tp->rx_std_max_post;
- else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
- if (tp->pci_chip_rev_id == CHIPREV_ID_5906_A1)
- tw32(ISO_PKT_TX, (tr32(ISO_PKT_TX) & ~0x3) | 0x2);
-
- if (val > (TG3_RX_INTERNAL_RING_SZ_5906 / 2))
- val = TG3_RX_INTERNAL_RING_SZ_5906 / 2;
- }
+ if (tp->pci_chip_rev_id == CHIPREV_ID_5906_A1)
+ tw32(ISO_PKT_TX, (tr32(ISO_PKT_TX) & ~0x3) | 0x2);
- tw32(RCVBDI_STD_THRESH, val);
+ tg3_setup_rxbd_thresholds(tp);
/* Initialize TG3_BDINFO's at:
* RCVDBDI_STD_BD: standard eth size rx ring
@@ -8091,13 +8365,12 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
((u64) tpr->rx_std_mapping >> 32));
tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
((u64) tpr->rx_std_mapping & 0xffffffff));
- if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717 &&
- GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5719)
+ if (!tg3_flag(tp, 5717_PLUS))
tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
NIC_SRAM_RX_BUFFER_DESC);
/* Disable the mini ring */
- if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
+ if (!tg3_flag(tp, 5705_PLUS))
tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
BDINFO_FLAGS_DISABLED);
@@ -8105,20 +8378,18 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
* blocks on those devices that have them.
*/
if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
- ((tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) &&
- !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS))) {
- /* Setup replenish threshold. */
- tw32(RCVBDI_JUMBO_THRESH, tp->rx_jumbo_pending / 8);
+ (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))) {
- if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
+ if (tg3_flag(tp, JUMBO_RING_ENABLE)) {
tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
((u64) tpr->rx_jmb_mapping >> 32));
tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
((u64) tpr->rx_jmb_mapping & 0xffffffff));
+ val = TG3_RX_JMB_RING_SIZE(tp) <<
+ BDINFO_FLAGS_MAXLEN_SHIFT;
tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
- (RX_JUMBO_MAX_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT) |
- BDINFO_FLAGS_USE_EXT_RECV);
- if (!(tp->tg3_flags3 & TG3_FLG3_USE_JUMBO_BDFLAG) ||
+ val | BDINFO_FLAGS_USE_EXT_RECV);
+ if (!tg3_flag(tp, USE_JUMBO_BDFLAG) ||
GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
NIC_SRAM_RX_JUMBO_BUFFER_DESC);
@@ -8127,32 +8398,27 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
BDINFO_FLAGS_DISABLED);
}
- if (tp->tg3_flags3 & TG3_FLG3_5717_PLUS) {
+ if (tg3_flag(tp, 57765_PLUS)) {
if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
- val = RX_STD_MAX_SIZE_5705;
+ val = TG3_RX_STD_MAX_SIZE_5700;
else
- val = RX_STD_MAX_SIZE_5717;
+ val = TG3_RX_STD_MAX_SIZE_5717;
val <<= BDINFO_FLAGS_MAXLEN_SHIFT;
val |= (TG3_RX_STD_DMA_SZ << 2);
} else
val = TG3_RX_STD_DMA_SZ << BDINFO_FLAGS_MAXLEN_SHIFT;
} else
- val = RX_STD_MAX_SIZE_5705 << BDINFO_FLAGS_MAXLEN_SHIFT;
+ val = TG3_RX_STD_MAX_SIZE_5700 << BDINFO_FLAGS_MAXLEN_SHIFT;
tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS, val);
tpr->rx_std_prod_idx = tp->rx_pending;
tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG, tpr->rx_std_prod_idx);
- tpr->rx_jmb_prod_idx = (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) ?
- tp->rx_jumbo_pending : 0;
+ tpr->rx_jmb_prod_idx =
+ tg3_flag(tp, JUMBO_RING_ENABLE) ? tp->rx_jumbo_pending : 0;
tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG, tpr->rx_jmb_prod_idx);
- if (tp->tg3_flags3 & TG3_FLG3_5717_PLUS) {
- tw32(STD_REPLENISH_LWM, 32);
- tw32(JMB_REPLENISH_LWM, 16);
- }
-
tg3_rings_reset(tp);
/* Initialize MAC address and backoff seed. */
@@ -8165,10 +8431,16 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
/* The slot time is changed by tg3_setup_phy if we
* run at gigabit with half duplex.
*/
- tw32(MAC_TX_LENGTHS,
- (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
- (6 << TX_LENGTHS_IPG_SHIFT) |
- (32 << TX_LENGTHS_SLOT_TIME_SHIFT));
+ val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
+ (6 << TX_LENGTHS_IPG_SHIFT) |
+ (32 << TX_LENGTHS_SLOT_TIME_SHIFT);
+
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
+ val |= tr32(MAC_TX_LENGTHS) &
+ (TX_LENGTHS_JMB_FRM_LEN_MSK |
+ TX_LENGTHS_CNT_DWN_VAL_MSK);
+
+ tw32(MAC_TX_LENGTHS, val);
/* Receive rules. */
tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
@@ -8195,33 +8467,39 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
- if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE &&
+ if (tg3_flag(tp, TSO_CAPABLE) &&
GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
} else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
- !(tp->tg3_flags2 & TG3_FLG2_IS_5788)) {
+ !tg3_flag(tp, IS_5788)) {
rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
}
}
- if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)
+ if (tg3_flag(tp, PCI_EXPRESS))
rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
- if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
+ if (tg3_flag(tp, HW_TSO_1) ||
+ tg3_flag(tp, HW_TSO_2) ||
+ tg3_flag(tp, HW_TSO_3))
rdmac_mode |= RDMAC_MODE_IPV4_LSO_EN;
- if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO_3) ||
+ if (tg3_flag(tp, HW_TSO_3) ||
GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
rdmac_mode |= RDMAC_MODE_IPV6_LSO_EN;
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
+ rdmac_mode |= tr32(RDMAC_MODE) & RDMAC_MODE_H2BNC_VLAN_DET;
+
if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
- (tp->tg3_flags3 & TG3_FLG3_5717_PLUS)) {
+ tg3_flag(tp, 57765_PLUS)) {
val = tr32(TG3_RDMA_RSRVCTRL_REG);
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719) {
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
+ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
val &= ~(TG3_RDMA_RSRVCTRL_TXMRGN_MASK |
TG3_RDMA_RSRVCTRL_FIFO_LWM_MASK |
TG3_RDMA_RSRVCTRL_FIFO_HWM_MASK);
@@ -8233,7 +8511,8 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
val | TG3_RDMA_RSRVCTRL_FIFO_OFLW_FIX);
}
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719) {
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
+ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val |
TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_BD_4K |
@@ -8241,12 +8520,12 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
}
/* Receive/send statistics. */
- if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
+ if (tg3_flag(tp, 5750_PLUS)) {
val = tr32(RCVLPC_STATS_ENABLE);
val &= ~RCVLPC_STATSENAB_DACK_FIX;
tw32(RCVLPC_STATS_ENABLE, val);
} else if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
- (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)) {
+ tg3_flag(tp, TSO_CAPABLE)) {
val = tr32(RCVLPC_STATS_ENABLE);
val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
tw32(RCVLPC_STATS_ENABLE, val);
@@ -8269,7 +8548,7 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
__tg3_set_coalesce(tp, &tp->coal);
- if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
+ if (!tg3_flag(tp, 5705_PLUS)) {
/* Status/statistics block address. See tg3_timer,
* the tg3_periodic_fetch_stats call there, and
* tg3_get_stats to see how this works for 5705/5750 chips.
@@ -8295,7 +8574,7 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
- if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
+ if (!tg3_flag(tp, 5705_PLUS))
tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);
if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
@@ -8305,13 +8584,13 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
udelay(10);
}
- if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)
+ if (tg3_flag(tp, ENABLE_APE))
tp->mac_mode = MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
else
tp->mac_mode = 0;
tp->mac_mode |= MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE | MAC_MODE_FHDE_ENABLE;
- if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) &&
+ if (!tg3_flag(tp, 5705_PLUS) &&
!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700)
tp->mac_mode |= MAC_MODE_LINK_POLARITY;
@@ -8319,12 +8598,12 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
udelay(40);
/* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
- * If TG3_FLG2_IS_NIC is zero, we should read the
+ * If TG3_FLAG_IS_NIC is zero, we should read the
* register to preserve the GPIO settings for LOMs. The GPIOs,
* whether used as inputs or outputs, are set by boot code after
* reset.
*/
- if (!(tp->tg3_flags2 & TG3_FLG2_IS_NIC)) {
+ if (!tg3_flag(tp, IS_NIC)) {
u32 gpio_mask;
gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE1 |
@@ -8342,21 +8621,20 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;
/* GPIO1 must be driven high for eeprom write protect */
- if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT)
+ if (tg3_flag(tp, EEPROM_WRITE_PROT))
tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
GRC_LCLCTRL_GPIO_OUTPUT1);
}
tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
udelay(100);
- if ((tp->tg3_flags2 & TG3_FLG2_USING_MSIX) &&
- tp->irq_cnt > 1) {
+ if (tg3_flag(tp, USING_MSIX) && tp->irq_cnt > 1) {
val = tr32(MSGINT_MODE);
val |= MSGINT_MODE_MULTIVEC_EN | MSGINT_MODE_ENABLE;
tw32(MSGINT_MODE, val);
}
- if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
+ if (!tg3_flag(tp, 5705_PLUS)) {
tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
udelay(40);
}
@@ -8369,18 +8647,18 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
- if ((tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) &&
+ if (tg3_flag(tp, TSO_CAPABLE) &&
(tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 ||
tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) {
/* nothing */
} else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
- !(tp->tg3_flags2 & TG3_FLG2_IS_5788)) {
+ !tg3_flag(tp, IS_5788)) {
val |= WDMAC_MODE_RX_ACCEL;
}
}
/* Enable host coalescing bug fix */
- if (tp->tg3_flags3 & TG3_FLG3_5755_PLUS)
+ if (tg3_flag(tp, 5755_PLUS))
val |= WDMAC_MODE_STATUS_TAG_FIX;
if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
@@ -8389,7 +8667,7 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
tw32_f(WDMAC_MODE, val);
udelay(40);
- if (tp->tg3_flags & TG3_FLAG_PCIX_MODE) {
+ if (tg3_flag(tp, PCIX_MODE)) {
u16 pcix_cmd;
pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
@@ -8409,7 +8687,7 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
udelay(40);
tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
- if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
+ if (!tg3_flag(tp, 5705_PLUS))
tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
@@ -8421,15 +8699,16 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
val = RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ;
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
+ if (tg3_flag(tp, LRG_PROD_RING_CAP))
val |= RCVDBDI_MODE_LRG_RING_SZ;
tw32(RCVDBDI_MODE, val);
tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
- if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
+ if (tg3_flag(tp, HW_TSO_1) ||
+ tg3_flag(tp, HW_TSO_2) ||
+ tg3_flag(tp, HW_TSO_3))
tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
val = SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE;
- if (tp->tg3_flags3 & TG3_FLG3_ENABLE_TSS)
+ if (tg3_flag(tp, ENABLE_TSS))
val |= SNDBDI_MODE_MULTI_TXQ_EN;
tw32(SNDBDI_MODE, val);
tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
@@ -8440,20 +8719,28 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
return err;
}
- if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) {
+ if (tg3_flag(tp, TSO_CAPABLE)) {
err = tg3_load_tso_firmware(tp);
if (err)
return err;
}
tp->tx_mode = TX_MODE_ENABLE;
- if ((tp->tg3_flags3 & TG3_FLG3_5755_PLUS) ||
+
+ if (tg3_flag(tp, 5755_PLUS) ||
GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
tp->tx_mode |= TX_MODE_MBUF_LOCKUP_FIX;
+
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
+ val = TX_MODE_JMB_FRM_LEN | TX_MODE_CNT_DN_MODE;
+ tp->tx_mode &= ~val;
+ tp->tx_mode |= tr32(MAC_TX_MODE) & val;
+ }
+
tw32_f(MAC_TX_MODE, tp->tx_mode);
udelay(100);
- if (tp->tg3_flags3 & TG3_FLG3_ENABLE_RSS) {
+ if (tg3_flag(tp, ENABLE_RSS)) {
u32 reg = MAC_RSS_INDIR_TBL_0;
u8 *ent = (u8 *)&val;
@@ -8482,10 +8769,10 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
}
tp->rx_mode = RX_MODE_ENABLE;
- if (tp->tg3_flags3 & TG3_FLG3_5755_PLUS)
+ if (tg3_flag(tp, 5755_PLUS))
tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE;
- if (tp->tg3_flags3 & TG3_FLG3_ENABLE_RSS)
+ if (tg3_flag(tp, ENABLE_RSS))
tp->rx_mode |= RX_MODE_RSS_ENABLE |
RX_MODE_RSS_ITBL_HASH_BITS_7 |
RX_MODE_RSS_IPV6_HASH_EN |
@@ -8532,7 +8819,7 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
/* Use hardware link auto-negotiation */
- tp->tg3_flags2 |= TG3_FLG2_HW_AUTONEG;
+ tg3_flag_set(tp, HW_AUTONEG);
}
if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
@@ -8546,7 +8833,7 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
}
- if (!(tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB)) {
+ if (!tg3_flag(tp, USE_PHYLIB)) {
if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
tp->link_config.speed = tp->link_config.orig_speed;
@@ -8579,12 +8866,11 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
tw32(MAC_RCV_RULE_1, 0x86000004 & RCV_RULE_DISABLE_MASK);
tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);
- if ((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) &&
- !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
+ if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS))
limit = 8;
else
limit = 16;
- if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF)
+ if (tg3_flag(tp, ENABLE_ASF))
limit -= 4;
switch (limit) {
case 16:
@@ -8622,7 +8908,7 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
break;
}
- if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)
+ if (tg3_flag(tp, ENABLE_APE))
/* Write our heartbeat update interval to APE. */
tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_INT_MS,
APE_HOST_HEARTBEAT_INT_DISABLE);
@@ -8688,7 +8974,19 @@ static void tg3_periodic_fetch_stats(struct tg3 *tp)
TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);
TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT);
- TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT);
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717) {
+ TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT);
+ } else {
+ u32 val = tr32(HOSTCC_FLOW_ATTN);
+ val = (val & HOSTCC_FLOW_ATTN_MBUF_LWM) ? 1 : 0;
+ if (val) {
+ tw32(HOSTCC_FLOW_ATTN, HOSTCC_FLOW_ATTN_MBUF_LWM);
+ sp->rx_discards.low += val;
+ if (sp->rx_discards.low < val)
+ sp->rx_discards.high += 1;
+ }
+ sp->mbuf_lwm_thresh_hit = sp->rx_discards;
+ }
TG3_STAT_ADD32(&sp->rx_errors, RCVLPC_IN_ERRORS_CNT);
}
@@ -8701,7 +8999,7 @@ static void tg3_timer(unsigned long __opaque)
spin_lock(&tp->lock);
- if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)) {
+ if (!tg3_flag(tp, TAGGED_STATUS)) {
/* All of this garbage is because when using non-tagged
* IRQ status the mailbox/status_block protocol the chip
* uses with the cpu is race prone.
@@ -8715,7 +9013,7 @@ static void tg3_timer(unsigned long __opaque)
}
if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
- tp->tg3_flags2 |= TG3_FLG2_RESTART_TIMER;
+ tg3_flag_set(tp, RESTART_TIMER);
spin_unlock(&tp->lock);
schedule_work(&tp->reset_task);
return;
@@ -8724,7 +9022,7 @@ static void tg3_timer(unsigned long __opaque)
/* This part only runs once per second. */
if (!--tp->timer_counter) {
- if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
+ if (tg3_flag(tp, 5705_PLUS))
tg3_periodic_fetch_stats(tp);
if (tp->setlpicnt && !--tp->setlpicnt) {
@@ -8733,7 +9031,7 @@ static void tg3_timer(unsigned long __opaque)
val | TG3_CPMU_EEEMD_LPI_ENABLE);
}
- if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) {
+ if (tg3_flag(tp, USE_LINKCHG_REG)) {
u32 mac_stat;
int phy_event;
@@ -8748,7 +9046,7 @@ static void tg3_timer(unsigned long __opaque)
if (phy_event)
tg3_setup_phy(tp, 0);
- } else if (tp->tg3_flags & TG3_FLAG_POLL_SERDES) {
+ } else if (tg3_flag(tp, POLL_SERDES)) {
u32 mac_stat = tr32(MAC_STATUS);
int need_setup = 0;
@@ -8773,7 +9071,7 @@ static void tg3_timer(unsigned long __opaque)
tg3_setup_phy(tp, 0);
}
} else if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
- (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
+ tg3_flag(tp, 5780_CLASS)) {
tg3_serdes_parallel_detect(tp);
}
@@ -8798,8 +9096,7 @@ static void tg3_timer(unsigned long __opaque)
* resets.
*/
if (!--tp->asf_counter) {
- if ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) &&
- !(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)) {
+ if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
tg3_wait_for_event_ack(tp);
tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX,
@@ -8835,16 +9132,16 @@ static int tg3_request_irq(struct tg3 *tp, int irq_num)
name[IFNAMSIZ-1] = 0;
}
- if (tp->tg3_flags2 & TG3_FLG2_USING_MSI_OR_MSIX) {
+ if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
fn = tg3_msi;
- if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI)
+ if (tg3_flag(tp, 1SHOT_MSI))
fn = tg3_msi_1shot;
- flags = IRQF_SAMPLE_RANDOM;
+ flags = 0;
} else {
fn = tg3_interrupt;
- if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
+ if (tg3_flag(tp, TAGGED_STATUS))
fn = tg3_interrupt_tagged;
- flags = IRQF_SHARED | IRQF_SAMPLE_RANDOM;
+ flags = IRQF_SHARED;
}
return request_irq(tnapi->irq_vec, fn, flags, name, tnapi);
@@ -8868,8 +9165,7 @@ static int tg3_test_interrupt(struct tg3 *tp)
* Turn off MSI one shot mode. Otherwise this test has no
* observable way to know whether the interrupt was delivered.
*/
- if ((tp->tg3_flags3 & TG3_FLG3_5717_PLUS) &&
- (tp->tg3_flags2 & TG3_FLG2_USING_MSI)) {
+ if (tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, USING_MSI)) {
val = tr32(MSGINT_MODE) | MSGINT_MODE_ONE_SHOT_DISABLE;
tw32(MSGINT_MODE, val);
}
@@ -8911,8 +9207,7 @@ static int tg3_test_interrupt(struct tg3 *tp)
if (intr_ok) {
/* Reenable MSI one shot mode. */
- if ((tp->tg3_flags3 & TG3_FLG3_5717_PLUS) &&
- (tp->tg3_flags2 & TG3_FLG2_USING_MSI)) {
+ if (tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, USING_MSI)) {
val = tr32(MSGINT_MODE) & ~MSGINT_MODE_ONE_SHOT_DISABLE;
tw32(MSGINT_MODE, val);
}
@@ -8930,7 +9225,7 @@ static int tg3_test_msi(struct tg3 *tp)
int err;
u16 pci_cmd;
- if (!(tp->tg3_flags2 & TG3_FLG2_USING_MSI))
+ if (!tg3_flag(tp, USING_MSI))
return 0;
/* Turn off SERR reporting in case MSI terminates with Master
@@ -8960,7 +9255,7 @@ static int tg3_test_msi(struct tg3 *tp)
pci_disable_msi(tp->pdev);
- tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
+ tg3_flag_clear(tp, USING_MSI);
tp->napi[0].irq_vec = tp->pdev->irq;
err = tg3_request_irq(tp, 0);
@@ -9057,9 +9352,11 @@ static bool tg3_enable_msix(struct tg3 *tp)
}
if (tp->irq_cnt > 1) {
- tp->tg3_flags3 |= TG3_FLG3_ENABLE_RSS;
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719) {
- tp->tg3_flags3 |= TG3_FLG3_ENABLE_TSS;
+ tg3_flag_set(tp, ENABLE_RSS);
+
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
+ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
+ tg3_flag_set(tp, ENABLE_TSS);
netif_set_real_num_tx_queues(tp->dev, tp->irq_cnt - 1);
}
}
@@ -9069,8 +9366,8 @@ static bool tg3_enable_msix(struct tg3 *tp)
static void tg3_ints_init(struct tg3 *tp)
{
- if ((tp->tg3_flags & TG3_FLAG_SUPPORT_MSI_OR_MSIX) &&
- !(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)) {
+ if ((tg3_flag(tp, SUPPORT_MSI) || tg3_flag(tp, SUPPORT_MSIX)) &&
+ !tg3_flag(tp, TAGGED_STATUS)) {
/* All MSI supporting chips should support tagged
* status. Assert that this is the case.
*/
@@ -9079,21 +9376,19 @@ static void tg3_ints_init(struct tg3 *tp)
goto defcfg;
}
- if ((tp->tg3_flags & TG3_FLAG_SUPPORT_MSIX) && tg3_enable_msix(tp))
- tp->tg3_flags2 |= TG3_FLG2_USING_MSIX;
- else if ((tp->tg3_flags & TG3_FLAG_SUPPORT_MSI) &&
- pci_enable_msi(tp->pdev) == 0)
- tp->tg3_flags2 |= TG3_FLG2_USING_MSI;
+ if (tg3_flag(tp, SUPPORT_MSIX) && tg3_enable_msix(tp))
+ tg3_flag_set(tp, USING_MSIX);
+ else if (tg3_flag(tp, SUPPORT_MSI) && pci_enable_msi(tp->pdev) == 0)
+ tg3_flag_set(tp, USING_MSI);
- if (tp->tg3_flags2 & TG3_FLG2_USING_MSI_OR_MSIX) {
+ if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
u32 msi_mode = tr32(MSGINT_MODE);
- if ((tp->tg3_flags2 & TG3_FLG2_USING_MSIX) &&
- tp->irq_cnt > 1)
+ if (tg3_flag(tp, USING_MSIX) && tp->irq_cnt > 1)
msi_mode |= MSGINT_MODE_MULTIVEC_EN;
tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
}
defcfg:
- if (!(tp->tg3_flags2 & TG3_FLG2_USING_MSIX)) {
+ if (!tg3_flag(tp, USING_MSIX)) {
tp->irq_cnt = 1;
tp->napi[0].irq_vec = tp->pdev->irq;
netif_set_real_num_tx_queues(tp->dev, 1);
@@ -9103,12 +9398,14 @@ defcfg:
static void tg3_ints_fini(struct tg3 *tp)
{
- if (tp->tg3_flags2 & TG3_FLG2_USING_MSIX)
+ if (tg3_flag(tp, USING_MSIX))
pci_disable_msix(tp->pdev);
- else if (tp->tg3_flags2 & TG3_FLG2_USING_MSI)
+ else if (tg3_flag(tp, USING_MSI))
pci_disable_msi(tp->pdev);
- tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI_OR_MSIX;
- tp->tg3_flags3 &= ~(TG3_FLG3_ENABLE_RSS | TG3_FLG3_ENABLE_TSS);
+ tg3_flag_clear(tp, USING_MSI);
+ tg3_flag_clear(tp, USING_MSIX);
+ tg3_flag_clear(tp, ENABLE_RSS);
+ tg3_flag_clear(tp, ENABLE_TSS);
}
static int tg3_open(struct net_device *dev)
@@ -9123,10 +9420,10 @@ static int tg3_open(struct net_device *dev)
return err;
} else if (err) {
netdev_warn(tp->dev, "TSO capability disabled\n");
- tp->tg3_flags2 &= ~TG3_FLG2_TSO_CAPABLE;
- } else if (!(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)) {
+ tg3_flag_clear(tp, TSO_CAPABLE);
+ } else if (!tg3_flag(tp, TSO_CAPABLE)) {
netdev_notice(tp->dev, "TSO capability restored\n");
- tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
+ tg3_flag_set(tp, TSO_CAPABLE);
}
}
@@ -9139,7 +9436,7 @@ static int tg3_open(struct net_device *dev)
tg3_full_lock(tp, 0);
tg3_disable_ints(tp);
- tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;
+ tg3_flag_clear(tp, INIT_COMPLETE);
tg3_full_unlock(tp);
@@ -9180,7 +9477,7 @@ static int tg3_open(struct net_device *dev)
tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
tg3_free_rings(tp);
} else {
- if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
+ if (tg3_flag(tp, TAGGED_STATUS))
tp->timer_offset = HZ;
else
tp->timer_offset = HZ / 10;
@@ -9202,7 +9499,7 @@ static int tg3_open(struct net_device *dev)
if (err)
goto err_out3;
- if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
+ if (tg3_flag(tp, USING_MSI)) {
err = tg3_test_msi(tp);
if (err) {
@@ -9214,8 +9511,7 @@ static int tg3_open(struct net_device *dev)
goto err_out2;
}
- if (!(tp->tg3_flags3 & TG3_FLG3_5717_PLUS) &&
- (tp->tg3_flags2 & TG3_FLG2_USING_MSI)) {
+ if (!tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, USING_MSI)) {
u32 val = tr32(PCIE_TRANSACTION_CFG);
tw32(PCIE_TRANSACTION_CFG,
@@ -9228,13 +9524,20 @@ static int tg3_open(struct net_device *dev)
tg3_full_lock(tp, 0);
add_timer(&tp->timer);
- tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
+ tg3_flag_set(tp, INIT_COMPLETE);
tg3_enable_ints(tp);
tg3_full_unlock(tp);
netif_tx_start_all_queues(dev);
+ /*
+ * Reset loopback feature if it was turned on while the device was down;
+ * make sure that it's installed properly now.
+ */
+ if (dev->features & NETIF_F_LOOPBACK)
+ tg3_set_loopback(dev, dev->features);
+
return 0;
err_out3:
@@ -9277,7 +9580,7 @@ static int tg3_close(struct net_device *dev)
tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
tg3_free_rings(tp);
- tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;
+ tg3_flag_clear(tp, INIT_COMPLETE);
tg3_full_unlock(tp);
@@ -9534,7 +9837,7 @@ static void __tg3_set_rx_mode(struct net_device *dev)
/* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
* flag clear.
*/
- if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
+ if (!tg3_flag(tp, ENABLE_ASF))
rx_mode |= RX_MODE_KEEP_VLAN_TAG;
#endif
@@ -9588,82 +9891,26 @@ static void tg3_set_rx_mode(struct net_device *dev)
tg3_full_unlock(tp);
}
-#define TG3_REGDUMP_LEN (32 * 1024)
-
static int tg3_get_regs_len(struct net_device *dev)
{
- return TG3_REGDUMP_LEN;
+ return TG3_REG_BLK_SIZE;
}
static void tg3_get_regs(struct net_device *dev,
struct ethtool_regs *regs, void *_p)
{
- u32 *p = _p;
struct tg3 *tp = netdev_priv(dev);
- u8 *orig_p = _p;
- int i;
regs->version = 0;
- memset(p, 0, TG3_REGDUMP_LEN);
+ memset(_p, 0, TG3_REG_BLK_SIZE);
if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
return;
tg3_full_lock(tp, 0);
-#define __GET_REG32(reg) (*(p)++ = tr32(reg))
-#define GET_REG32_LOOP(base, len) \
-do { p = (u32 *)(orig_p + (base)); \
- for (i = 0; i < len; i += 4) \
- __GET_REG32((base) + i); \
-} while (0)
-#define GET_REG32_1(reg) \
-do { p = (u32 *)(orig_p + (reg)); \
- __GET_REG32((reg)); \
-} while (0)
-
- GET_REG32_LOOP(TG3PCI_VENDOR, 0xb0);
- GET_REG32_LOOP(MAILBOX_INTERRUPT_0, 0x200);
- GET_REG32_LOOP(MAC_MODE, 0x4f0);
- GET_REG32_LOOP(SNDDATAI_MODE, 0xe0);
- GET_REG32_1(SNDDATAC_MODE);
- GET_REG32_LOOP(SNDBDS_MODE, 0x80);
- GET_REG32_LOOP(SNDBDI_MODE, 0x48);
- GET_REG32_1(SNDBDC_MODE);
- GET_REG32_LOOP(RCVLPC_MODE, 0x20);
- GET_REG32_LOOP(RCVLPC_SELLST_BASE, 0x15c);
- GET_REG32_LOOP(RCVDBDI_MODE, 0x0c);
- GET_REG32_LOOP(RCVDBDI_JUMBO_BD, 0x3c);
- GET_REG32_LOOP(RCVDBDI_BD_PROD_IDX_0, 0x44);
- GET_REG32_1(RCVDCC_MODE);
- GET_REG32_LOOP(RCVBDI_MODE, 0x20);
- GET_REG32_LOOP(RCVCC_MODE, 0x14);
- GET_REG32_LOOP(RCVLSC_MODE, 0x08);
- GET_REG32_1(MBFREE_MODE);
- GET_REG32_LOOP(HOSTCC_MODE, 0x100);
- GET_REG32_LOOP(MEMARB_MODE, 0x10);
- GET_REG32_LOOP(BUFMGR_MODE, 0x58);
- GET_REG32_LOOP(RDMAC_MODE, 0x08);
- GET_REG32_LOOP(WDMAC_MODE, 0x08);
- GET_REG32_1(RX_CPU_MODE);
- GET_REG32_1(RX_CPU_STATE);
- GET_REG32_1(RX_CPU_PGMCTR);
- GET_REG32_1(RX_CPU_HWBKPT);
- GET_REG32_1(TX_CPU_MODE);
- GET_REG32_1(TX_CPU_STATE);
- GET_REG32_1(TX_CPU_PGMCTR);
- GET_REG32_LOOP(GRCMBOX_INTERRUPT_0, 0x110);
- GET_REG32_LOOP(FTQ_RESET, 0x120);
- GET_REG32_LOOP(MSGINT_MODE, 0x0c);
- GET_REG32_1(DMAC_MODE);
- GET_REG32_LOOP(GRC_MODE, 0x4c);
- if (tp->tg3_flags & TG3_FLAG_NVRAM)
- GET_REG32_LOOP(NVRAM_CMD, 0x24);
-
-#undef __GET_REG32
-#undef GET_REG32_LOOP
-#undef GET_REG32_1
+ tg3_dump_legacy_regs(tp, (u32 *)_p);
tg3_full_unlock(tp);
}
@@ -9683,7 +9930,7 @@ static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
u32 i, offset, len, b_offset, b_count;
__be32 val;
- if (tp->tg3_flags3 & TG3_FLG3_NO_NVRAM)
+ if (tg3_flag(tp, NO_NVRAM))
return -EINVAL;
if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
@@ -9712,7 +9959,7 @@ static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
eeprom->len += b_count;
}
- /* read bytes upto the last 4 byte boundary */
+ /* read bytes up to the last 4 byte boundary */
pd = &data[eeprom->len];
for (i = 0; i < (len - (len & 3)); i += 4) {
ret = tg3_nvram_read_be32(tp, offset + i, &val);
@@ -9751,7 +9998,7 @@ static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
return -EAGAIN;
- if ((tp->tg3_flags3 & TG3_FLG3_NO_NVRAM) ||
+ if (tg3_flag(tp, NO_NVRAM) ||
eeprom->magic != TG3_EEPROM_MAGIC)
return -EINVAL;
@@ -9803,7 +10050,7 @@ static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
struct tg3 *tp = netdev_priv(dev);
- if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
+ if (tg3_flag(tp, USE_PHYLIB)) {
struct phy_device *phydev;
if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
return -EAGAIN;
@@ -9831,10 +10078,10 @@ static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
cmd->advertising = tp->link_config.advertising;
if (netif_running(dev)) {
- cmd->speed = tp->link_config.active_speed;
+ ethtool_cmd_speed_set(cmd, tp->link_config.active_speed);
cmd->duplex = tp->link_config.active_duplex;
} else {
- cmd->speed = SPEED_INVALID;
+ ethtool_cmd_speed_set(cmd, SPEED_INVALID);
cmd->duplex = DUPLEX_INVALID;
}
cmd->phy_address = tp->phy_addr;
@@ -9848,8 +10095,9 @@ static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
struct tg3 *tp = netdev_priv(dev);
+ u32 speed = ethtool_cmd_speed(cmd);
- if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
+ if (tg3_flag(tp, USE_PHYLIB)) {
struct phy_device *phydev;
if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
return -EAGAIN;
@@ -9897,14 +10145,14 @@ static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
cmd->advertising &= mask;
} else {
if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES) {
- if (cmd->speed != SPEED_1000)
+ if (speed != SPEED_1000)
return -EINVAL;
if (cmd->duplex != DUPLEX_FULL)
return -EINVAL;
} else {
- if (cmd->speed != SPEED_100 &&
- cmd->speed != SPEED_10)
+ if (speed != SPEED_100 &&
+ speed != SPEED_10)
return -EINVAL;
}
}
@@ -9919,7 +10167,7 @@ static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
tp->link_config.duplex = DUPLEX_INVALID;
} else {
tp->link_config.advertising = 0;
- tp->link_config.speed = cmd->speed;
+ tp->link_config.speed = speed;
tp->link_config.duplex = cmd->duplex;
}
@@ -9949,14 +10197,12 @@ static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
struct tg3 *tp = netdev_priv(dev);
- if ((tp->tg3_flags & TG3_FLAG_WOL_CAP) &&
- device_can_wakeup(&tp->pdev->dev))
+ if (tg3_flag(tp, WOL_CAP) && device_can_wakeup(&tp->pdev->dev))
wol->supported = WAKE_MAGIC;
else
wol->supported = 0;
wol->wolopts = 0;
- if ((tp->tg3_flags & TG3_FLAG_WOL_ENABLE) &&
- device_can_wakeup(&tp->pdev->dev))
+ if (tg3_flag(tp, WOL_ENABLE) && device_can_wakeup(&tp->pdev->dev))
wol->wolopts = WAKE_MAGIC;
memset(&wol->sopass, 0, sizeof(wol->sopass));
}
@@ -9969,19 +10215,18 @@ static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
if (wol->wolopts & ~WAKE_MAGIC)
return -EINVAL;
if ((wol->wolopts & WAKE_MAGIC) &&
- !((tp->tg3_flags & TG3_FLAG_WOL_CAP) && device_can_wakeup(dp)))
+ !(tg3_flag(tp, WOL_CAP) && device_can_wakeup(dp)))
return -EINVAL;
device_set_wakeup_enable(dp, wol->wolopts & WAKE_MAGIC);
spin_lock_bh(&tp->lock);
if (device_may_wakeup(dp))
- tp->tg3_flags |= TG3_FLAG_WOL_ENABLE;
+ tg3_flag_set(tp, WOL_ENABLE);
else
- tp->tg3_flags &= ~TG3_FLAG_WOL_ENABLE;
+ tg3_flag_clear(tp, WOL_ENABLE);
spin_unlock_bh(&tp->lock);
-
return 0;
}
@@ -9997,33 +10242,6 @@ static void tg3_set_msglevel(struct net_device *dev, u32 value)
tp->msg_enable = value;
}
-static int tg3_set_tso(struct net_device *dev, u32 value)
-{
- struct tg3 *tp = netdev_priv(dev);
-
- if (!(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)) {
- if (value)
- return -EINVAL;
- return 0;
- }
- if ((dev->features & NETIF_F_IPV6_CSUM) &&
- ((tp->tg3_flags2 & TG3_FLG2_HW_TSO_2) ||
- (tp->tg3_flags2 & TG3_FLG2_HW_TSO_3))) {
- if (value) {
- dev->features |= NETIF_F_TSO6;
- if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO_3) ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
- (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
- GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
- dev->features |= NETIF_F_TSO_ECN;
- } else
- dev->features &= ~(NETIF_F_TSO6 | NETIF_F_TSO_ECN);
- }
- return ethtool_op_set_tso(dev, value);
-}
-
static int tg3_nway_reset(struct net_device *dev)
{
struct tg3 *tp = netdev_priv(dev);
@@ -10035,7 +10253,7 @@ static int tg3_nway_reset(struct net_device *dev)
if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
return -EINVAL;
- if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
+ if (tg3_flag(tp, USE_PHYLIB)) {
if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
return -EAGAIN;
r = phy_start_aneg(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
@@ -10064,7 +10282,7 @@ static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *
ering->rx_max_pending = tp->rx_std_ring_mask;
ering->rx_mini_max_pending = 0;
- if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE)
+ if (tg3_flag(tp, JUMBO_RING_ENABLE))
ering->rx_jumbo_max_pending = tp->rx_jmb_ring_mask;
else
ering->rx_jumbo_max_pending = 0;
@@ -10073,7 +10291,7 @@ static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *
ering->rx_pending = tp->rx_pending;
ering->rx_mini_pending = 0;
- if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE)
+ if (tg3_flag(tp, JUMBO_RING_ENABLE))
ering->rx_jumbo_pending = tp->rx_jumbo_pending;
else
ering->rx_jumbo_pending = 0;
@@ -10090,7 +10308,7 @@ static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *e
(ering->rx_jumbo_pending > tp->rx_jmb_ring_mask) ||
(ering->tx_pending > TG3_TX_RING_SIZE - 1) ||
(ering->tx_pending <= MAX_SKB_FRAGS) ||
- ((tp->tg3_flags2 & TG3_FLG2_TSO_BUG) &&
+ (tg3_flag(tp, TSO_BUG) &&
(ering->tx_pending <= (MAX_SKB_FRAGS * 3))))
return -EINVAL;
@@ -10104,7 +10322,7 @@ static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *e
tp->rx_pending = ering->rx_pending;
- if ((tp->tg3_flags2 & TG3_FLG2_MAX_RXPEND_64) &&
+ if (tg3_flag(tp, MAX_RXPEND_64) &&
tp->rx_pending > 63)
tp->rx_pending = 63;
tp->rx_jumbo_pending = ering->rx_jumbo_pending;
@@ -10131,7 +10349,7 @@ static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam
{
struct tg3 *tp = netdev_priv(dev);
- epause->autoneg = (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG) != 0;
+ epause->autoneg = !!tg3_flag(tp, PAUSE_AUTONEG);
if (tp->link_config.active_flowctrl & FLOW_CTRL_RX)
epause->rx_pause = 1;
@@ -10149,7 +10367,7 @@ static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam
struct tg3 *tp = netdev_priv(dev);
int err = 0;
- if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
+ if (tg3_flag(tp, USE_PHYLIB)) {
u32 newadv;
struct phy_device *phydev;
@@ -10177,9 +10395,9 @@ static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam
newadv = 0;
if (epause->autoneg)
- tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;
+ tg3_flag_set(tp, PAUSE_AUTONEG);
else
- tp->tg3_flags &= ~TG3_FLAG_PAUSE_AUTONEG;
+ tg3_flag_clear(tp, PAUSE_AUTONEG);
if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
u32 oldadv = phydev->advertising &
@@ -10221,9 +10439,9 @@ static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam
tg3_full_lock(tp, irq_sync);
if (epause->autoneg)
- tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;
+ tg3_flag_set(tp, PAUSE_AUTONEG);
else
- tp->tg3_flags &= ~TG3_FLAG_PAUSE_AUTONEG;
+ tg3_flag_clear(tp, PAUSE_AUTONEG);
if (epause->rx_pause)
tp->link_config.flowctrl |= FLOW_CTRL_RX;
else
@@ -10246,50 +10464,6 @@ static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam
return err;
}
-static u32 tg3_get_rx_csum(struct net_device *dev)
-{
- struct tg3 *tp = netdev_priv(dev);
- return (tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) != 0;
-}
-
-static int tg3_set_rx_csum(struct net_device *dev, u32 data)
-{
- struct tg3 *tp = netdev_priv(dev);
-
- if (tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) {
- if (data != 0)
- return -EINVAL;
- return 0;
- }
-
- spin_lock_bh(&tp->lock);
- if (data)
- tp->tg3_flags |= TG3_FLAG_RX_CHECKSUMS;
- else
- tp->tg3_flags &= ~TG3_FLAG_RX_CHECKSUMS;
- spin_unlock_bh(&tp->lock);
-
- return 0;
-}
-
-static int tg3_set_tx_csum(struct net_device *dev, u32 data)
-{
- struct tg3 *tp = netdev_priv(dev);
-
- if (tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) {
- if (data != 0)
- return -EINVAL;
- return 0;
- }
-
- if (tp->tg3_flags3 & TG3_FLG3_5755_PLUS)
- ethtool_op_set_tx_ipv6_csum(dev, data);
- else
- ethtool_op_set_tx_csum(dev, data);
-
- return 0;
-}
-
static int tg3_get_sset_count(struct net_device *dev, int sset)
{
switch (sset) {
@@ -10317,35 +10491,38 @@ static void tg3_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
}
}
-static int tg3_phys_id(struct net_device *dev, u32 data)
+static int tg3_set_phys_id(struct net_device *dev,
+ enum ethtool_phys_id_state state)
{
struct tg3 *tp = netdev_priv(dev);
- int i;
if (!netif_running(tp->dev))
return -EAGAIN;
- if (data == 0)
- data = UINT_MAX / 2;
-
- for (i = 0; i < (data * 2); i++) {
- if ((i % 2) == 0)
- tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
- LED_CTRL_1000MBPS_ON |
- LED_CTRL_100MBPS_ON |
- LED_CTRL_10MBPS_ON |
- LED_CTRL_TRAFFIC_OVERRIDE |
- LED_CTRL_TRAFFIC_BLINK |
- LED_CTRL_TRAFFIC_LED);
+ switch (state) {
+ case ETHTOOL_ID_ACTIVE:
+ return 1; /* cycle on/off once per second */
+
+ case ETHTOOL_ID_ON:
+ tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
+ LED_CTRL_1000MBPS_ON |
+ LED_CTRL_100MBPS_ON |
+ LED_CTRL_10MBPS_ON |
+ LED_CTRL_TRAFFIC_OVERRIDE |
+ LED_CTRL_TRAFFIC_BLINK |
+ LED_CTRL_TRAFFIC_LED);
+ break;
- else
- tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
- LED_CTRL_TRAFFIC_OVERRIDE);
+ case ETHTOOL_ID_OFF:
+ tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
+ LED_CTRL_TRAFFIC_OVERRIDE);
+ break;
- if (msleep_interruptible(500))
- break;
+ case ETHTOOL_ID_INACTIVE:
+ tw32(MAC_LED_CTRL, tp->led_ctrl);
+ break;
}
- tw32(MAC_LED_CTRL, tp->led_ctrl);
+
return 0;
}
@@ -10356,6 +10533,80 @@ static void tg3_get_ethtool_stats(struct net_device *dev,
memcpy(tmp_stats, tg3_get_estats(tp), sizeof(tp->estats));
}
+static __be32 * tg3_vpd_readblock(struct tg3 *tp)
+{
+ int i;
+ __be32 *buf;
+ u32 offset = 0, len = 0;
+ u32 magic, val;
+
+ if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &magic))
+ return NULL;
+
+ if (magic == TG3_EEPROM_MAGIC) {
+ for (offset = TG3_NVM_DIR_START;
+ offset < TG3_NVM_DIR_END;
+ offset += TG3_NVM_DIRENT_SIZE) {
+ if (tg3_nvram_read(tp, offset, &val))
+ return NULL;
+
+ if ((val >> TG3_NVM_DIRTYPE_SHIFT) ==
+ TG3_NVM_DIRTYPE_EXTVPD)
+ break;
+ }
+
+ if (offset != TG3_NVM_DIR_END) {
+ len = (val & TG3_NVM_DIRTYPE_LENMSK) * 4;
+ if (tg3_nvram_read(tp, offset + 4, &offset))
+ return NULL;
+
+ offset = tg3_nvram_logical_addr(tp, offset);
+ }
+ }
+
+ if (!offset || !len) {
+ offset = TG3_NVM_VPD_OFF;
+ len = TG3_NVM_VPD_LEN;
+ }
+
+ buf = kmalloc(len, GFP_KERNEL);
+ if (buf == NULL)
+ return NULL;
+
+ if (magic == TG3_EEPROM_MAGIC) {
+ for (i = 0; i < len; i += 4) {
+ /* The data is in little-endian format in NVRAM.
+ * Use the big-endian read routines to preserve
+ * the byte order as it exists in NVRAM.
+ */
+ if (tg3_nvram_read_be32(tp, offset + i, &buf[i/4]))
+ goto error;
+ }
+ } else {
+ u8 *ptr;
+ ssize_t cnt;
+ unsigned int pos = 0;
+
+ ptr = (u8 *)&buf[0];
+ for (i = 0; pos < len && i < 3; i++, pos += cnt, ptr += cnt) {
+ cnt = pci_read_vpd(tp->pdev, pos,
+ len - pos, ptr);
+ if (cnt == -ETIMEDOUT || cnt == -EINTR)
+ cnt = 0;
+ else if (cnt < 0)
+ goto error;
+ }
+ if (pos != len)
+ goto error;
+ }
+
+ return buf;
+
+error:
+ kfree(buf);
+ return NULL;
+}
+
#define NVRAM_TEST_SIZE 0x100
#define NVRAM_SELFBOOT_FORMAT1_0_SIZE 0x14
#define NVRAM_SELFBOOT_FORMAT1_2_SIZE 0x18
@@ -10369,7 +10620,7 @@ static int tg3_test_nvram(struct tg3 *tp)
__be32 *buf;
int i, j, k, err = 0, size;
- if (tp->tg3_flags3 & TG3_FLG3_NO_NVRAM)
+ if (tg3_flag(tp, NO_NVRAM))
return 0;
if (tg3_nvram_read(tp, 0, &magic) != 0)
@@ -10495,14 +10746,11 @@ static int tg3_test_nvram(struct tg3 *tp)
if (csum != le32_to_cpu(buf[0xfc/4]))
goto out;
- for (i = 0; i < TG3_NVM_VPD_LEN; i += 4) {
- /* The data is in little-endian format in NVRAM.
- * Use the big-endian read routines to preserve
- * the byte order as it exists in NVRAM.
- */
- if (tg3_nvram_read_be32(tp, TG3_NVM_VPD_OFF + i, &buf[i/4]))
- goto out;
- }
+ kfree(buf);
+
+ buf = tg3_vpd_readblock(tp);
+ if (!buf)
+ return -ENOMEM;
i = pci_vpd_find_tag((u8 *)buf, 0, TG3_NVM_VPD_LEN,
PCI_VPD_LRDT_RO_DATA);
@@ -10714,9 +10962,9 @@ static int tg3_test_registers(struct tg3 *tp)
};
is_5705 = is_5750 = 0;
- if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
+ if (tg3_flag(tp, 5705_PLUS)) {
is_5705 = 1;
- if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
+ if (tg3_flag(tp, 5750_PLUS))
is_5750 = 1;
}
@@ -10727,7 +10975,7 @@ static int tg3_test_registers(struct tg3 *tp)
if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705))
continue;
- if ((tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
+ if (tg3_flag(tp, IS_5788) &&
(reg_tbl[i].flags & TG3_FL_NOT_5788))
continue;
@@ -10850,16 +11098,15 @@ static int tg3_test_memory(struct tg3 *tp)
int err = 0;
int i;
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
+ if (tg3_flag(tp, 5717_PLUS))
mem_tbl = mem_tbl_5717;
else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
mem_tbl = mem_tbl_57765;
- else if (tp->tg3_flags3 & TG3_FLG3_5755_PLUS)
+ else if (tg3_flag(tp, 5755_PLUS))
mem_tbl = mem_tbl_5755;
else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
mem_tbl = mem_tbl_5906;
- else if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
+ else if (tg3_flag(tp, 5705_PLUS))
mem_tbl = mem_tbl_5705;
else
mem_tbl = mem_tbl_570x;
@@ -10875,11 +11122,35 @@ static int tg3_test_memory(struct tg3 *tp)
#define TG3_MAC_LOOPBACK 0
#define TG3_PHY_LOOPBACK 1
+#define TG3_TSO_LOOPBACK 2
+
+#define TG3_TSO_MSS 500
+
+#define TG3_TSO_IP_HDR_LEN 20
+#define TG3_TSO_TCP_HDR_LEN 20
+#define TG3_TSO_TCP_OPT_LEN 12
+
+static const u8 tg3_tso_header[] = {
+0x08, 0x00,
+0x45, 0x00, 0x00, 0x00,
+0x00, 0x00, 0x40, 0x00,
+0x40, 0x06, 0x00, 0x00,
+0x0a, 0x00, 0x00, 0x01,
+0x0a, 0x00, 0x00, 0x02,
+0x0d, 0x00, 0xe0, 0x00,
+0x00, 0x00, 0x01, 0x00,
+0x00, 0x00, 0x02, 0x00,
+0x80, 0x10, 0x10, 0x00,
+0x14, 0x09, 0x00, 0x00,
+0x01, 0x01, 0x08, 0x0a,
+0x11, 0x11, 0x11, 0x11,
+0x11, 0x11, 0x11, 0x11,
+};
-static int tg3_run_loopback(struct tg3 *tp, int loopback_mode)
+static int tg3_run_loopback(struct tg3 *tp, u32 pktsz, int loopback_mode)
{
u32 mac_mode, rx_start_idx, rx_idx, tx_idx, opaque_key;
- u32 desc_idx, coal_now;
+ u32 base_flags = 0, mss = 0, desc_idx, coal_now, data_off, val;
struct sk_buff *skb, *rx_skb;
u8 *tx_data;
dma_addr_t map;
@@ -10891,9 +11162,9 @@ static int tg3_run_loopback(struct tg3 *tp, int loopback_mode)
tnapi = &tp->napi[0];
rnapi = &tp->napi[0];
if (tp->irq_cnt > 1) {
- if (tp->tg3_flags3 & TG3_FLG3_ENABLE_RSS)
+ if (tg3_flag(tp, ENABLE_RSS))
rnapi = &tp->napi[1];
- if (tp->tg3_flags3 & TG3_FLG3_ENABLE_TSS)
+ if (tg3_flag(tp, ENABLE_TSS))
tnapi = &tp->napi[1];
}
coal_now = tnapi->coal_now | rnapi->coal_now;
@@ -10905,22 +11176,20 @@ static int tg3_run_loopback(struct tg3 *tp, int loopback_mode)
* all newer ASIC revisions.
*/
if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 ||
- (tp->tg3_flags & TG3_FLAG_CPMU_PRESENT))
+ tg3_flag(tp, CPMU_PRESENT))
return 0;
mac_mode = tp->mac_mode &
~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
mac_mode |= MAC_MODE_PORT_INT_LPBACK;
- if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
+ if (!tg3_flag(tp, 5705_PLUS))
mac_mode |= MAC_MODE_LINK_POLARITY;
if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
mac_mode |= MAC_MODE_PORT_MODE_MII;
else
mac_mode |= MAC_MODE_PORT_MODE_GMII;
tw32(MAC_MODE, mac_mode);
- } else if (loopback_mode == TG3_PHY_LOOPBACK) {
- u32 val;
-
+ } else {
if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
tg3_phy_fet_toggle_apd(tp, false);
val = BMCR_LOOPBACK | BMCR_FULLDPLX | BMCR_SPEED100;
@@ -10968,13 +11237,11 @@ static int tg3_run_loopback(struct tg3 *tp, int loopback_mode)
break;
mdelay(1);
}
- } else {
- return -EINVAL;
}
err = -EIO;
- tx_len = 1514;
+ tx_len = pktsz;
skb = netdev_alloc_skb(tp->dev, tx_len);
if (!skb)
return -ENOMEM;
@@ -10983,9 +11250,58 @@ static int tg3_run_loopback(struct tg3 *tp, int loopback_mode)
memcpy(tx_data, tp->dev->dev_addr, 6);
memset(tx_data + 6, 0x0, 8);
- tw32(MAC_RX_MTU_SIZE, tx_len + 4);
+ tw32(MAC_RX_MTU_SIZE, tx_len + ETH_FCS_LEN);
+
+ if (loopback_mode == TG3_TSO_LOOPBACK) {
+ struct iphdr *iph = (struct iphdr *)&tx_data[ETH_HLEN];
+
+ u32 hdr_len = TG3_TSO_IP_HDR_LEN + TG3_TSO_TCP_HDR_LEN +
+ TG3_TSO_TCP_OPT_LEN;
+
+ memcpy(tx_data + ETH_ALEN * 2, tg3_tso_header,
+ sizeof(tg3_tso_header));
+ mss = TG3_TSO_MSS;
+
+ val = tx_len - ETH_ALEN * 2 - sizeof(tg3_tso_header);
+ num_pkts = DIV_ROUND_UP(val, TG3_TSO_MSS);
+
+ /* Set the total length field in the IP header */
+ iph->tot_len = htons((u16)(mss + hdr_len));
+
+ base_flags = (TXD_FLAG_CPU_PRE_DMA |
+ TXD_FLAG_CPU_POST_DMA);
+
+ if (tg3_flag(tp, HW_TSO_1) ||
+ tg3_flag(tp, HW_TSO_2) ||
+ tg3_flag(tp, HW_TSO_3)) {
+ struct tcphdr *th;
+ val = ETH_HLEN + TG3_TSO_IP_HDR_LEN;
+ th = (struct tcphdr *)&tx_data[val];
+ th->check = 0;
+ } else
+ base_flags |= TXD_FLAG_TCPUDP_CSUM;
+
+ if (tg3_flag(tp, HW_TSO_3)) {
+ mss |= (hdr_len & 0xc) << 12;
+ if (hdr_len & 0x10)
+ base_flags |= 0x00000010;
+ base_flags |= (hdr_len & 0x3e0) << 5;
+ } else if (tg3_flag(tp, HW_TSO_2))
+ mss |= hdr_len << 9;
+ else if (tg3_flag(tp, HW_TSO_1) ||
+ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
+ mss |= (TG3_TSO_TCP_OPT_LEN << 9);
+ } else {
+ base_flags |= (TG3_TSO_TCP_OPT_LEN << 10);
+ }
+
+ data_off = ETH_ALEN * 2 + sizeof(tg3_tso_header);
+ } else {
+ num_pkts = 1;
+ data_off = ETH_HLEN;
+ }
- for (i = 14; i < tx_len; i++)
+ for (i = data_off; i < tx_len; i++)
tx_data[i] = (u8) (i & 0xff);
map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE);
@@ -11001,12 +11317,10 @@ static int tg3_run_loopback(struct tg3 *tp, int loopback_mode)
rx_start_idx = rnapi->hw_status->idx[0].rx_producer;
- num_pkts = 0;
-
- tg3_set_txd(tnapi, tnapi->tx_prod, map, tx_len, 0, 1);
+ tg3_set_txd(tnapi, tnapi->tx_prod, map, tx_len,
+ base_flags, (mss << 1) | 1);
tnapi->tx_prod++;
- num_pkts++;
tw32_tx_mbox(tnapi->prodmbox, tnapi->tx_prod);
tr32_mailbox(tnapi->prodmbox);
@@ -11036,29 +11350,56 @@ static int tg3_run_loopback(struct tg3 *tp, int loopback_mode)
if (rx_idx != rx_start_idx + num_pkts)
goto out;
- desc = &rnapi->rx_rcb[rx_start_idx];
- desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
- opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
- if (opaque_key != RXD_OPAQUE_RING_STD)
- goto out;
+ val = data_off;
+ while (rx_idx != rx_start_idx) {
+ desc = &rnapi->rx_rcb[rx_start_idx++];
+ desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
+ opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
- if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
- (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII))
- goto out;
+ if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
+ (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII))
+ goto out;
- rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) - 4;
- if (rx_len != tx_len)
- goto out;
+ rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT)
+ - ETH_FCS_LEN;
- rx_skb = tpr->rx_std_buffers[desc_idx].skb;
+ if (loopback_mode != TG3_TSO_LOOPBACK) {
+ if (rx_len != tx_len)
+ goto out;
- map = dma_unmap_addr(&tpr->rx_std_buffers[desc_idx], mapping);
- pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len, PCI_DMA_FROMDEVICE);
+ if (pktsz <= TG3_RX_STD_DMA_SZ - ETH_FCS_LEN) {
+ if (opaque_key != RXD_OPAQUE_RING_STD)
+ goto out;
+ } else {
+ if (opaque_key != RXD_OPAQUE_RING_JUMBO)
+ goto out;
+ }
+ } else if ((desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
+ (desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
+ >> RXD_TCPCSUM_SHIFT == 0xffff) {
+ goto out;
+ }
- for (i = 14; i < tx_len; i++) {
- if (*(rx_skb->data + i) != (u8) (i & 0xff))
+ if (opaque_key == RXD_OPAQUE_RING_STD) {
+ rx_skb = tpr->rx_std_buffers[desc_idx].skb;
+ map = dma_unmap_addr(&tpr->rx_std_buffers[desc_idx],
+ mapping);
+ } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
+ rx_skb = tpr->rx_jmb_buffers[desc_idx].skb;
+ map = dma_unmap_addr(&tpr->rx_jmb_buffers[desc_idx],
+ mapping);
+ } else
goto out;
+
+ pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len,
+ PCI_DMA_FROMDEVICE);
+
+ for (i = data_off; i < rx_len; i++, val++) {
+ if (*(rx_skb->data + i) != (u8) (val & 0xff))
+ goto out;
+ }
}
+
err = 0;
/* tg3_free_rings will unmap and free the rx_skb */
@@ -11066,10 +11407,13 @@ out:
return err;
}
-#define TG3_MAC_LOOPBACK_FAILED 1
-#define TG3_PHY_LOOPBACK_FAILED 2
-#define TG3_LOOPBACK_FAILED (TG3_MAC_LOOPBACK_FAILED | \
- TG3_PHY_LOOPBACK_FAILED)
+#define TG3_STD_LOOPBACK_FAILED 1
+#define TG3_JMB_LOOPBACK_FAILED 2
+#define TG3_TSO_LOOPBACK_FAILED 4
+
+#define TG3_MAC_LOOPBACK_SHIFT 0
+#define TG3_PHY_LOOPBACK_SHIFT 4
+#define TG3_LOOPBACK_FAILED 0x00000077
static int tg3_test_loopback(struct tg3 *tp)
{
@@ -11088,11 +11432,20 @@ static int tg3_test_loopback(struct tg3 *tp)
goto done;
}
+ if (tg3_flag(tp, ENABLE_RSS)) {
+ int i;
+
+ /* Reroute all rx packets to the 1st queue */
+ for (i = MAC_RSS_INDIR_TBL_0;
+ i < MAC_RSS_INDIR_TBL_0 + TG3_RSS_INDIR_TBL_SIZE; i += 4)
+ tw32(i, 0x0);
+ }
+
/* Turn off gphy autopowerdown. */
if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
tg3_phy_toggle_apd(tp, false);
- if (tp->tg3_flags & TG3_FLAG_CPMU_PRESENT) {
+ if (tg3_flag(tp, CPMU_PRESENT)) {
int i;
u32 status;
@@ -11118,10 +11471,14 @@ static int tg3_test_loopback(struct tg3 *tp)
CPMU_CTRL_LINK_AWARE_MODE));
}
- if (tg3_run_loopback(tp, TG3_MAC_LOOPBACK))
- err |= TG3_MAC_LOOPBACK_FAILED;
+ if (tg3_run_loopback(tp, ETH_FRAME_LEN, TG3_MAC_LOOPBACK))
+ err |= TG3_STD_LOOPBACK_FAILED << TG3_MAC_LOOPBACK_SHIFT;
+
+ if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
+ tg3_run_loopback(tp, 9000 + ETH_HLEN, TG3_MAC_LOOPBACK))
+ err |= TG3_JMB_LOOPBACK_FAILED << TG3_MAC_LOOPBACK_SHIFT;
- if (tp->tg3_flags & TG3_FLAG_CPMU_PRESENT) {
+ if (tg3_flag(tp, CPMU_PRESENT)) {
tw32(TG3_CPMU_CTRL, cpmuctrl);
/* Release the mutex */
@@ -11129,9 +11486,18 @@ static int tg3_test_loopback(struct tg3 *tp)
}
if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
- !(tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB)) {
- if (tg3_run_loopback(tp, TG3_PHY_LOOPBACK))
- err |= TG3_PHY_LOOPBACK_FAILED;
+ !tg3_flag(tp, USE_PHYLIB)) {
+ if (tg3_run_loopback(tp, ETH_FRAME_LEN, TG3_PHY_LOOPBACK))
+ err |= TG3_STD_LOOPBACK_FAILED <<
+ TG3_PHY_LOOPBACK_SHIFT;
+ if (tg3_flag(tp, TSO_CAPABLE) &&
+ tg3_run_loopback(tp, ETH_FRAME_LEN, TG3_TSO_LOOPBACK))
+ err |= TG3_TSO_LOOPBACK_FAILED <<
+ TG3_PHY_LOOPBACK_SHIFT;
+ if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
+ tg3_run_loopback(tp, 9000 + ETH_HLEN, TG3_PHY_LOOPBACK))
+ err |= TG3_JMB_LOOPBACK_FAILED <<
+ TG3_PHY_LOOPBACK_SHIFT;
}
/* Re-enable gphy autopowerdown. */
@@ -11176,7 +11542,7 @@ static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
tg3_halt(tp, RESET_KIND_SUSPEND, 1);
err = tg3_nvram_lock(tp);
tg3_halt_cpu(tp, RX_CPU_BASE);
- if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
+ if (!tg3_flag(tp, 5705_PLUS))
tg3_halt_cpu(tp, TX_CPU_BASE);
if (!err)
tg3_nvram_unlock(tp);
@@ -11206,7 +11572,7 @@ static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
if (netif_running(dev)) {
- tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
+ tg3_flag_set(tp, INIT_COMPLETE);
err2 = tg3_restart_hw(tp, 1);
if (!err2)
tg3_netif_start(tp);
@@ -11228,7 +11594,7 @@ static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
struct tg3 *tp = netdev_priv(dev);
int err;
- if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
+ if (tg3_flag(tp, USE_PHYLIB)) {
struct phy_device *phydev;
if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
return -EAGAIN;
@@ -11247,9 +11613,7 @@ static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
break; /* We have no PHY */
- if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) ||
- ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) &&
- !netif_running(dev)))
+ if (!netif_running(dev))
return -EAGAIN;
spin_lock_bh(&tp->lock);
@@ -11265,9 +11629,7 @@ static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
break; /* We have no PHY */
- if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) ||
- ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) &&
- !netif_running(dev)))
+ if (!netif_running(dev))
return -EAGAIN;
spin_lock_bh(&tp->lock);
@@ -11297,7 +11659,7 @@ static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0;
u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0;
- if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
+ if (!tg3_flag(tp, 5705_PLUS)) {
max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT;
max_txcoal_tick_int = MAX_TXCOAL_TICK_INT;
max_stat_coal_ticks = MAX_STAT_COAL_TICKS;
@@ -11364,14 +11726,9 @@ static const struct ethtool_ops tg3_ethtool_ops = {
.set_ringparam = tg3_set_ringparam,
.get_pauseparam = tg3_get_pauseparam,
.set_pauseparam = tg3_set_pauseparam,
- .get_rx_csum = tg3_get_rx_csum,
- .set_rx_csum = tg3_set_rx_csum,
- .set_tx_csum = tg3_set_tx_csum,
- .set_sg = ethtool_op_set_sg,
- .set_tso = tg3_set_tso,
.self_test = tg3_self_test,
.get_strings = tg3_get_strings,
- .phys_id = tg3_phys_id,
+ .set_phys_id = tg3_set_phys_id,
.get_ethtool_stats = tg3_get_ethtool_stats,
.get_coalesce = tg3_get_coalesce,
.set_coalesce = tg3_set_coalesce,
@@ -11416,8 +11773,7 @@ static void __devinit tg3_get_nvram_size(struct tg3 *tp)
{
u32 val;
- if ((tp->tg3_flags3 & TG3_FLG3_NO_NVRAM) ||
- tg3_nvram_read(tp, 0, &val) != 0)
+ if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &val) != 0)
return;
/* Selfboot format */
@@ -11452,19 +11808,19 @@ static void __devinit tg3_get_nvram_info(struct tg3 *tp)
nvcfg1 = tr32(NVRAM_CFG1);
if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
- tp->tg3_flags2 |= TG3_FLG2_FLASH;
+ tg3_flag_set(tp, FLASH);
} else {
nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
tw32(NVRAM_CFG1, nvcfg1);
}
if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) ||
- (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
+ tg3_flag(tp, 5780_CLASS)) {
switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
tp->nvram_jedecnum = JEDEC_ATMEL;
tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
- tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
+ tg3_flag_set(tp, NVRAM_BUFFERED);
break;
case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
tp->nvram_jedecnum = JEDEC_ATMEL;
@@ -11473,12 +11829,12 @@ static void __devinit tg3_get_nvram_info(struct tg3 *tp)
case FLASH_VENDOR_ATMEL_EEPROM:
tp->nvram_jedecnum = JEDEC_ATMEL;
tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
- tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
+ tg3_flag_set(tp, NVRAM_BUFFERED);
break;
case FLASH_VENDOR_ST:
tp->nvram_jedecnum = JEDEC_ST;
tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
- tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
+ tg3_flag_set(tp, NVRAM_BUFFERED);
break;
case FLASH_VENDOR_SAIFUN:
tp->nvram_jedecnum = JEDEC_SAIFUN;
@@ -11493,7 +11849,7 @@ static void __devinit tg3_get_nvram_info(struct tg3 *tp)
} else {
tp->nvram_jedecnum = JEDEC_ATMEL;
tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
- tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
+ tg3_flag_set(tp, NVRAM_BUFFERED);
}
}
@@ -11532,29 +11888,29 @@ static void __devinit tg3_get_5752_nvram_info(struct tg3 *tp)
/* NVRAM protection for TPM */
if (nvcfg1 & (1 << 27))
- tp->tg3_flags3 |= TG3_FLG3_PROTECTED_NVRAM;
+ tg3_flag_set(tp, PROTECTED_NVRAM);
switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
tp->nvram_jedecnum = JEDEC_ATMEL;
- tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
+ tg3_flag_set(tp, NVRAM_BUFFERED);
break;
case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
tp->nvram_jedecnum = JEDEC_ATMEL;
- tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
- tp->tg3_flags2 |= TG3_FLG2_FLASH;
+ tg3_flag_set(tp, NVRAM_BUFFERED);
+ tg3_flag_set(tp, FLASH);
break;
case FLASH_5752VENDOR_ST_M45PE10:
case FLASH_5752VENDOR_ST_M45PE20:
case FLASH_5752VENDOR_ST_M45PE40:
tp->nvram_jedecnum = JEDEC_ST;
- tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
- tp->tg3_flags2 |= TG3_FLG2_FLASH;
+ tg3_flag_set(tp, NVRAM_BUFFERED);
+ tg3_flag_set(tp, FLASH);
break;
}
- if (tp->tg3_flags2 & TG3_FLG2_FLASH) {
+ if (tg3_flag(tp, FLASH)) {
tg3_nvram_get_pagesize(tp, nvcfg1);
} else {
/* For eeprom, set pagesize to maximum eeprom size */
@@ -11573,7 +11929,7 @@ static void __devinit tg3_get_5755_nvram_info(struct tg3 *tp)
/* NVRAM protection for TPM */
if (nvcfg1 & (1 << 27)) {
- tp->tg3_flags3 |= TG3_FLG3_PROTECTED_NVRAM;
+ tg3_flag_set(tp, PROTECTED_NVRAM);
protect = 1;
}
@@ -11584,8 +11940,8 @@ static void __devinit tg3_get_5755_nvram_info(struct tg3 *tp)
case FLASH_5755VENDOR_ATMEL_FLASH_3:
case FLASH_5755VENDOR_ATMEL_FLASH_5:
tp->nvram_jedecnum = JEDEC_ATMEL;
- tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
- tp->tg3_flags2 |= TG3_FLG2_FLASH;
+ tg3_flag_set(tp, NVRAM_BUFFERED);
+ tg3_flag_set(tp, FLASH);
tp->nvram_pagesize = 264;
if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_1 ||
nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_5)
@@ -11602,8 +11958,8 @@ static void __devinit tg3_get_5755_nvram_info(struct tg3 *tp)
case FLASH_5752VENDOR_ST_M45PE20:
case FLASH_5752VENDOR_ST_M45PE40:
tp->nvram_jedecnum = JEDEC_ST;
- tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
- tp->tg3_flags2 |= TG3_FLG2_FLASH;
+ tg3_flag_set(tp, NVRAM_BUFFERED);
+ tg3_flag_set(tp, FLASH);
tp->nvram_pagesize = 256;
if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE10)
tp->nvram_size = (protect ?
@@ -11633,7 +11989,7 @@ static void __devinit tg3_get_5787_nvram_info(struct tg3 *tp)
case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ:
case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
tp->nvram_jedecnum = JEDEC_ATMEL;
- tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
+ tg3_flag_set(tp, NVRAM_BUFFERED);
tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
@@ -11644,16 +12000,16 @@ static void __devinit tg3_get_5787_nvram_info(struct tg3 *tp)
case FLASH_5755VENDOR_ATMEL_FLASH_2:
case FLASH_5755VENDOR_ATMEL_FLASH_3:
tp->nvram_jedecnum = JEDEC_ATMEL;
- tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
- tp->tg3_flags2 |= TG3_FLG2_FLASH;
+ tg3_flag_set(tp, NVRAM_BUFFERED);
+ tg3_flag_set(tp, FLASH);
tp->nvram_pagesize = 264;
break;
case FLASH_5752VENDOR_ST_M45PE10:
case FLASH_5752VENDOR_ST_M45PE20:
case FLASH_5752VENDOR_ST_M45PE40:
tp->nvram_jedecnum = JEDEC_ST;
- tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
- tp->tg3_flags2 |= TG3_FLG2_FLASH;
+ tg3_flag_set(tp, NVRAM_BUFFERED);
+ tg3_flag_set(tp, FLASH);
tp->nvram_pagesize = 256;
break;
}
@@ -11667,7 +12023,7 @@ static void __devinit tg3_get_5761_nvram_info(struct tg3 *tp)
/* NVRAM protection for TPM */
if (nvcfg1 & (1 << 27)) {
- tp->tg3_flags3 |= TG3_FLG3_PROTECTED_NVRAM;
+ tg3_flag_set(tp, PROTECTED_NVRAM);
protect = 1;
}
@@ -11682,9 +12038,9 @@ static void __devinit tg3_get_5761_nvram_info(struct tg3 *tp)
case FLASH_5761VENDOR_ATMEL_MDB081D:
case FLASH_5761VENDOR_ATMEL_MDB161D:
tp->nvram_jedecnum = JEDEC_ATMEL;
- tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
- tp->tg3_flags2 |= TG3_FLG2_FLASH;
- tp->tg3_flags3 |= TG3_FLG3_NO_NVRAM_ADDR_TRANS;
+ tg3_flag_set(tp, NVRAM_BUFFERED);
+ tg3_flag_set(tp, FLASH);
+ tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
tp->nvram_pagesize = 256;
break;
case FLASH_5761VENDOR_ST_A_M45PE20:
@@ -11696,8 +12052,8 @@ static void __devinit tg3_get_5761_nvram_info(struct tg3 *tp)
case FLASH_5761VENDOR_ST_M_M45PE80:
case FLASH_5761VENDOR_ST_M_M45PE16:
tp->nvram_jedecnum = JEDEC_ST;
- tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
- tp->tg3_flags2 |= TG3_FLG2_FLASH;
+ tg3_flag_set(tp, NVRAM_BUFFERED);
+ tg3_flag_set(tp, FLASH);
tp->nvram_pagesize = 256;
break;
}
@@ -11737,7 +12093,7 @@ static void __devinit tg3_get_5761_nvram_info(struct tg3 *tp)
static void __devinit tg3_get_5906_nvram_info(struct tg3 *tp)
{
tp->nvram_jedecnum = JEDEC_ATMEL;
- tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
+ tg3_flag_set(tp, NVRAM_BUFFERED);
tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
}
@@ -11751,7 +12107,7 @@ static void __devinit tg3_get_57780_nvram_info(struct tg3 *tp)
case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
tp->nvram_jedecnum = JEDEC_ATMEL;
- tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
+ tg3_flag_set(tp, NVRAM_BUFFERED);
tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
@@ -11765,8 +12121,8 @@ static void __devinit tg3_get_57780_nvram_info(struct tg3 *tp)
case FLASH_57780VENDOR_ATMEL_AT45DB041D:
case FLASH_57780VENDOR_ATMEL_AT45DB041B:
tp->nvram_jedecnum = JEDEC_ATMEL;
- tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
- tp->tg3_flags2 |= TG3_FLG2_FLASH;
+ tg3_flag_set(tp, NVRAM_BUFFERED);
+ tg3_flag_set(tp, FLASH);
switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
@@ -11788,8 +12144,8 @@ static void __devinit tg3_get_57780_nvram_info(struct tg3 *tp)
case FLASH_5752VENDOR_ST_M45PE20:
case FLASH_5752VENDOR_ST_M45PE40:
tp->nvram_jedecnum = JEDEC_ST;
- tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
- tp->tg3_flags2 |= TG3_FLG2_FLASH;
+ tg3_flag_set(tp, NVRAM_BUFFERED);
+ tg3_flag_set(tp, FLASH);
switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
case FLASH_5752VENDOR_ST_M45PE10:
@@ -11804,13 +12160,13 @@ static void __devinit tg3_get_57780_nvram_info(struct tg3 *tp)
}
break;
default:
- tp->tg3_flags3 |= TG3_FLG3_NO_NVRAM;
+ tg3_flag_set(tp, NO_NVRAM);
return;
}
tg3_nvram_get_pagesize(tp, nvcfg1);
if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
- tp->tg3_flags3 |= TG3_FLG3_NO_NVRAM_ADDR_TRANS;
+ tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
}
@@ -11824,7 +12180,7 @@ static void __devinit tg3_get_5717_nvram_info(struct tg3 *tp)
case FLASH_5717VENDOR_ATMEL_EEPROM:
case FLASH_5717VENDOR_MICRO_EEPROM:
tp->nvram_jedecnum = JEDEC_ATMEL;
- tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
+ tg3_flag_set(tp, NVRAM_BUFFERED);
tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
@@ -11838,11 +12194,13 @@ static void __devinit tg3_get_5717_nvram_info(struct tg3 *tp)
case FLASH_5717VENDOR_ATMEL_ADB021D:
case FLASH_5717VENDOR_ATMEL_45USPT:
tp->nvram_jedecnum = JEDEC_ATMEL;
- tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
- tp->tg3_flags2 |= TG3_FLG2_FLASH;
+ tg3_flag_set(tp, NVRAM_BUFFERED);
+ tg3_flag_set(tp, FLASH);
switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
case FLASH_5717VENDOR_ATMEL_MDB021D:
+ /* Detect size with tg3_nvram_get_size() */
+ break;
case FLASH_5717VENDOR_ATMEL_ADB021B:
case FLASH_5717VENDOR_ATMEL_ADB021D:
tp->nvram_size = TG3_NVRAM_SIZE_256KB;
@@ -11863,13 +12221,15 @@ static void __devinit tg3_get_5717_nvram_info(struct tg3 *tp)
case FLASH_5717VENDOR_ST_25USPT:
case FLASH_5717VENDOR_ST_45USPT:
tp->nvram_jedecnum = JEDEC_ST;
- tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
- tp->tg3_flags2 |= TG3_FLG2_FLASH;
+ tg3_flag_set(tp, NVRAM_BUFFERED);
+ tg3_flag_set(tp, FLASH);
switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
case FLASH_5717VENDOR_ST_M_M25PE20:
- case FLASH_5717VENDOR_ST_A_M25PE20:
case FLASH_5717VENDOR_ST_M_M45PE20:
+ /* Detect size with tg3_nvram_get_size() */
+ break;
+ case FLASH_5717VENDOR_ST_A_M25PE20:
case FLASH_5717VENDOR_ST_A_M45PE20:
tp->nvram_size = TG3_NVRAM_SIZE_256KB;
break;
@@ -11879,13 +12239,125 @@ static void __devinit tg3_get_5717_nvram_info(struct tg3 *tp)
}
break;
default:
- tp->tg3_flags3 |= TG3_FLG3_NO_NVRAM;
+ tg3_flag_set(tp, NO_NVRAM);
return;
}
tg3_nvram_get_pagesize(tp, nvcfg1);
if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
- tp->tg3_flags3 |= TG3_FLG3_NO_NVRAM_ADDR_TRANS;
+ tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
+}
+
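+/* Identify the NVRAM part (EEPROM or flash vendor/device) and its size on 5720 ASICs. */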
+static void __devinit tg3_get_5720_nvram_info(struct tg3 *tp)
+{
+ u32 nvcfg1, nvmpinstrp;
+
+ nvcfg1 = tr32(NVRAM_CFG1);
+ nvmpinstrp = nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK;
+
+ switch (nvmpinstrp) {
+ case FLASH_5720_EEPROM_HD:
+ case FLASH_5720_EEPROM_LD:
+ tp->nvram_jedecnum = JEDEC_ATMEL;
+ tg3_flag_set(tp, NVRAM_BUFFERED);
+
+ nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
+ tw32(NVRAM_CFG1, nvcfg1);
+ if (nvmpinstrp == FLASH_5720_EEPROM_HD)
+ tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
+ else
+ tp->nvram_pagesize = ATMEL_AT24C02_CHIP_SIZE;
+ return;
+ case FLASH_5720VENDOR_M_ATMEL_DB011D:
+ case FLASH_5720VENDOR_A_ATMEL_DB011B:
+ case FLASH_5720VENDOR_A_ATMEL_DB011D:
+ case FLASH_5720VENDOR_M_ATMEL_DB021D:
+ case FLASH_5720VENDOR_A_ATMEL_DB021B:
+ case FLASH_5720VENDOR_A_ATMEL_DB021D:
+ case FLASH_5720VENDOR_M_ATMEL_DB041D:
+ case FLASH_5720VENDOR_A_ATMEL_DB041B:
+ case FLASH_5720VENDOR_A_ATMEL_DB041D:
+ case FLASH_5720VENDOR_M_ATMEL_DB081D:
+ case FLASH_5720VENDOR_A_ATMEL_DB081D:
+ case FLASH_5720VENDOR_ATMEL_45USPT:
+ tp->nvram_jedecnum = JEDEC_ATMEL;
+ tg3_flag_set(tp, NVRAM_BUFFERED);
+ tg3_flag_set(tp, FLASH);
+
+ switch (nvmpinstrp) {
+ case FLASH_5720VENDOR_M_ATMEL_DB021D:
+ case FLASH_5720VENDOR_A_ATMEL_DB021B:
+ case FLASH_5720VENDOR_A_ATMEL_DB021D:
+ tp->nvram_size = TG3_NVRAM_SIZE_256KB;
+ break;
+ case FLASH_5720VENDOR_M_ATMEL_DB041D:
+ case FLASH_5720VENDOR_A_ATMEL_DB041B:
+ case FLASH_5720VENDOR_A_ATMEL_DB041D:
+ tp->nvram_size = TG3_NVRAM_SIZE_512KB;
+ break;
+ case FLASH_5720VENDOR_M_ATMEL_DB081D:
+ case FLASH_5720VENDOR_A_ATMEL_DB081D:
+ tp->nvram_size = TG3_NVRAM_SIZE_1MB;
+ break;
+ default:
+ tp->nvram_size = TG3_NVRAM_SIZE_128KB;
+ break;
+ }
+ break;
+ case FLASH_5720VENDOR_M_ST_M25PE10:
+ case FLASH_5720VENDOR_M_ST_M45PE10:
+ case FLASH_5720VENDOR_A_ST_M25PE10:
+ case FLASH_5720VENDOR_A_ST_M45PE10:
+ case FLASH_5720VENDOR_M_ST_M25PE20:
+ case FLASH_5720VENDOR_M_ST_M45PE20:
+ case FLASH_5720VENDOR_A_ST_M25PE20:
+ case FLASH_5720VENDOR_A_ST_M45PE20:
+ case FLASH_5720VENDOR_M_ST_M25PE40:
+ case FLASH_5720VENDOR_M_ST_M45PE40:
+ case FLASH_5720VENDOR_A_ST_M25PE40:
+ case FLASH_5720VENDOR_A_ST_M45PE40:
+ case FLASH_5720VENDOR_M_ST_M25PE80:
+ case FLASH_5720VENDOR_M_ST_M45PE80:
+ case FLASH_5720VENDOR_A_ST_M25PE80:
+ case FLASH_5720VENDOR_A_ST_M45PE80:
+ case FLASH_5720VENDOR_ST_25USPT:
+ case FLASH_5720VENDOR_ST_45USPT:
+ tp->nvram_jedecnum = JEDEC_ST;
+ tg3_flag_set(tp, NVRAM_BUFFERED);
+ tg3_flag_set(tp, FLASH);
+
+ switch (nvmpinstrp) {
+ case FLASH_5720VENDOR_M_ST_M25PE20:
+ case FLASH_5720VENDOR_M_ST_M45PE20:
+ case FLASH_5720VENDOR_A_ST_M25PE20:
+ case FLASH_5720VENDOR_A_ST_M45PE20:
+ tp->nvram_size = TG3_NVRAM_SIZE_256KB;
+ break;
+ case FLASH_5720VENDOR_M_ST_M25PE40:
+ case FLASH_5720VENDOR_M_ST_M45PE40:
+ case FLASH_5720VENDOR_A_ST_M25PE40:
+ case FLASH_5720VENDOR_A_ST_M45PE40:
+ tp->nvram_size = TG3_NVRAM_SIZE_512KB;
+ break;
+ case FLASH_5720VENDOR_M_ST_M25PE80:
+ case FLASH_5720VENDOR_M_ST_M45PE80:
+ case FLASH_5720VENDOR_A_ST_M25PE80:
+ case FLASH_5720VENDOR_A_ST_M45PE80:
+ tp->nvram_size = TG3_NVRAM_SIZE_1MB;
+ break;
+ default:
+ tp->nvram_size = TG3_NVRAM_SIZE_128KB;
+ break;
+ }
+ break;
+ default:
+ tg3_flag_set(tp, NO_NVRAM);
+ return;
+ }
+
+ tg3_nvram_get_pagesize(tp, nvcfg1);
+ if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
+ tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
}
/* Chips other than 5700/5701 use the NVRAM for fetching info. */
@@ -11905,7 +12377,7 @@ static void __devinit tg3_nvram_init(struct tg3 *tp)
if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
- tp->tg3_flags |= TG3_FLAG_NVRAM;
+ tg3_flag_set(tp, NVRAM);
if (tg3_nvram_lock(tp)) {
netdev_warn(tp->dev,
@@ -11935,6 +12407,8 @@ static void __devinit tg3_nvram_init(struct tg3 *tp)
else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
tg3_get_5717_nvram_info(tp);
+ else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
+ tg3_get_5720_nvram_info(tp);
else
tg3_get_nvram_info(tp);
@@ -11945,7 +12419,8 @@ static void __devinit tg3_nvram_init(struct tg3 *tp)
tg3_nvram_unlock(tp);
} else {
- tp->tg3_flags &= ~(TG3_FLAG_NVRAM | TG3_FLAG_NVRAM_BUFFERED);
+ tg3_flag_clear(tp, NVRAM);
+ tg3_flag_clear(tp, NVRAM_BUFFERED);
tg3_get_eeprom_size(tp);
}
@@ -12128,7 +12603,7 @@ static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
nvram_cmd |= NVRAM_CMD_LAST;
if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752 &&
- !(tp->tg3_flags3 & TG3_FLG3_5755_PLUS) &&
+ !tg3_flag(tp, 5755_PLUS) &&
(tp->nvram_jedecnum == JEDEC_ST) &&
(nvram_cmd & NVRAM_CMD_FIRST)) {
@@ -12138,7 +12613,7 @@ static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
break;
}
- if (!(tp->tg3_flags2 & TG3_FLG2_FLASH)) {
+ if (!tg3_flag(tp, FLASH)) {
/* We always do complete word writes to eeprom. */
nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
}
@@ -12154,13 +12629,13 @@ static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
{
int ret;
- if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
+ if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
~GRC_LCLCTRL_GPIO_OUTPUT1);
udelay(40);
}
- if (!(tp->tg3_flags & TG3_FLAG_NVRAM)) {
+ if (!tg3_flag(tp, NVRAM)) {
ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
} else {
u32 grc_mode;
@@ -12170,16 +12645,13 @@ static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
return ret;
tg3_enable_nvram_access(tp);
- if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
- !(tp->tg3_flags3 & TG3_FLG3_PROTECTED_NVRAM))
+ if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM))
tw32(NVRAM_WRITE1, 0x406);
grc_mode = tr32(GRC_MODE);
tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);
- if ((tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) ||
- !(tp->tg3_flags2 & TG3_FLG2_FLASH)) {
-
+ if (tg3_flag(tp, NVRAM_BUFFERED) || !tg3_flag(tp, FLASH)) {
ret = tg3_nvram_write_block_buffered(tp, offset, len,
buf);
} else {
@@ -12194,7 +12666,7 @@ static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
tg3_nvram_unlock(tp);
}
- if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
+ if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
udelay(40);
}
@@ -12316,19 +12788,22 @@ static void __devinit tg3_get_eeprom_hw_cfg(struct tg3 *tp)
tp->led_ctrl = LED_CTRL_MODE_PHY_1;
/* Assume an onboard device and WOL capable by default. */
- tp->tg3_flags |= TG3_FLAG_EEPROM_WRITE_PROT | TG3_FLAG_WOL_CAP;
+ tg3_flag_set(tp, EEPROM_WRITE_PROT);
+ tg3_flag_set(tp, WOL_CAP);
if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
if (!(tr32(PCIE_TRANSACTION_CFG) & PCIE_TRANS_CFG_LOM)) {
- tp->tg3_flags &= ~TG3_FLAG_EEPROM_WRITE_PROT;
- tp->tg3_flags2 |= TG3_FLG2_IS_NIC;
+ tg3_flag_clear(tp, EEPROM_WRITE_PROT);
+ tg3_flag_set(tp, IS_NIC);
}
val = tr32(VCPU_CFGSHDW);
if (val & VCPU_CFGSHDW_ASPM_DBNC)
- tp->tg3_flags |= TG3_FLAG_ASPM_WORKAROUND;
+ tg3_flag_set(tp, ASPM_WORKAROUND);
if ((val & VCPU_CFGSHDW_WOL_ENABLE) &&
- (val & VCPU_CFGSHDW_WOL_MAGPKT))
- tp->tg3_flags |= TG3_FLAG_WOL_ENABLE;
+ (val & VCPU_CFGSHDW_WOL_MAGPKT)) {
+ tg3_flag_set(tp, WOL_ENABLE);
+ device_set_wakeup_enable(&tp->pdev->dev, true);
+ }
goto done;
}
@@ -12369,13 +12844,13 @@ static void __devinit tg3_get_eeprom_hw_cfg(struct tg3 *tp)
tp->phy_id = eeprom_phy_id;
if (eeprom_phy_serdes) {
- if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
+ if (!tg3_flag(tp, 5705_PLUS))
tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
else
tp->phy_flags |= TG3_PHYFLG_MII_SERDES;
}
- if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
+ if (tg3_flag(tp, 5750_PLUS))
led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
SHASTA_EXT_LED_MODE_MASK);
else
@@ -12435,34 +12910,36 @@ static void __devinit tg3_get_eeprom_hw_cfg(struct tg3 *tp)
tp->led_ctrl = LED_CTRL_MODE_PHY_1;
if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP) {
- tp->tg3_flags |= TG3_FLAG_EEPROM_WRITE_PROT;
+ tg3_flag_set(tp, EEPROM_WRITE_PROT);
if ((tp->pdev->subsystem_vendor ==
PCI_VENDOR_ID_ARIMA) &&
(tp->pdev->subsystem_device == 0x205a ||
tp->pdev->subsystem_device == 0x2063))
- tp->tg3_flags &= ~TG3_FLAG_EEPROM_WRITE_PROT;
+ tg3_flag_clear(tp, EEPROM_WRITE_PROT);
} else {
- tp->tg3_flags &= ~TG3_FLAG_EEPROM_WRITE_PROT;
- tp->tg3_flags2 |= TG3_FLG2_IS_NIC;
+ tg3_flag_clear(tp, EEPROM_WRITE_PROT);
+ tg3_flag_set(tp, IS_NIC);
}
if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
- tp->tg3_flags |= TG3_FLAG_ENABLE_ASF;
- if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
- tp->tg3_flags2 |= TG3_FLG2_ASF_NEW_HANDSHAKE;
+ tg3_flag_set(tp, ENABLE_ASF);
+ if (tg3_flag(tp, 5750_PLUS))
+ tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
}
if ((nic_cfg & NIC_SRAM_DATA_CFG_APE_ENABLE) &&
- (tp->tg3_flags2 & TG3_FLG2_5750_PLUS))
- tp->tg3_flags3 |= TG3_FLG3_ENABLE_APE;
+ tg3_flag(tp, 5750_PLUS))
+ tg3_flag_set(tp, ENABLE_APE);
if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES &&
!(nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL))
- tp->tg3_flags &= ~TG3_FLAG_WOL_CAP;
+ tg3_flag_clear(tp, WOL_CAP);
- if ((tp->tg3_flags & TG3_FLAG_WOL_CAP) &&
- (nic_cfg & NIC_SRAM_DATA_CFG_WOL_ENABLE))
- tp->tg3_flags |= TG3_FLAG_WOL_ENABLE;
+ if (tg3_flag(tp, WOL_CAP) &&
+ (nic_cfg & NIC_SRAM_DATA_CFG_WOL_ENABLE)) {
+ tg3_flag_set(tp, WOL_ENABLE);
+ device_set_wakeup_enable(&tp->pdev->dev, true);
+ }
if (cfg2 & (1 << 17))
tp->phy_flags |= TG3_PHYFLG_CAPACITIVE_COUPLING;
@@ -12472,33 +12949,33 @@ static void __devinit tg3_get_eeprom_hw_cfg(struct tg3 *tp)
if (cfg2 & (1 << 18))
tp->phy_flags |= TG3_PHYFLG_SERDES_PREEMPHASIS;
- if (((tp->tg3_flags3 & TG3_FLG3_5717_PLUS) ||
- ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
- GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX))) &&
+ if ((tg3_flag(tp, 57765_PLUS) ||
+ (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
+ GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX)) &&
(cfg2 & NIC_SRAM_DATA_CFG_2_APD_EN))
tp->phy_flags |= TG3_PHYFLG_ENABLE_APD;
- if ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) &&
+ if (tg3_flag(tp, PCI_EXPRESS) &&
GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
- !(tp->tg3_flags3 & TG3_FLG3_5717_PLUS)) {
+ !tg3_flag(tp, 57765_PLUS)) {
u32 cfg3;
tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &cfg3);
if (cfg3 & NIC_SRAM_ASPM_DEBOUNCE)
- tp->tg3_flags |= TG3_FLAG_ASPM_WORKAROUND;
+ tg3_flag_set(tp, ASPM_WORKAROUND);
}
if (cfg4 & NIC_SRAM_RGMII_INBAND_DISABLE)
- tp->tg3_flags3 |= TG3_FLG3_RGMII_INBAND_DISABLE;
+ tg3_flag_set(tp, RGMII_INBAND_DISABLE);
if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_RX_EN)
- tp->tg3_flags3 |= TG3_FLG3_RGMII_EXT_IBND_RX_EN;
+ tg3_flag_set(tp, RGMII_EXT_IBND_RX_EN);
if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_TX_EN)
- tp->tg3_flags3 |= TG3_FLG3_RGMII_EXT_IBND_TX_EN;
+ tg3_flag_set(tp, RGMII_EXT_IBND_TX_EN);
}
done:
- if (tp->tg3_flags & TG3_FLAG_WOL_CAP)
+ if (tg3_flag(tp, WOL_CAP))
device_set_wakeup_enable(&tp->pdev->dev,
- tp->tg3_flags & TG3_FLAG_WOL_ENABLE);
+ tg3_flag(tp, WOL_ENABLE));
else
device_set_wakeup_capable(&tp->pdev->dev, false);
}
@@ -12588,18 +13065,17 @@ static int __devinit tg3_phy_probe(struct tg3 *tp)
int err;
/* flow control autonegotiation is default behavior */
- tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;
+ tg3_flag_set(tp, PAUSE_AUTONEG);
tp->link_config.flowctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
- if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB)
+ if (tg3_flag(tp, USE_PHYLIB))
return tg3_phy_init(tp);
/* Reading the PHY ID register can conflict with ASF
* firmware access to the PHY hardware.
*/
err = 0;
- if ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) ||
- (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)) {
+ if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)) {
hw_phy_id = hw_phy_id_masked = TG3_PHY_ID_INVALID;
} else {
/* Now read the physical PHY_ID from the chip and verify
@@ -12655,8 +13131,8 @@ static int __devinit tg3_phy_probe(struct tg3 *tp)
tg3_phy_init_link_config(tp);
if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
- !(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) &&
- !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
+ !tg3_flag(tp, ENABLE_APE) &&
+ !tg3_flag(tp, ENABLE_ASF)) {
u32 bmsr, adv_reg, tg3_ctrl, mask;
tg3_readphy(tp, MII_BMSR, &bmsr);
@@ -12717,46 +13193,11 @@ static void __devinit tg3_read_vpd(struct tg3 *tp)
u8 *vpd_data;
unsigned int block_end, rosize, len;
int j, i = 0;
- u32 magic;
- if ((tp->tg3_flags3 & TG3_FLG3_NO_NVRAM) ||
- tg3_nvram_read(tp, 0x0, &magic))
- goto out_no_vpd;
-
- vpd_data = kmalloc(TG3_NVM_VPD_LEN, GFP_KERNEL);
+ vpd_data = (u8 *)tg3_vpd_readblock(tp);
if (!vpd_data)
goto out_no_vpd;
- if (magic == TG3_EEPROM_MAGIC) {
- for (i = 0; i < TG3_NVM_VPD_LEN; i += 4) {
- u32 tmp;
-
- /* The data is in little-endian format in NVRAM.
- * Use the big-endian read routines to preserve
- * the byte order as it exists in NVRAM.
- */
- if (tg3_nvram_read_be32(tp, TG3_NVM_VPD_OFF + i, &tmp))
- goto out_not_found;
-
- memcpy(&vpd_data[i], &tmp, sizeof(tmp));
- }
- } else {
- ssize_t cnt;
- unsigned int pos = 0;
-
- for (; pos < TG3_NVM_VPD_LEN && i < 3; i++, pos += cnt) {
- cnt = pci_read_vpd(tp->pdev, pos,
- TG3_NVM_VPD_LEN - pos,
- &vpd_data[pos]);
- if (cnt == -ETIMEDOUT || cnt == -EINTR)
- cnt = 0;
- else if (cnt < 0)
- goto out_not_found;
- }
- if (pos != TG3_NVM_VPD_LEN)
- goto out_not_found;
- }
-
i = pci_vpd_find_tag(vpd_data, 0, TG3_NVM_VPD_LEN,
PCI_VPD_LRDT_RO_DATA);
if (i < 0)
@@ -13010,7 +13451,7 @@ static void __devinit tg3_read_mgmtfw_ver(struct tg3 *tp)
if (offset == TG3_NVM_DIR_END)
return;
- if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
+ if (!tg3_flag(tp, 5705_PLUS))
start = 0x08000000;
else if (tg3_nvram_read(tp, offset - 4, &start))
return;
@@ -13050,8 +13491,7 @@ static void __devinit tg3_read_dash_ver(struct tg3 *tp)
u32 apedata;
char *fwtype;
- if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) ||
- !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
+ if (!tg3_flag(tp, ENABLE_APE) || !tg3_flag(tp, ENABLE_ASF))
return;
apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
@@ -13065,7 +13505,7 @@ static void __devinit tg3_read_dash_ver(struct tg3 *tp)
apedata = tg3_ape_read32(tp, TG3_APE_FW_VERSION);
if (tg3_ape_read32(tp, TG3_APE_FW_FEATURES) & TG3_APE_FW_FEATURE_NCSI) {
- tp->tg3_flags3 |= TG3_FLG3_APE_HAS_NCSI;
+ tg3_flag_set(tp, APE_HAS_NCSI);
fwtype = "NCSI";
} else {
fwtype = "DASH";
@@ -13089,7 +13529,7 @@ static void __devinit tg3_read_fw_ver(struct tg3 *tp)
if (tp->fw_ver[0] != 0)
vpd_vers = true;
- if (tp->tg3_flags3 & TG3_FLG3_NO_NVRAM) {
+ if (tg3_flag(tp, NO_NVRAM)) {
strcat(tp->fw_ver, "sb");
return;
}
@@ -13106,8 +13546,7 @@ static void __devinit tg3_read_fw_ver(struct tg3 *tp)
else
return;
- if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF) ||
- (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) || vpd_vers)
+ if (!tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE) || vpd_vers)
goto done;
tg3_read_mgmtfw_ver(tp);
@@ -13118,21 +13557,14 @@ done:
static struct pci_dev * __devinit tg3_find_peer(struct tg3 *);
-static inline void vlan_features_add(struct net_device *dev, unsigned long flags)
-{
- dev->vlan_features |= flags;
-}
-
static inline u32 tg3_rx_ret_ring_size(struct tg3 *tp)
{
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
- return 4096;
- else if ((tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) &&
- !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
- return 1024;
+ if (tg3_flag(tp, LRG_PROD_RING_CAP))
+ return TG3_RX_RET_MAX_SIZE_5717;
+ else if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))
+ return TG3_RX_RET_MAX_SIZE_5700;
else
- return 512;
+ return TG3_RX_RET_MAX_SIZE_5705;
}
static DEFINE_PCI_DEVICE_TABLE(tg3_write_reorder_chipsets) = {
@@ -13177,7 +13609,8 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
- tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719)
+ tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
+ tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720)
pci_read_config_dword(tp->pdev,
TG3PCI_GEN2_PRODID_ASICREV,
&prod_id_asic_rev);
@@ -13254,8 +13687,7 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
if (bridge->subordinate &&
(bridge->subordinate->number ==
tp->pdev->bus->number)) {
-
- tp->tg3_flags2 |= TG3_FLG2_ICH_WORKAROUND;
+ tg3_flag_set(tp, ICH_WORKAROUND);
pci_dev_put(bridge);
break;
}
@@ -13287,7 +13719,7 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
tp->pdev->bus->number) &&
(bridge->subordinate->subordinate >=
tp->pdev->bus->number)) {
- tp->tg3_flags3 |= TG3_FLG3_5701_DMA_BUG;
+ tg3_flag_set(tp, 5701_DMA_BUG);
pci_dev_put(bridge);
break;
}
@@ -13302,8 +13734,8 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
*/
if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 ||
GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
- tp->tg3_flags2 |= TG3_FLG2_5780_CLASS;
- tp->tg3_flags |= TG3_FLAG_40BIT_DMA_BUG;
+ tg3_flag_set(tp, 5780_CLASS);
+ tg3_flag_set(tp, 40BIT_DMA_BUG);
tp->msi_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_MSI);
} else {
struct pci_dev *bridge = NULL;
@@ -13317,7 +13749,7 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
tp->pdev->bus->number) &&
(bridge->subordinate->subordinate >=
tp->pdev->bus->number)) {
- tp->tg3_flags |= TG3_FLAG_40BIT_DMA_BUG;
+ tg3_flag_set(tp, 40BIT_DMA_BUG);
pci_dev_put(bridge);
break;
}
@@ -13332,13 +13764,18 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714 ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717)
+ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
+ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
tp->pdev_peer = tg3_find_peer(tp);
if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
- tp->tg3_flags3 |= TG3_FLG3_5717_PLUS;
+ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
+ tg3_flag_set(tp, 5717_PLUS);
+
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 ||
+ tg3_flag(tp, 5717_PLUS))
+ tg3_flag_set(tp, 57765_PLUS);
/* Intentionally exclude ASIC_REV_5906 */
if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
@@ -13347,52 +13784,51 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
- (tp->tg3_flags3 & TG3_FLG3_5717_PLUS))
- tp->tg3_flags3 |= TG3_FLG3_5755_PLUS;
+ tg3_flag(tp, 57765_PLUS))
+ tg3_flag_set(tp, 5755_PLUS);
if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 ||
- (tp->tg3_flags3 & TG3_FLG3_5755_PLUS) ||
- (tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
- tp->tg3_flags2 |= TG3_FLG2_5750_PLUS;
+ tg3_flag(tp, 5755_PLUS) ||
+ tg3_flag(tp, 5780_CLASS))
+ tg3_flag_set(tp, 5750_PLUS);
if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) ||
- (tp->tg3_flags2 & TG3_FLG2_5750_PLUS))
- tp->tg3_flags2 |= TG3_FLG2_5705_PLUS;
+ tg3_flag(tp, 5750_PLUS))
+ tg3_flag_set(tp, 5705_PLUS);
/* 5700 B0 chips do not support checksumming correctly due
* to hardware bugs.
*/
- if (tp->pci_chip_rev_id == CHIPREV_ID_5700_B0)
- tp->tg3_flags |= TG3_FLAG_BROKEN_CHECKSUMS;
- else {
- unsigned long features = NETIF_F_IP_CSUM | NETIF_F_SG | NETIF_F_GRO;
+ if (tp->pci_chip_rev_id != CHIPREV_ID_5700_B0) {
+ u32 features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM;
- tp->tg3_flags |= TG3_FLAG_RX_CHECKSUMS;
- if (tp->tg3_flags3 & TG3_FLG3_5755_PLUS)
+ if (tg3_flag(tp, 5755_PLUS))
features |= NETIF_F_IPV6_CSUM;
tp->dev->features |= features;
- vlan_features_add(tp->dev, features);
+ tp->dev->hw_features |= features;
+ tp->dev->vlan_features |= features;
}
/* Determine TSO capabilities */
if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
; /* Do nothing. HW bug. */
- else if (tp->tg3_flags3 & TG3_FLG3_5717_PLUS)
- tp->tg3_flags2 |= TG3_FLG2_HW_TSO_3;
- else if ((tp->tg3_flags3 & TG3_FLG3_5755_PLUS) ||
+ else if (tg3_flag(tp, 57765_PLUS))
+ tg3_flag_set(tp, HW_TSO_3);
+ else if (tg3_flag(tp, 5755_PLUS) ||
GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
- tp->tg3_flags2 |= TG3_FLG2_HW_TSO_2;
- else if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
- tp->tg3_flags2 |= TG3_FLG2_HW_TSO_1 | TG3_FLG2_TSO_BUG;
+ tg3_flag_set(tp, HW_TSO_2);
+ else if (tg3_flag(tp, 5750_PLUS)) {
+ tg3_flag_set(tp, HW_TSO_1);
+ tg3_flag_set(tp, TSO_BUG);
if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 &&
tp->pci_chip_rev_id >= CHIPREV_ID_5750_C2)
- tp->tg3_flags2 &= ~TG3_FLG2_TSO_BUG;
+ tg3_flag_clear(tp, TSO_BUG);
} else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
- tp->tg3_flags2 |= TG3_FLG2_TSO_BUG;
+ tg3_flag_set(tp, TSO_BUG);
if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)
tp->fw_needed = FIRMWARE_TG3TSO5;
else
@@ -13401,22 +13837,22 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
tp->irq_max = 1;
- if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
- tp->tg3_flags |= TG3_FLAG_SUPPORT_MSI;
+ if (tg3_flag(tp, 5750_PLUS)) {
+ tg3_flag_set(tp, SUPPORT_MSI);
if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX ||
GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX ||
(GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714 &&
tp->pci_chip_rev_id <= CHIPREV_ID_5714_A2 &&
tp->pdev_peer == tp->pdev))
- tp->tg3_flags &= ~TG3_FLAG_SUPPORT_MSI;
+ tg3_flag_clear(tp, SUPPORT_MSI);
- if ((tp->tg3_flags3 & TG3_FLG3_5755_PLUS) ||
+ if (tg3_flag(tp, 5755_PLUS) ||
GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
- tp->tg3_flags2 |= TG3_FLG2_1SHOT_MSI;
+ tg3_flag_set(tp, 1SHOT_MSI);
}
- if (tp->tg3_flags3 & TG3_FLG3_5717_PLUS) {
- tp->tg3_flags |= TG3_FLAG_SUPPORT_MSIX;
+ if (tg3_flag(tp, 57765_PLUS)) {
+ tg3_flag_set(tp, SUPPORT_MSIX);
tp->irq_max = TG3_IRQ_MAX_VECS;
}
}
@@ -13424,20 +13860,23 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
- tp->tg3_flags3 |= TG3_FLG3_SHORT_DMA_BUG;
- else if (!(tp->tg3_flags3 & TG3_FLG3_5755_PLUS)) {
- tp->tg3_flags3 |= TG3_FLG3_4G_DMA_BNDRY_BUG;
- tp->tg3_flags3 |= TG3_FLG3_40BIT_DMA_LIMIT_BUG;
+ tg3_flag_set(tp, SHORT_DMA_BUG);
+ else if (!tg3_flag(tp, 5755_PLUS)) {
+ tg3_flag_set(tp, 4G_DMA_BNDRY_BUG);
+ tg3_flag_set(tp, 40BIT_DMA_LIMIT_BUG);
}
- if ((tp->tg3_flags3 & TG3_FLG3_5717_PLUS) &&
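+ /* 5717-class chips support the larger RX producer/return ring sizes. */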
+ if (tg3_flag(tp, 5717_PLUS))
+ tg3_flag_set(tp, LRG_PROD_RING_CAP);
+
+ if (tg3_flag(tp, 57765_PLUS) &&
GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5719)
- tp->tg3_flags3 |= TG3_FLG3_USE_JUMBO_BDFLAG;
+ tg3_flag_set(tp, USE_JUMBO_BDFLAG);
- if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ||
- (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) ||
- (tp->tg3_flags3 & TG3_FLG3_USE_JUMBO_BDFLAG))
- tp->tg3_flags |= TG3_FLAG_JUMBO_CAPABLE;
+ if (!tg3_flag(tp, 5705_PLUS) ||
+ tg3_flag(tp, 5780_CLASS) ||
+ tg3_flag(tp, USE_JUMBO_BDFLAG))
+ tg3_flag_set(tp, JUMBO_CAPABLE);
pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
&pci_state_reg);
@@ -13446,10 +13885,11 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
if (tp->pcie_cap != 0) {
u16 lnkctl;
- tp->tg3_flags2 |= TG3_FLG2_PCI_EXPRESS;
+ tg3_flag_set(tp, PCI_EXPRESS);
tp->pcie_readrq = 4096;
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
+ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
tp->pcie_readrq = 2048;
pcie_set_readrq(tp->pdev, tp->pcie_readrq);
@@ -13459,19 +13899,19 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
&lnkctl);
if (lnkctl & PCI_EXP_LNKCTL_CLKREQ_EN) {
if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
- tp->tg3_flags2 &= ~TG3_FLG2_HW_TSO_2;
+ tg3_flag_clear(tp, HW_TSO_2);
if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
tp->pci_chip_rev_id == CHIPREV_ID_57780_A0 ||
tp->pci_chip_rev_id == CHIPREV_ID_57780_A1)
- tp->tg3_flags3 |= TG3_FLG3_CLKREQ_BUG;
+ tg3_flag_set(tp, CLKREQ_BUG);
} else if (tp->pci_chip_rev_id == CHIPREV_ID_5717_A0) {
- tp->tg3_flags3 |= TG3_FLG3_L1PLLPD_EN;
+ tg3_flag_set(tp, L1PLLPD_EN);
}
} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
- tp->tg3_flags2 |= TG3_FLG2_PCI_EXPRESS;
- } else if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ||
- (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
+ tg3_flag_set(tp, PCI_EXPRESS);
+ } else if (!tg3_flag(tp, 5705_PLUS) ||
+ tg3_flag(tp, 5780_CLASS)) {
tp->pcix_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_PCIX);
if (!tp->pcix_cap) {
dev_err(&tp->pdev->dev,
@@ -13480,7 +13920,7 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
}
if (!(pci_state_reg & PCISTATE_CONV_PCI_MODE))
- tp->tg3_flags |= TG3_FLAG_PCIX_MODE;
+ tg3_flag_set(tp, PCIX_MODE);
}
/* If we have an AMD 762 or VIA K8T800 chipset, write
@@ -13490,8 +13930,8 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
* posted to the chip in order.
*/
if (pci_dev_present(tg3_write_reorder_chipsets) &&
- !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
- tp->tg3_flags |= TG3_FLAG_MBOX_WRITE_REORDER;
+ !tg3_flag(tp, PCI_EXPRESS))
+ tg3_flag_set(tp, MBOX_WRITE_REORDER);
pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
&tp->pci_cacheline_sz);
@@ -13508,17 +13948,17 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
/* 5700 BX chips need to have their TX producer index
* mailboxes written twice to workaround a bug.
*/
- tp->tg3_flags |= TG3_FLAG_TXD_MBOX_HWBUG;
+ tg3_flag_set(tp, TXD_MBOX_HWBUG);
/* If we are in PCI-X mode, enable register write workaround.
*
* The workaround is to use indirect register accesses
* for all chip writes not to mailbox registers.
*/
- if (tp->tg3_flags & TG3_FLAG_PCIX_MODE) {
+ if (tg3_flag(tp, PCIX_MODE)) {
u32 pm_reg;
- tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG;
+ tg3_flag_set(tp, PCIX_TARGET_HWBUG);
/* The chip can have its power management PCI config
* space registers clobbered due to this bug.
@@ -13541,9 +13981,9 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
}
if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
- tp->tg3_flags |= TG3_FLAG_PCI_HIGH_SPEED;
+ tg3_flag_set(tp, PCI_HIGH_SPEED);
if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
- tp->tg3_flags |= TG3_FLAG_PCI_32BIT;
+ tg3_flag_set(tp, PCI_32BIT);
/* Chip-specific fixup from Broadcom driver */
if ((tp->pci_chip_rev_id == CHIPREV_ID_5704_A0) &&
@@ -13561,10 +14001,10 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
tp->write32_rx_mbox = tg3_write32;
/* Various workaround register access methods */
- if (tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG)
+ if (tg3_flag(tp, PCIX_TARGET_HWBUG))
tp->write32 = tg3_write_indirect_reg32;
else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
- ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) &&
+ (tg3_flag(tp, PCI_EXPRESS) &&
tp->pci_chip_rev_id == CHIPREV_ID_5750_A0)) {
/*
* Back to back register writes can cause problems on these
@@ -13576,14 +14016,13 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
tp->write32 = tg3_write_flush_reg32;
}
- if ((tp->tg3_flags & TG3_FLAG_TXD_MBOX_HWBUG) ||
- (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)) {
+ if (tg3_flag(tp, TXD_MBOX_HWBUG) || tg3_flag(tp, MBOX_WRITE_REORDER)) {
tp->write32_tx_mbox = tg3_write32_tx_mbox;
- if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
+ if (tg3_flag(tp, MBOX_WRITE_REORDER))
tp->write32_rx_mbox = tg3_write_flush_reg32;
}
- if (tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND) {
+ if (tg3_flag(tp, ICH_WORKAROUND)) {
tp->read32 = tg3_read_indirect_reg32;
tp->write32 = tg3_write_indirect_reg32;
tp->read32_mbox = tg3_read_indirect_mbox;
@@ -13606,13 +14045,13 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
}
if (tp->write32 == tg3_write_indirect_reg32 ||
- ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) &&
+ (tg3_flag(tp, PCIX_MODE) &&
(GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)))
- tp->tg3_flags |= TG3_FLAG_SRAM_USE_CONFIG;
+ tg3_flag_set(tp, SRAM_USE_CONFIG);
/* Get eeprom hw config before calling tg3_set_power_state().
- * In particular, the TG3_FLG2_IS_NIC flag must be
+ * In particular, the TG3_FLAG_IS_NIC flag must be
* determined before calling tg3_set_power_state() so that
* we know whether or not to switch out of Vaux power.
* When the flag is set, it means that GPIO1 is used for eeprom
@@ -13621,7 +14060,7 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
*/
tg3_get_eeprom_hw_cfg(tp);
- if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) {
+ if (tg3_flag(tp, ENABLE_APE)) {
/* Allow reads and writes to the
* APE register and memory space.
*/
@@ -13636,16 +14075,16 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
- (tp->tg3_flags3 & TG3_FLG3_5717_PLUS))
- tp->tg3_flags |= TG3_FLAG_CPMU_PRESENT;
+ tg3_flag(tp, 57765_PLUS))
+ tg3_flag_set(tp, CPMU_PRESENT);
- /* Set up tp->grc_local_ctrl before calling tg_power_up().
+ /* Set up tp->grc_local_ctrl before calling tg3_power_up().
* GPIO1 driven high will bring 5700's external PHY out of reset.
* It is also used as eeprom write protect on LOMs.
*/
tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) ||
- (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT))
+ tg3_flag(tp, EEPROM_WRITE_PROT))
tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
GRC_LCLCTRL_GPIO_OUTPUT1);
/* Unused GPIO3 must be driven as output on 5752 because there
@@ -13663,7 +14102,7 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
/* Turn off the debug UART. */
tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
- if (tp->tg3_flags2 & TG3_FLG2_IS_NIC)
+ if (tg3_flag(tp, IS_NIC))
/* Keep VMain power. */
tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
GRC_LCLCTRL_GPIO_OUTPUT0;
@@ -13679,18 +14118,17 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
/* Derive initial jumbo mode from MTU assigned in
* ether_setup() via the alloc_etherdev() call
*/
- if (tp->dev->mtu > ETH_DATA_LEN &&
- !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
- tp->tg3_flags |= TG3_FLAG_JUMBO_RING_ENABLE;
+ if (tp->dev->mtu > ETH_DATA_LEN && !tg3_flag(tp, 5780_CLASS))
+ tg3_flag_set(tp, JUMBO_RING_ENABLE);
/* Determine WakeOnLan speed to use. */
if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
tp->pci_chip_rev_id == CHIPREV_ID_5701_B0 ||
tp->pci_chip_rev_id == CHIPREV_ID_5701_B2) {
- tp->tg3_flags &= ~(TG3_FLAG_WOL_SPEED_100MB);
+ tg3_flag_clear(tp, WOL_SPEED_100MB);
} else {
- tp->tg3_flags |= TG3_FLAG_WOL_SPEED_100MB;
+ tg3_flag_set(tp, WOL_SPEED_100MB);
}
if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
@@ -13711,11 +14149,11 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0)
tp->phy_flags |= TG3_PHYFLG_5704_A0_BUG;
- if ((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) &&
+ if (tg3_flag(tp, 5705_PLUS) &&
!(tp->phy_flags & TG3_PHYFLG_IS_FET) &&
GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57780 &&
- !(tp->tg3_flags3 & TG3_FLG3_5717_PLUS)) {
+ !tg3_flag(tp, 57765_PLUS)) {
if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
@@ -13736,7 +14174,7 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
tp->phy_otp = TG3_OTP_DEFAULT;
}
- if (tp->tg3_flags & TG3_FLAG_CPMU_PRESENT)
+ if (tg3_flag(tp, CPMU_PRESENT))
tp->mi_mode = MAC_MI_MODE_500KHZ_CONST;
else
tp->mi_mode = MAC_MI_MODE_BASE;
@@ -13746,9 +14184,17 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_BX)
tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
+ /* Set these bits to enable statistics workaround. */
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
+ tp->pci_chip_rev_id == CHIPREV_ID_5719_A0 ||
+ tp->pci_chip_rev_id == CHIPREV_ID_5720_A0) {
+ tp->coalesce_mode |= HOSTCC_MODE_ATTN;
+ tp->grc_mode |= GRC_MODE_IRQ_ON_FLOW_ATTN;
+ }
+
if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
- tp->tg3_flags3 |= TG3_FLG3_USE_PHYLIB;
+ tg3_flag_set(tp, USE_PHYLIB);
err = tg3_mdio_init(tp);
if (err)
@@ -13756,7 +14202,15 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
/* Initialize data/descriptor byte/word swapping. */
val = tr32(GRC_MODE);
- val &= GRC_MODE_HOST_STACKUP;
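+ /* 5720 also preserves the B2HRX/HTX2B swap and enable bits. */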
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
+ val &= (GRC_MODE_BYTE_SWAP_B2HRX_DATA |
+ GRC_MODE_WORD_SWAP_B2HRX_DATA |
+ GRC_MODE_B2HRX_ENABLE |
+ GRC_MODE_HTX2B_ENABLE |
+ GRC_MODE_HOST_STACKUP);
+ else
+ val &= GRC_MODE_HOST_STACKUP;
+
tw32(GRC_MODE, val | tp->grc_mode);
tg3_switch_clocks(tp);
@@ -13767,7 +14221,7 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
&pci_state_reg);
if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
- (tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) == 0) {
+ !tg3_flag(tp, PCIX_TARGET_HWBUG)) {
u32 chiprevid = GET_CHIP_REV_ID(tp->misc_host_ctrl);
if (chiprevid == CHIPREV_ID_5701_A0 ||
@@ -13786,7 +14240,7 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
writel(0x00000000, sram_base + 4);
writel(0xffffffff, sram_base + 4);
if (readl(sram_base) != 0x00000000)
- tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG;
+ tg3_flag_set(tp, PCIX_TARGET_HWBUG);
}
}
@@ -13799,12 +14253,12 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
(grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
- tp->tg3_flags2 |= TG3_FLG2_IS_5788;
+ tg3_flag_set(tp, IS_5788);
- if (!(tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
+ if (!tg3_flag(tp, IS_5788) &&
(GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700))
- tp->tg3_flags |= TG3_FLAG_TAGGED_STATUS;
- if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) {
+ tg3_flag_set(tp, TAGGED_STATUS);
+ if (tg3_flag(tp, TAGGED_STATUS)) {
tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
HOSTCC_MODE_CLRTICK_TXBD);
@@ -13814,7 +14268,7 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
}
/* Preserve the APE MAC_MODE bits */
- if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)
+ if (tg3_flag(tp, ENABLE_APE))
tp->mac_mode = MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
else
tp->mac_mode = TG3_DEF_MAC_MODE;
@@ -13861,9 +14315,9 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
* status register in those cases.
*/
if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
- tp->tg3_flags |= TG3_FLAG_USE_LINKCHG_REG;
+ tg3_flag_set(tp, USE_LINKCHG_REG);
else
- tp->tg3_flags &= ~TG3_FLAG_USE_LINKCHG_REG;
+ tg3_flag_clear(tp, USE_LINKCHG_REG);
/* The led_ctrl is set during tg3_phy_probe, here we might
* have to force the link status polling mechanism based
@@ -13873,19 +14327,19 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
- tp->tg3_flags |= TG3_FLAG_USE_LINKCHG_REG;
+ tg3_flag_set(tp, USE_LINKCHG_REG);
}
/* For all SERDES we poll the MAC status register. */
if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
- tp->tg3_flags |= TG3_FLAG_POLL_SERDES;
+ tg3_flag_set(tp, POLL_SERDES);
else
- tp->tg3_flags &= ~TG3_FLAG_POLL_SERDES;
+ tg3_flag_clear(tp, POLL_SERDES);
tp->rx_offset = NET_IP_ALIGN;
tp->rx_copy_thresh = TG3_RX_COPY_THRESHOLD;
if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
- (tp->tg3_flags & TG3_FLAG_PCIX_MODE) != 0) {
+ tg3_flag(tp, PCIX_MODE)) {
tp->rx_offset = 0;
#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
tp->rx_copy_thresh = ~(u16)0;
@@ -13906,7 +14360,7 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
tp->rx_std_max_post = 8;
- if (tp->tg3_flags & TG3_FLAG_ASPM_WORKAROUND)
+ if (tg3_flag(tp, ASPM_WORKAROUND))
tp->pwrmgmt_thresh = tr32(PCIE_PWR_MGMT_THRESH) &
PCIE_PWR_MGMT_L1_THRESH_MSK;
@@ -13954,15 +14408,14 @@ static int __devinit tg3_get_device_address(struct tg3 *tp)
mac_offset = 0x7c;
if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) ||
- (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
+ tg3_flag(tp, 5780_CLASS)) {
if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
mac_offset = 0xcc;
if (tg3_nvram_lock(tp))
tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
else
tg3_nvram_unlock(tp);
- } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719) {
+ } else if (tg3_flag(tp, 5717_PLUS)) {
if (PCI_FUNC(tp->pdev->devfn) & 1)
mac_offset = 0xcc;
if (PCI_FUNC(tp->pdev->devfn) > 1)
@@ -13987,7 +14440,7 @@ static int __devinit tg3_get_device_address(struct tg3 *tp)
}
if (!addr_ok) {
/* Next, try NVRAM. */
- if (!(tp->tg3_flags3 & TG3_FLG3_NO_NVRAM) &&
+ if (!tg3_flag(tp, NO_NVRAM) &&
!tg3_nvram_read_be32(tp, mac_offset + 0, &hi) &&
!tg3_nvram_read_be32(tp, mac_offset + 4, &lo)) {
memcpy(&dev->dev_addr[0], ((char *)&hi) + 2, 2);
@@ -14038,7 +14491,7 @@ static u32 __devinit tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
*/
if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
- !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
+ !tg3_flag(tp, PCI_EXPRESS))
goto out;
#if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
@@ -14051,7 +14504,7 @@ static u32 __devinit tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
#endif
#endif
- if (tp->tg3_flags3 & TG3_FLG3_5717_PLUS) {
+ if (tg3_flag(tp, 57765_PLUS)) {
val = goal ? 0 : DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
goto out;
}
@@ -14070,8 +14523,7 @@ static u32 __devinit tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
* other than 5700 and 5701 which do not implement the
* boundary bits.
*/
- if ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) &&
- !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) {
+ if (tg3_flag(tp, PCIX_MODE) && !tg3_flag(tp, PCI_EXPRESS)) {
switch (cacheline_size) {
case 16:
case 32:
@@ -14096,7 +14548,7 @@ static u32 __devinit tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
break;
}
- } else if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
+ } else if (tg3_flag(tp, PCI_EXPRESS)) {
switch (cacheline_size) {
case 16:
case 32:
@@ -14268,13 +14720,13 @@ static int __devinit tg3_test_dma(struct tg3 *tp)
tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);
- if (tp->tg3_flags3 & TG3_FLG3_5717_PLUS)
+ if (tg3_flag(tp, 57765_PLUS))
goto out;
- if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
+ if (tg3_flag(tp, PCI_EXPRESS)) {
/* DMA read watermark not used on PCIE */
tp->dma_rwctrl |= 0x00180000;
- } else if (!(tp->tg3_flags & TG3_FLAG_PCIX_MODE)) {
+ } else if (!tg3_flag(tp, PCIX_MODE)) {
if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
tp->dma_rwctrl |= 0x003f0000;
@@ -14290,7 +14742,7 @@ static int __devinit tg3_test_dma(struct tg3 *tp)
* do the less restrictive ONE_DMA workaround for
* better performance.
*/
- if ((tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG) &&
+ if (tg3_flag(tp, 40BIT_DMA_BUG) &&
GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
tp->dma_rwctrl |= 0x8000;
else if (ccval == 0x6 || ccval == 0x7)
@@ -14419,7 +14871,6 @@ static int __devinit tg3_test_dma(struct tg3 *tp)
}
if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
DMA_RWCTRL_WRITE_BNDRY_16) {
-
/* DMA test passed without adjusting DMA boundary,
* now look for chipsets that are known to expose the
* DMA bug without failing the test.
@@ -14443,7 +14894,7 @@ out_nofree:
static void __devinit tg3_init_bufmgr_config(struct tg3 *tp)
{
- if (tp->tg3_flags3 & TG3_FLG3_5717_PLUS) {
+ if (tg3_flag(tp, 57765_PLUS)) {
tp->bufmgr_config.mbuf_read_dma_low_water =
DEFAULT_MB_RDMA_LOW_WATER_5705;
tp->bufmgr_config.mbuf_mac_rx_low_water =
@@ -14457,7 +14908,7 @@ static void __devinit tg3_init_bufmgr_config(struct tg3 *tp)
DEFAULT_MB_MACRX_LOW_WATER_JUMBO_57765;
tp->bufmgr_config.mbuf_high_water_jumbo =
DEFAULT_MB_HIGH_WATER_JUMBO_57765;
- } else if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
+ } else if (tg3_flag(tp, 5705_PLUS)) {
tp->bufmgr_config.mbuf_read_dma_low_water =
DEFAULT_MB_RDMA_LOW_WATER_5705;
tp->bufmgr_config.mbuf_mac_rx_low_water =
@@ -14521,6 +14972,7 @@ static char * __devinit tg3_phy_string(struct tg3 *tp)
case TG3_PHY_ID_BCM5718S: return "5718S";
case TG3_PHY_ID_BCM57765: return "57765";
case TG3_PHY_ID_BCM5719C: return "5719C";
+ case TG3_PHY_ID_BCM5720C: return "5720C";
case TG3_PHY_ID_BCM8002: return "8002/serdes";
case 0: return "serdes";
default: return "unknown";
@@ -14529,10 +14981,10 @@ static char * __devinit tg3_phy_string(struct tg3 *tp)
static char * __devinit tg3_bus_string(struct tg3 *tp, char *str)
{
- if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
+ if (tg3_flag(tp, PCI_EXPRESS)) {
strcpy(str, "PCI Express");
return str;
- } else if (tp->tg3_flags & TG3_FLAG_PCIX_MODE) {
+ } else if (tg3_flag(tp, PCIX_MODE)) {
u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f;
strcpy(str, "PCIX:");
@@ -14551,12 +15003,12 @@ static char * __devinit tg3_bus_string(struct tg3 *tp, char *str)
strcat(str, "100MHz");
} else {
strcpy(str, "PCI:");
- if (tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED)
+ if (tg3_flag(tp, PCI_HIGH_SPEED))
strcat(str, "66MHz");
else
strcat(str, "33MHz");
}
- if (tp->tg3_flags & TG3_FLAG_PCI_32BIT)
+ if (tg3_flag(tp, PCI_32BIT))
strcat(str, ":32-bit");
else
strcat(str, ":64-bit");
@@ -14615,7 +15067,7 @@ static void __devinit tg3_init_coal(struct tg3 *tp)
ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
}
- if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
+ if (tg3_flag(tp, 5705_PLUS)) {
ec->rx_coalesce_usecs_irq = 0;
ec->tx_coalesce_usecs_irq = 0;
ec->stats_block_coalesce_usecs = 0;
@@ -14633,6 +15085,8 @@ static const struct net_device_ops tg3_netdev_ops = {
.ndo_do_ioctl = tg3_ioctl,
.ndo_tx_timeout = tg3_tx_timeout,
.ndo_change_mtu = tg3_change_mtu,
+ .ndo_fix_features = tg3_fix_features,
+ .ndo_set_features = tg3_set_features,
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = tg3_poll_controller,
#endif
@@ -14649,6 +15103,7 @@ static const struct net_device_ops tg3_netdev_ops_dma_bug = {
.ndo_do_ioctl = tg3_ioctl,
.ndo_tx_timeout = tg3_tx_timeout,
.ndo_change_mtu = tg3_change_mtu,
+ .ndo_set_features = tg3_set_features,
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = tg3_poll_controller,
#endif
@@ -14663,6 +15118,7 @@ static int __devinit tg3_init_one(struct pci_dev *pdev,
u32 sndmbx, rcvmbx, intmbx;
char str[40];
u64 dma_mask, persist_dma_mask;
+ u32 hw_features = 0;
printk_once(KERN_INFO "%s\n", version);
@@ -14758,9 +15214,7 @@ static int __devinit tg3_init_one(struct pci_dev *pdev,
goto err_out_iounmap;
}
- if ((tp->tg3_flags3 & TG3_FLG3_5755_PLUS) &&
- GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717 &&
- GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5719)
+ if (tg3_flag(tp, 5755_PLUS) && !tg3_flag(tp, 5717_PLUS))
dev->netdev_ops = &tg3_netdev_ops;
else
dev->netdev_ops = &tg3_netdev_ops_dma_bug;
@@ -14772,9 +15226,9 @@ static int __devinit tg3_init_one(struct pci_dev *pdev,
* On 64-bit systems without IOMMU, use 64-bit dma_mask and
* do DMA address check in tg3_start_xmit().
*/
- if (tp->tg3_flags2 & TG3_FLG2_IS_5788)
+ if (tg3_flag(tp, IS_5788))
persist_dma_mask = dma_mask = DMA_BIT_MASK(32);
- else if (tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG) {
+ else if (tg3_flag(tp, 40BIT_DMA_BUG)) {
persist_dma_mask = dma_mask = DMA_BIT_MASK(40);
#ifdef CONFIG_HIGHMEM
dma_mask = DMA_BIT_MASK(64);
@@ -14808,11 +15262,14 @@ static int __devinit tg3_init_one(struct pci_dev *pdev,
tg3_init_bufmgr_config(tp);
/* Selectively allow TSO based on operating conditions */
- if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO) ||
- (tp->fw_needed && !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)))
- tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
+ if ((tg3_flag(tp, HW_TSO_1) ||
+ tg3_flag(tp, HW_TSO_2) ||
+ tg3_flag(tp, HW_TSO_3)) ||
+ (tp->fw_needed && !tg3_flag(tp, ENABLE_ASF)))
+ tg3_flag_set(tp, TSO_CAPABLE);
else {
- tp->tg3_flags2 &= ~(TG3_FLG2_TSO_CAPABLE | TG3_FLG2_TSO_BUG);
+ tg3_flag_clear(tp, TSO_CAPABLE);
+ tg3_flag_clear(tp, TSO_BUG);
tp->fw_needed = NULL;
}
@@ -14823,32 +15280,41 @@ static int __devinit tg3_init_one(struct pci_dev *pdev,
* Firmware TSO on older chips gives lower performance, so it
* is off by default, but can be enabled using ethtool.
*/
- if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO) &&
- (dev->features & NETIF_F_IP_CSUM)) {
- dev->features |= NETIF_F_TSO;
- vlan_features_add(dev, NETIF_F_TSO);
- }
- if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO_2) ||
- (tp->tg3_flags2 & TG3_FLG2_HW_TSO_3)) {
- if (dev->features & NETIF_F_IPV6_CSUM) {
- dev->features |= NETIF_F_TSO6;
- vlan_features_add(dev, NETIF_F_TSO6);
- }
- if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO_3) ||
+ if ((tg3_flag(tp, HW_TSO_1) ||
+ tg3_flag(tp, HW_TSO_2) ||
+ tg3_flag(tp, HW_TSO_3)) &&
+ (dev->features & NETIF_F_IP_CSUM))
+ hw_features |= NETIF_F_TSO;
+ if (tg3_flag(tp, HW_TSO_2) || tg3_flag(tp, HW_TSO_3)) {
+ if (dev->features & NETIF_F_IPV6_CSUM)
+ hw_features |= NETIF_F_TSO6;
+ if (tg3_flag(tp, HW_TSO_3) ||
GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
(GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
- dev->features |= NETIF_F_TSO_ECN;
- vlan_features_add(dev, NETIF_F_TSO_ECN);
- }
+ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
+ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
+ hw_features |= NETIF_F_TSO_ECN;
}
+ dev->hw_features |= hw_features;
+ dev->features |= hw_features;
+ dev->vlan_features |= hw_features;
+
+ /*
+ * Add loopback capability only for a subset of devices that support
+ * MAC-LOOPBACK. Eventually this needs to be enhanced to allow INT-PHY
+ * loopback for the remaining devices.
+ */
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5780 &&
+ !tg3_flag(tp, CPMU_PRESENT))
+ /* Add the loopback capability */
+ dev->hw_features |= NETIF_F_LOOPBACK;
+
if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 &&
- !(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) &&
+ !tg3_flag(tp, TSO_CAPABLE) &&
!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
- tp->tg3_flags2 |= TG3_FLG2_MAX_RXPEND_64;
+ tg3_flag_set(tp, MAX_RXPEND_64);
tp->rx_pending = 63;
}
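[Editor's note: the ndo_fix_features/ndo_set_features hooks added to tg3_netdev_ops earlier in this patch, together with the dev->hw_features block above, tie tg3 into the kernel's run-time feature-toggling path: ethtool requests a feature change, the core asks the driver's fix_features callback to mask off unsupported combinations, then calls set_features to reprogram the hardware. The tg3_fix_features()/tg3_set_features() bodies are not part of this excerpt; a minimal sketch of the shape such handlers take in this kernel generation, where the feature mask is still a plain u32 (function and helper names here are hypothetical), is:

	static u32 foo_fix_features(struct net_device *dev, u32 features)
	{
		/* hypothetical rule: no TSO without IP checksum offload */
		if (!(features & NETIF_F_IP_CSUM))
			features &= ~NETIF_F_TSO;
		return features;
	}

	static int foo_set_features(struct net_device *dev, u32 features)
	{
		bool rxcsum = !!(features & NETIF_F_RXCSUM);

		/* hypothetical: push the new RX-checksum state to hardware */
		foo_write_rxcsum(dev, rxcsum);	/* assumed helper */
		return 0;
	}

Only bits present in dev->hw_features are offered to userspace for toggling, which is why the probe code above accumulates the capabilities in hw_features before copying them into dev->features and dev->vlan_features.]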
@@ -14859,7 +15325,7 @@ static int __devinit tg3_init_one(struct pci_dev *pdev,
goto err_out_iounmap;
}
- if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) {
+ if (tg3_flag(tp, ENABLE_APE)) {
tp->aperegs = pci_ioremap_bar(pdev, BAR_2);
if (!tp->aperegs) {
dev_err(&pdev->dev,
@@ -14870,7 +15336,7 @@ static int __devinit tg3_init_one(struct pci_dev *pdev,
tg3_ape_lock_init(tp);
- if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF)
+ if (tg3_flag(tp, ENABLE_ASF))
tg3_read_dash_ver(tp);
}
@@ -14914,7 +15380,7 @@ static int __devinit tg3_init_one(struct pci_dev *pdev,
else
tnapi->coal_now = HOSTCC_MODE_NOW;
- if (!(tp->tg3_flags & TG3_FLAG_SUPPORT_MSIX))
+ if (!tg3_flag(tp, SUPPORT_MSIX))
break;
/*
@@ -14968,21 +15434,25 @@ static int __devinit tg3_init_one(struct pci_dev *pdev,
ethtype = "10/100/1000Base-T";
netdev_info(dev, "attached PHY is %s (%s Ethernet) "
- "(WireSpeed[%d])\n", tg3_phy_string(tp), ethtype,
- (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED) == 0);
+ "(WireSpeed[%d], EEE[%d])\n",
+ tg3_phy_string(tp), ethtype,
+ (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED) == 0,
+ (tp->phy_flags & TG3_PHYFLG_EEE_CAP) != 0);
}
netdev_info(dev, "RXcsums[%d] LinkChgREG[%d] MIirq[%d] ASF[%d] TSOcap[%d]\n",
- (tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) != 0,
- (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) != 0,
+ (dev->features & NETIF_F_RXCSUM) != 0,
+ tg3_flag(tp, USE_LINKCHG_REG) != 0,
(tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) != 0,
- (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0,
- (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) != 0);
+ tg3_flag(tp, ENABLE_ASF) != 0,
+ tg3_flag(tp, TSO_CAPABLE) != 0);
netdev_info(dev, "dma_rwctrl[%08x] dma_mask[%d-bit]\n",
tp->dma_rwctrl,
pdev->dma_mask == DMA_BIT_MASK(32) ? 32 :
((u64)pdev->dma_mask) == DMA_BIT_MASK(40) ? 40 : 64);
+ pci_save_state(pdev);
+
return 0;
err_out_apeunmap:
@@ -15021,7 +15491,7 @@ static void __devexit tg3_remove_one(struct pci_dev *pdev)
cancel_work_sync(&tp->reset_task);
- if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
+ if (!tg3_flag(tp, USE_PHYLIB)) {
tg3_phy_fini(tp);
tg3_mdio_fini(tp);
}
@@ -15067,7 +15537,7 @@ static int tg3_suspend(struct device *device)
tg3_full_lock(tp, 0);
tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
- tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;
+ tg3_flag_clear(tp, INIT_COMPLETE);
tg3_full_unlock(tp);
err = tg3_power_down_prepare(tp);
@@ -15076,7 +15546,7 @@ static int tg3_suspend(struct device *device)
tg3_full_lock(tp, 0);
- tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
+ tg3_flag_set(tp, INIT_COMPLETE);
err2 = tg3_restart_hw(tp, 1);
if (err2)
goto out;
@@ -15111,7 +15581,7 @@ static int tg3_resume(struct device *device)
tg3_full_lock(tp, 0);
- tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
+ tg3_flag_set(tp, INIT_COMPLETE);
err = tg3_restart_hw(tp, 1);
if (err)
goto out;
@@ -15139,11 +15609,156 @@ static SIMPLE_DEV_PM_OPS(tg3_pm_ops, tg3_suspend, tg3_resume);
#endif /* CONFIG_PM_SLEEP */
+/**
+ * tg3_io_error_detected - called when PCI error is detected
+ * @pdev: Pointer to PCI device
+ * @state: The current pci connection state
+ *
+ * This function is called after a PCI bus error affecting
+ * this device has been detected.
+ */
+static pci_ers_result_t tg3_io_error_detected(struct pci_dev *pdev,
+ pci_channel_state_t state)
+{
+ struct net_device *netdev = pci_get_drvdata(pdev);
+ struct tg3 *tp = netdev_priv(netdev);
+ pci_ers_result_t err = PCI_ERS_RESULT_NEED_RESET;
+
+ netdev_info(netdev, "PCI I/O error detected\n");
+
+ rtnl_lock();
+
+ if (!netif_running(netdev))
+ goto done;
+
+ tg3_phy_stop(tp);
+
+ tg3_netif_stop(tp);
+
+ del_timer_sync(&tp->timer);
+ tg3_flag_clear(tp, RESTART_TIMER);
+
+ /* Want to make sure that the reset task doesn't run */
+ cancel_work_sync(&tp->reset_task);
+ tg3_flag_clear(tp, TX_RECOVERY_PENDING);
+ tg3_flag_clear(tp, RESTART_TIMER);
+
+ netif_device_detach(netdev);
+
+ /* Clean up software state, even if MMIO is blocked */
+ tg3_full_lock(tp, 0);
+ tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
+ tg3_full_unlock(tp);
+
+done:
+ if (state == pci_channel_io_perm_failure)
+ err = PCI_ERS_RESULT_DISCONNECT;
+ else
+ pci_disable_device(pdev);
+
+ rtnl_unlock();
+
+ return err;
+}
+
+/**
+ * tg3_io_slot_reset - called after the pci bus has been reset.
+ * @pdev: Pointer to PCI device
+ *
+ * Restart the card from scratch, as if from a cold-boot.
+ * At this point, the card has experienced a hard reset,
+ * followed by fixups by BIOS, and has its config space
+ * set up identically to what it was at cold boot.
+ */
+static pci_ers_result_t tg3_io_slot_reset(struct pci_dev *pdev)
+{
+ struct net_device *netdev = pci_get_drvdata(pdev);
+ struct tg3 *tp = netdev_priv(netdev);
+ pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;
+ int err;
+
+ rtnl_lock();
+
+ if (pci_enable_device(pdev)) {
+ netdev_err(netdev, "Cannot re-enable PCI device after reset.\n");
+ goto done;
+ }
+
+ pci_set_master(pdev);
+ pci_restore_state(pdev);
+ pci_save_state(pdev);
+
+ if (!netif_running(netdev)) {
+ rc = PCI_ERS_RESULT_RECOVERED;
+ goto done;
+ }
+
+ err = tg3_power_up(tp);
+ if (err) {
+ netdev_err(netdev, "Failed to restore register access.\n");
+ goto done;
+ }
+
+ rc = PCI_ERS_RESULT_RECOVERED;
+
+done:
+ rtnl_unlock();
+
+ return rc;
+}
+
+/**
+ * tg3_io_resume - called when traffic can start flowing again.
+ * @pdev: Pointer to PCI device
+ *
+ * This callback is called when the error recovery driver tells
+ * us that it's OK to resume normal operation.
+ */
+static void tg3_io_resume(struct pci_dev *pdev)
+{
+ struct net_device *netdev = pci_get_drvdata(pdev);
+ struct tg3 *tp = netdev_priv(netdev);
+ int err;
+
+ rtnl_lock();
+
+ if (!netif_running(netdev))
+ goto done;
+
+ tg3_full_lock(tp, 0);
+ tg3_flag_set(tp, INIT_COMPLETE);
+ err = tg3_restart_hw(tp, 1);
+ tg3_full_unlock(tp);
+ if (err) {
+ netdev_err(netdev, "Cannot restart hardware after reset.\n");
+ goto done;
+ }
+
+ netif_device_attach(netdev);
+
+ tp->timer.expires = jiffies + tp->timer_offset;
+ add_timer(&tp->timer);
+
+ tg3_netif_start(tp);
+
+ tg3_phy_start(tp);
+
+done:
+ rtnl_unlock();
+}
+
+static struct pci_error_handlers tg3_err_handler = {
+ .error_detected = tg3_io_error_detected,
+ .slot_reset = tg3_io_slot_reset,
+ .resume = tg3_io_resume
+};
+
static struct pci_driver tg3_driver = {
.name = DRV_MODULE_NAME,
.id_table = tg3_pci_tbl,
.probe = tg3_init_one,
.remove = __devexit_p(tg3_remove_one),
+ .err_handler = &tg3_err_handler,
.driver.pm = TG3_PM_OPS,
};
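[Editor's note: the pci_save_state(pdev) call added at the end of tg3_init_one() is what makes tg3_io_slot_reset() workable: pci_restore_state() replays the config space captured at probe time after the slot has been reset, and the follow-up pci_save_state() re-arms it in case a second error hits. The PCI error-recovery core drives the new callbacks in a fixed order: error_detected() (quiesce, decide whether recovery is worth attempting), then slot_reset() once the link/slot has been reset, then resume() when traffic may flow again. A stripped-down skeleton of the same contract, for a hypothetical driver (names assumed), looks like:

	static pci_ers_result_t foo_error_detected(struct pci_dev *pdev,
						   pci_channel_state_t state)
	{
		/* stop touching the device; MMIO may already be blocked */
		if (state == pci_channel_io_perm_failure)
			return PCI_ERS_RESULT_DISCONNECT;
		pci_disable_device(pdev);
		return PCI_ERS_RESULT_NEED_RESET;
	}

	static pci_ers_result_t foo_slot_reset(struct pci_dev *pdev)
	{
		if (pci_enable_device(pdev))
			return PCI_ERS_RESULT_DISCONNECT;
		pci_set_master(pdev);
		pci_restore_state(pdev);	/* needs an earlier pci_save_state() */
		return PCI_ERS_RESULT_RECOVERED;
	}

	static struct pci_error_handlers foo_err_handler = {
		.error_detected	= foo_error_detected,
		.slot_reset	= foo_slot_reset,
	};]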
diff --git a/drivers/net/tg3.h b/drivers/net/tg3.h
index 73884b69b749..ce010cd33895 100644
--- a/drivers/net/tg3.h
+++ b/drivers/net/tg3.h
@@ -23,11 +23,13 @@
#define TG3_BDINFO_NIC_ADDR 0xcUL /* 32-bit */
#define TG3_BDINFO_SIZE 0x10UL
-#define TG3_RX_INTERNAL_RING_SZ_5906 32
-
-#define RX_STD_MAX_SIZE_5705 512
-#define RX_STD_MAX_SIZE_5717 2048
-#define RX_JUMBO_MAX_SIZE 0xdeadbeef /* XXX */
+#define TG3_RX_STD_MAX_SIZE_5700 512
+#define TG3_RX_STD_MAX_SIZE_5717 2048
+#define TG3_RX_JMB_MAX_SIZE_5700 256
+#define TG3_RX_JMB_MAX_SIZE_5717 1024
+#define TG3_RX_RET_MAX_SIZE_5700 1024
+#define TG3_RX_RET_MAX_SIZE_5705 512
+#define TG3_RX_RET_MAX_SIZE_5717 4096
/* First 256 bytes are a mirror of PCI config space. */
#define TG3PCI_VENDOR 0x00000000
@@ -54,6 +56,7 @@
#define TG3PCI_DEVICE_TIGON3_57791 0x16b2
#define TG3PCI_DEVICE_TIGON3_57795 0x16b6
#define TG3PCI_DEVICE_TIGON3_5719 0x1657
+#define TG3PCI_DEVICE_TIGON3_5720 0x165f
/* 0x04 --> 0x2c unused */
#define TG3PCI_SUBVENDOR_ID_BROADCOM PCI_VENDOR_ID_BROADCOM
#define TG3PCI_SUBDEVICE_ID_BROADCOM_95700A6 0x1644
@@ -142,6 +145,7 @@
#define CHIPREV_ID_5717_A0 0x05717000
#define CHIPREV_ID_57765_A0 0x57785000
#define CHIPREV_ID_5719_A0 0x05719000
+#define CHIPREV_ID_5720_A0 0x05720000
#define GET_ASIC_REV(CHIP_REV_ID) ((CHIP_REV_ID) >> 12)
#define ASIC_REV_5700 0x07
#define ASIC_REV_5701 0x00
@@ -163,6 +167,7 @@
#define ASIC_REV_5717 0x5717
#define ASIC_REV_57765 0x57785
#define ASIC_REV_5719 0x5719
+#define ASIC_REV_5720 0x5720
#define GET_CHIP_REV(CHIP_REV_ID) ((CHIP_REV_ID) >> 8)
#define CHIPREV_5700_AX 0x70
#define CHIPREV_5700_BX 0x71
@@ -183,6 +188,7 @@
#define METAL_REV_B2 0x02
#define TG3PCI_DMA_RW_CTRL 0x0000006c
#define DMA_RWCTRL_DIS_CACHE_ALIGNMENT 0x00000001
+#define DMA_RWCTRL_TAGGED_STAT_WA 0x00000080
#define DMA_RWCTRL_CRDRDR_RDMA_MRRS_MSK 0x00000380
#define DMA_RWCTRL_READ_BNDRY_MASK 0x00000700
#define DMA_RWCTRL_READ_BNDRY_DISAB 0x00000000
@@ -473,6 +479,8 @@
#define TX_MODE_BIG_BCKOFF_ENABLE 0x00000020
#define TX_MODE_LONG_PAUSE_ENABLE 0x00000040
#define TX_MODE_MBUF_LOCKUP_FIX 0x00000100
+#define TX_MODE_JMB_FRM_LEN 0x00400000
+#define TX_MODE_CNT_DN_MODE 0x00800000
#define MAC_TX_STATUS 0x00000460
#define TX_STATUS_XOFFED 0x00000001
#define TX_STATUS_SENT_XOFF 0x00000002
@@ -487,6 +495,8 @@
#define TX_LENGTHS_IPG_SHIFT 8
#define TX_LENGTHS_IPG_CRS_MASK 0x00003000
#define TX_LENGTHS_IPG_CRS_SHIFT 12
+#define TX_LENGTHS_JMB_FRM_LEN_MSK 0x00ff0000
+#define TX_LENGTHS_CNT_DWN_VAL_MSK 0xff000000
#define MAC_RX_MODE 0x00000468
#define RX_MODE_RESET 0x00000001
#define RX_MODE_ENABLE 0x00000002
@@ -1079,6 +1089,9 @@
#define CPMU_HST_ACC_MACCLK_6_25 0x00130000
/* 0x3620 --> 0x3630 unused */
+#define TG3_CPMU_CLCK_ORIDE 0x00003624
+#define CPMU_CLCK_ORIDE_MAC_ORIDE_EN 0x80000000
+
#define TG3_CPMU_CLCK_STAT 0x00003630
#define CPMU_CLCK_STAT_MAC_CLCK_MASK 0x001f0000
#define CPMU_CLCK_STAT_MAC_CLCK_62_5 0x00000000
@@ -1188,6 +1201,7 @@
#define HOSTCC_STATS_BLK_NIC_ADDR 0x00003c40
#define HOSTCC_STATUS_BLK_NIC_ADDR 0x00003c44
#define HOSTCC_FLOW_ATTN 0x00003c48
+#define HOSTCC_FLOW_ATTN_MBUF_LWM 0x00000040
/* 0x3c4c --> 0x3c50 unused */
#define HOSTCC_JUMBO_CON_IDX 0x00003c50
#define HOSTCC_STD_CON_IDX 0x00003c54
@@ -1321,6 +1335,7 @@
#define RDMAC_MODE_MULT_DMA_RD_DIS 0x01000000
#define RDMAC_MODE_IPV4_LSO_EN 0x08000000
#define RDMAC_MODE_IPV6_LSO_EN 0x10000000
+#define RDMAC_MODE_H2BNC_VLAN_DET 0x20000000
#define RDMAC_STATUS 0x00004804
#define RDMAC_STATUS_TGTABORT 0x00000004
#define RDMAC_STATUS_MSTABORT 0x00000008
@@ -1597,6 +1612,7 @@
#define MSGINT_MODE_ONE_SHOT_DISABLE 0x00000020
#define MSGINT_MODE_MULTIVEC_EN 0x00000080
#define MSGINT_STATUS 0x00006004
+#define MSGINT_STATUS_MSI_REQ 0x00000001
#define MSGINT_FIFO 0x00006008
/* 0x600c --> 0x6400 unused */
@@ -1613,6 +1629,8 @@
#define GRC_MODE_WSWAP_NONFRM_DATA 0x00000004
#define GRC_MODE_BSWAP_DATA 0x00000010
#define GRC_MODE_WSWAP_DATA 0x00000020
+#define GRC_MODE_BYTE_SWAP_B2HRX_DATA 0x00000040
+#define GRC_MODE_WORD_SWAP_B2HRX_DATA 0x00000080
#define GRC_MODE_SPLITHDR 0x00000100
#define GRC_MODE_NOFRM_CRACKING 0x00000200
#define GRC_MODE_INCL_CRC 0x00000400
@@ -1620,8 +1638,10 @@
#define GRC_MODE_NOIRQ_ON_SENDS 0x00002000
#define GRC_MODE_NOIRQ_ON_RCV 0x00004000
#define GRC_MODE_FORCE_PCI32BIT 0x00008000
+#define GRC_MODE_B2HRX_ENABLE 0x00008000
#define GRC_MODE_HOST_STACKUP 0x00010000
#define GRC_MODE_HOST_SENDBDS 0x00020000
+#define GRC_MODE_HTX2B_ENABLE 0x00040000
#define GRC_MODE_NO_TX_PHDR_CSUM 0x00100000
#define GRC_MODE_NVRAM_WR_ENABLE 0x00200000
#define GRC_MODE_PCIE_TL_SEL 0x00000000
@@ -1818,6 +1838,38 @@
#define FLASH_5717VENDOR_ATMEL_45USPT 0x03400000
#define FLASH_5717VENDOR_ST_25USPT 0x03400002
#define FLASH_5717VENDOR_ST_45USPT 0x03400001
+#define FLASH_5720_EEPROM_HD 0x00000001
+#define FLASH_5720_EEPROM_LD 0x00000003
+#define FLASH_5720VENDOR_M_ATMEL_DB011D 0x01000000
+#define FLASH_5720VENDOR_M_ATMEL_DB021D 0x01000002
+#define FLASH_5720VENDOR_M_ATMEL_DB041D 0x01000001
+#define FLASH_5720VENDOR_M_ATMEL_DB081D 0x01000003
+#define FLASH_5720VENDOR_M_ST_M25PE10 0x02000000
+#define FLASH_5720VENDOR_M_ST_M25PE20 0x02000002
+#define FLASH_5720VENDOR_M_ST_M25PE40 0x02000001
+#define FLASH_5720VENDOR_M_ST_M25PE80 0x02000003
+#define FLASH_5720VENDOR_M_ST_M45PE10 0x03000000
+#define FLASH_5720VENDOR_M_ST_M45PE20 0x03000002
+#define FLASH_5720VENDOR_M_ST_M45PE40 0x03000001
+#define FLASH_5720VENDOR_M_ST_M45PE80 0x03000003
+#define FLASH_5720VENDOR_A_ATMEL_DB011B 0x01800000
+#define FLASH_5720VENDOR_A_ATMEL_DB021B 0x01800002
+#define FLASH_5720VENDOR_A_ATMEL_DB041B 0x01800001
+#define FLASH_5720VENDOR_A_ATMEL_DB011D 0x01c00000
+#define FLASH_5720VENDOR_A_ATMEL_DB021D 0x01c00002
+#define FLASH_5720VENDOR_A_ATMEL_DB041D 0x01c00001
+#define FLASH_5720VENDOR_A_ATMEL_DB081D 0x01c00003
+#define FLASH_5720VENDOR_A_ST_M25PE10 0x02800000
+#define FLASH_5720VENDOR_A_ST_M25PE20 0x02800002
+#define FLASH_5720VENDOR_A_ST_M25PE40 0x02800001
+#define FLASH_5720VENDOR_A_ST_M25PE80 0x02800003
+#define FLASH_5720VENDOR_A_ST_M45PE10 0x02c00000
+#define FLASH_5720VENDOR_A_ST_M45PE20 0x02c00002
+#define FLASH_5720VENDOR_A_ST_M45PE40 0x02c00001
+#define FLASH_5720VENDOR_A_ST_M45PE80 0x02c00003
+#define FLASH_5720VENDOR_ATMEL_45USPT 0x03c00000
+#define FLASH_5720VENDOR_ST_25USPT 0x03c00002
+#define FLASH_5720VENDOR_ST_45USPT 0x03c00001
#define NVRAM_CFG1_5752PAGE_SIZE_MASK 0x70000000
#define FLASH_5752PAGE_SIZE_256 0x00000000
#define FLASH_5752PAGE_SIZE_512 0x10000000
@@ -1904,6 +1956,8 @@
#define TG3_PCIE_PL_LO_PHYCTL5 0x00000014
#define TG3_PCIE_PL_LO_PHYCTL5_DIS_L2CLKREQ 0x80000000
+#define TG3_REG_BLK_SIZE 0x00008000
+
/* OTP bit definitions */
#define TG3_OTP_AGCTGT_MASK 0x000000e0
#define TG3_OTP_AGCTGT_SHIFT 1
@@ -1955,7 +2009,9 @@
#define TG3_NVM_DIR_END 0x78
#define TG3_NVM_DIRENT_SIZE 0xc
#define TG3_NVM_DIRTYPE_SHIFT 24
+#define TG3_NVM_DIRTYPE_LENMSK 0x003fffff
#define TG3_NVM_DIRTYPE_ASFINI 1
+#define TG3_NVM_DIRTYPE_EXTVPD 20
#define TG3_NVM_PTREV_BCVER 0x94
#define TG3_NVM_BCVER_MAJMSK 0x0000ff00
#define TG3_NVM_BCVER_MAJSFT 8
@@ -2079,6 +2135,13 @@
#define NIC_SRAM_MBUF_POOL_BASE5705 0x00010000
#define NIC_SRAM_MBUF_POOL_SIZE5705 0x0000e000
+#define TG3_SRAM_RX_STD_BDCACHE_SIZE_5700 128
+#define TG3_SRAM_RX_STD_BDCACHE_SIZE_5755 64
+#define TG3_SRAM_RX_STD_BDCACHE_SIZE_5906 32
+
+#define TG3_SRAM_RX_JMB_BDCACHE_SIZE_5700 64
+#define TG3_SRAM_RX_JMB_BDCACHE_SIZE_5717 16
+
/* Currently this is fixed. */
#define TG3_PHY_MII_ADDR 0x01
@@ -2130,23 +2193,30 @@
#define MII_TG3_DSP_EXP96 0x0f96
#define MII_TG3_DSP_EXP97 0x0f97
-#define MII_TG3_AUX_CTRL 0x18 /* auxilliary control register */
+#define MII_TG3_AUX_CTRL 0x18 /* auxiliary control register */
+
+#define MII_TG3_AUXCTL_SHDWSEL_AUXCTL 0x0000
+#define MII_TG3_AUXCTL_ACTL_TX_6DB 0x0400
+#define MII_TG3_AUXCTL_ACTL_SMDSP_ENA 0x0800
+#define MII_TG3_AUXCTL_ACTL_EXTPKTLEN 0x4000
+#define MII_TG3_AUXCTL_SHDWSEL_PWRCTL 0x0002
+#define MII_TG3_AUXCTL_PCTL_WOL_EN 0x0008
#define MII_TG3_AUXCTL_PCTL_100TX_LPWR 0x0010
#define MII_TG3_AUXCTL_PCTL_SPR_ISOLATE 0x0020
+#define MII_TG3_AUXCTL_PCTL_CL_AB_TXDAC 0x0040
#define MII_TG3_AUXCTL_PCTL_VREG_11V 0x0180
-#define MII_TG3_AUXCTL_SHDWSEL_PWRCTL 0x0002
-#define MII_TG3_AUXCTL_MISC_WREN 0x8000
-#define MII_TG3_AUXCTL_MISC_FORCE_AMDIX 0x0200
-#define MII_TG3_AUXCTL_MISC_RDSEL_MISC 0x7000
+#define MII_TG3_AUXCTL_SHDWSEL_MISCTEST 0x0004
+
#define MII_TG3_AUXCTL_SHDWSEL_MISC 0x0007
+#define MII_TG3_AUXCTL_MISC_WIRESPD_EN 0x0010
+#define MII_TG3_AUXCTL_MISC_FORCE_AMDIX 0x0200
+#define MII_TG3_AUXCTL_MISC_RDSEL_SHIFT 12
+#define MII_TG3_AUXCTL_MISC_WREN 0x8000
-#define MII_TG3_AUXCTL_ACTL_SMDSP_ENA 0x0800
-#define MII_TG3_AUXCTL_ACTL_TX_6DB 0x0400
-#define MII_TG3_AUXCTL_SHDWSEL_AUXCTL 0x0000
-#define MII_TG3_AUX_STAT 0x19 /* auxilliary status register */
+#define MII_TG3_AUX_STAT 0x19 /* auxiliary status register */
#define MII_TG3_AUX_STAT_LPASS 0x0004
#define MII_TG3_AUX_STAT_SPDMASK 0x0700
#define MII_TG3_AUX_STAT_10HALF 0x0100
@@ -2546,6 +2616,7 @@ struct tg3_hw_stats {
tg3_stat64_t dma_write_prioq_full;
tg3_stat64_t rxbds_empty;
tg3_stat64_t rx_discards;
+ tg3_stat64_t mbuf_lwm_thresh_hit;
tg3_stat64_t rx_errors;
tg3_stat64_t rx_threshold_hit;
@@ -2745,6 +2816,86 @@ struct tg3_napi {
unsigned int irq_vec;
};
+enum TG3_FLAGS {
+ TG3_FLAG_TAGGED_STATUS = 0,
+ TG3_FLAG_TXD_MBOX_HWBUG,
+ TG3_FLAG_USE_LINKCHG_REG,
+ TG3_FLAG_ERROR_PROCESSED,
+ TG3_FLAG_ENABLE_ASF,
+ TG3_FLAG_ASPM_WORKAROUND,
+ TG3_FLAG_POLL_SERDES,
+ TG3_FLAG_MBOX_WRITE_REORDER,
+ TG3_FLAG_PCIX_TARGET_HWBUG,
+ TG3_FLAG_WOL_SPEED_100MB,
+ TG3_FLAG_WOL_ENABLE,
+ TG3_FLAG_EEPROM_WRITE_PROT,
+ TG3_FLAG_NVRAM,
+ TG3_FLAG_NVRAM_BUFFERED,
+ TG3_FLAG_SUPPORT_MSI,
+ TG3_FLAG_SUPPORT_MSIX,
+ TG3_FLAG_PCIX_MODE,
+ TG3_FLAG_PCI_HIGH_SPEED,
+ TG3_FLAG_PCI_32BIT,
+ TG3_FLAG_SRAM_USE_CONFIG,
+ TG3_FLAG_TX_RECOVERY_PENDING,
+ TG3_FLAG_WOL_CAP,
+ TG3_FLAG_JUMBO_RING_ENABLE,
+ TG3_FLAG_PAUSE_AUTONEG,
+ TG3_FLAG_CPMU_PRESENT,
+ TG3_FLAG_40BIT_DMA_BUG,
+ TG3_FLAG_BROKEN_CHECKSUMS,
+ TG3_FLAG_JUMBO_CAPABLE,
+ TG3_FLAG_CHIP_RESETTING,
+ TG3_FLAG_INIT_COMPLETE,
+ TG3_FLAG_RESTART_TIMER,
+ TG3_FLAG_TSO_BUG,
+ TG3_FLAG_IS_5788,
+ TG3_FLAG_MAX_RXPEND_64,
+ TG3_FLAG_TSO_CAPABLE,
+ TG3_FLAG_PCI_EXPRESS,
+ TG3_FLAG_ASF_NEW_HANDSHAKE,
+ TG3_FLAG_HW_AUTONEG,
+ TG3_FLAG_IS_NIC,
+ TG3_FLAG_FLASH,
+ TG3_FLAG_HW_TSO_1,
+ TG3_FLAG_5705_PLUS,
+ TG3_FLAG_5750_PLUS,
+ TG3_FLAG_HW_TSO_3,
+ TG3_FLAG_USING_MSI,
+ TG3_FLAG_USING_MSIX,
+ TG3_FLAG_ICH_WORKAROUND,
+ TG3_FLAG_5780_CLASS,
+ TG3_FLAG_HW_TSO_2,
+ TG3_FLAG_1SHOT_MSI,
+ TG3_FLAG_NO_FWARE_REPORTED,
+ TG3_FLAG_NO_NVRAM_ADDR_TRANS,
+ TG3_FLAG_ENABLE_APE,
+ TG3_FLAG_PROTECTED_NVRAM,
+ TG3_FLAG_5701_DMA_BUG,
+ TG3_FLAG_USE_PHYLIB,
+ TG3_FLAG_MDIOBUS_INITED,
+ TG3_FLAG_LRG_PROD_RING_CAP,
+ TG3_FLAG_RGMII_INBAND_DISABLE,
+ TG3_FLAG_RGMII_EXT_IBND_RX_EN,
+ TG3_FLAG_RGMII_EXT_IBND_TX_EN,
+ TG3_FLAG_CLKREQ_BUG,
+ TG3_FLAG_5755_PLUS,
+ TG3_FLAG_NO_NVRAM,
+ TG3_FLAG_ENABLE_RSS,
+ TG3_FLAG_ENABLE_TSS,
+ TG3_FLAG_4G_DMA_BNDRY_BUG,
+ TG3_FLAG_40BIT_DMA_LIMIT_BUG,
+ TG3_FLAG_SHORT_DMA_BUG,
+ TG3_FLAG_USE_JUMBO_BDFLAG,
+ TG3_FLAG_L1PLLPD_EN,
+ TG3_FLAG_57765_PLUS,
+ TG3_FLAG_APE_HAS_NCSI,
+ TG3_FLAG_5717_PLUS,
+
+ /* Add new flags before this comment and TG3_FLAG_NUMBER_OF_FLAGS */
+ TG3_FLAG_NUMBER_OF_FLAGS, /* Last entry in enum TG3_FLAGS */
+};
+
struct tg3 {
/* begin "general, frequently-used members" cacheline section */
@@ -2768,7 +2919,7 @@ struct tg3 {
/* SMP locking strategy:
*
* lock: Held during reset, PHY access, timer, and when
- * updating tg3_flags and tg3_flags2.
+ * updating tg3_flags.
*
* netif_tx_lock: Held during tg3_start_xmit. tg3_tx holds
* netif_tx_lock when it needs to call
@@ -2825,94 +2976,13 @@ struct tg3 {
struct tg3_ethtool_stats estats;
struct tg3_ethtool_stats estats_prev;
+ DECLARE_BITMAP(tg3_flags, TG3_FLAG_NUMBER_OF_FLAGS);
+
union {
unsigned long phy_crc_errors;
unsigned long last_event_jiffies;
};
- u32 tg3_flags;
-#define TG3_FLAG_TAGGED_STATUS 0x00000001
-#define TG3_FLAG_TXD_MBOX_HWBUG 0x00000002
-#define TG3_FLAG_RX_CHECKSUMS 0x00000004
-#define TG3_FLAG_USE_LINKCHG_REG 0x00000008
-#define TG3_FLAG_ENABLE_ASF 0x00000020
-#define TG3_FLAG_ASPM_WORKAROUND 0x00000040
-#define TG3_FLAG_POLL_SERDES 0x00000080
-#define TG3_FLAG_MBOX_WRITE_REORDER 0x00000100
-#define TG3_FLAG_PCIX_TARGET_HWBUG 0x00000200
-#define TG3_FLAG_WOL_SPEED_100MB 0x00000400
-#define TG3_FLAG_WOL_ENABLE 0x00000800
-#define TG3_FLAG_EEPROM_WRITE_PROT 0x00001000
-#define TG3_FLAG_NVRAM 0x00002000
-#define TG3_FLAG_NVRAM_BUFFERED 0x00004000
-#define TG3_FLAG_SUPPORT_MSI 0x00008000
-#define TG3_FLAG_SUPPORT_MSIX 0x00010000
-#define TG3_FLAG_SUPPORT_MSI_OR_MSIX (TG3_FLAG_SUPPORT_MSI | \
- TG3_FLAG_SUPPORT_MSIX)
-#define TG3_FLAG_PCIX_MODE 0x00020000
-#define TG3_FLAG_PCI_HIGH_SPEED 0x00040000
-#define TG3_FLAG_PCI_32BIT 0x00080000
-#define TG3_FLAG_SRAM_USE_CONFIG 0x00100000
-#define TG3_FLAG_TX_RECOVERY_PENDING 0x00200000
-#define TG3_FLAG_WOL_CAP 0x00400000
-#define TG3_FLAG_JUMBO_RING_ENABLE 0x00800000
-#define TG3_FLAG_PAUSE_AUTONEG 0x02000000
-#define TG3_FLAG_CPMU_PRESENT 0x04000000
-#define TG3_FLAG_40BIT_DMA_BUG 0x08000000
-#define TG3_FLAG_BROKEN_CHECKSUMS 0x10000000
-#define TG3_FLAG_JUMBO_CAPABLE 0x20000000
-#define TG3_FLAG_CHIP_RESETTING 0x40000000
-#define TG3_FLAG_INIT_COMPLETE 0x80000000
- u32 tg3_flags2;
-#define TG3_FLG2_RESTART_TIMER 0x00000001
-#define TG3_FLG2_TSO_BUG 0x00000002
-#define TG3_FLG2_IS_5788 0x00000008
-#define TG3_FLG2_MAX_RXPEND_64 0x00000010
-#define TG3_FLG2_TSO_CAPABLE 0x00000020
-#define TG3_FLG2_PCI_EXPRESS 0x00000200
-#define TG3_FLG2_ASF_NEW_HANDSHAKE 0x00000400
-#define TG3_FLG2_HW_AUTONEG 0x00000800
-#define TG3_FLG2_IS_NIC 0x00001000
-#define TG3_FLG2_FLASH 0x00008000
-#define TG3_FLG2_HW_TSO_1 0x00010000
-#define TG3_FLG2_5705_PLUS 0x00040000
-#define TG3_FLG2_5750_PLUS 0x00080000
-#define TG3_FLG2_HW_TSO_3 0x00100000
-#define TG3_FLG2_USING_MSI 0x00200000
-#define TG3_FLG2_USING_MSIX 0x00400000
-#define TG3_FLG2_USING_MSI_OR_MSIX (TG3_FLG2_USING_MSI | \
- TG3_FLG2_USING_MSIX)
-#define TG3_FLG2_ICH_WORKAROUND 0x02000000
-#define TG3_FLG2_5780_CLASS 0x04000000
-#define TG3_FLG2_HW_TSO_2 0x08000000
-#define TG3_FLG2_HW_TSO (TG3_FLG2_HW_TSO_1 | \
- TG3_FLG2_HW_TSO_2 | \
- TG3_FLG2_HW_TSO_3)
-#define TG3_FLG2_1SHOT_MSI 0x10000000
-#define TG3_FLG2_NO_FWARE_REPORTED 0x40000000
- u32 tg3_flags3;
-#define TG3_FLG3_NO_NVRAM_ADDR_TRANS 0x00000001
-#define TG3_FLG3_ENABLE_APE 0x00000002
-#define TG3_FLG3_PROTECTED_NVRAM 0x00000004
-#define TG3_FLG3_5701_DMA_BUG 0x00000008
-#define TG3_FLG3_USE_PHYLIB 0x00000010
-#define TG3_FLG3_MDIOBUS_INITED 0x00000020
-#define TG3_FLG3_RGMII_INBAND_DISABLE 0x00000100
-#define TG3_FLG3_RGMII_EXT_IBND_RX_EN 0x00000200
-#define TG3_FLG3_RGMII_EXT_IBND_TX_EN 0x00000400
-#define TG3_FLG3_CLKREQ_BUG 0x00000800
-#define TG3_FLG3_5755_PLUS 0x00002000
-#define TG3_FLG3_NO_NVRAM 0x00004000
-#define TG3_FLG3_ENABLE_RSS 0x00020000
-#define TG3_FLG3_ENABLE_TSS 0x00040000
-#define TG3_FLG3_4G_DMA_BNDRY_BUG 0x00080000
-#define TG3_FLG3_40BIT_DMA_LIMIT_BUG 0x00100000
-#define TG3_FLG3_SHORT_DMA_BUG 0x00200000
-#define TG3_FLG3_USE_JUMBO_BDFLAG 0x00400000
-#define TG3_FLG3_L1PLLPD_EN 0x00800000
-#define TG3_FLG3_5717_PLUS 0x01000000
-#define TG3_FLG3_APE_HAS_NCSI 0x02000000
-
struct timer_list timer;
u16 timer_counter;
u16 timer_multiplier;
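[Editor's note: the tg3.c hunks above read and write these bits through tg3_flag(), tg3_flag_set() and tg3_flag_clear(), whose definitions are not part of this excerpt. Because call sites pass bare suffixes such as 5705_PLUS, which are not valid C identifiers on their own, the accessors are presumably token-pasting macros over the DECLARE_BITMAP field, roughly:

	#define tg3_flag(tp, flag)		\
		test_bit(TG3_FLAG_##flag, (tp)->tg3_flags)
	#define tg3_flag_set(tp, flag)		\
		set_bit(TG3_FLAG_##flag, (tp)->tg3_flags)
	#define tg3_flag_clear(tp, flag)	\
		clear_bit(TG3_FLAG_##flag, (tp)->tg3_flags)

so that, for example, tg3_flag(tp, ENABLE_ASF) expands to test_bit(TG3_FLAG_ENABLE_ASF, tp->tg3_flags) and can replace the old tp->tg3_flags & TG3_FLAG_ENABLE_ASF test without keeping per-flag bit masks or running out of room in a u32.]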
@@ -2983,6 +3053,7 @@ struct tg3 {
#define TG3_PHY_ID_BCM5718S 0xbc050ff0
#define TG3_PHY_ID_BCM57765 0x5c0d8a40
#define TG3_PHY_ID_BCM5719C 0x5c0d8a20
+#define TG3_PHY_ID_BCM5720C 0x5c0d8b60
#define TG3_PHY_ID_BCM5906 0xdc00ac40
#define TG3_PHY_ID_BCM8002 0x60010140
#define TG3_PHY_ID_INVALID 0xffffffff
@@ -3049,6 +3120,7 @@ struct tg3 {
int nvram_lock_cnt;
u32 nvram_size;
+#define TG3_NVRAM_SIZE_2KB 0x00000800
#define TG3_NVRAM_SIZE_64KB 0x00010000
#define TG3_NVRAM_SIZE_128KB 0x00020000
#define TG3_NVRAM_SIZE_256KB 0x00040000
@@ -3064,6 +3136,9 @@ struct tg3 {
#define JEDEC_SAIFUN 0x4f
#define JEDEC_SST 0xbf
+#define ATMEL_AT24C02_CHIP_SIZE TG3_NVRAM_SIZE_2KB
+#define ATMEL_AT24C02_PAGE_SIZE (8)
+
#define ATMEL_AT24C64_CHIP_SIZE TG3_NVRAM_SIZE_64KB
#define ATMEL_AT24C64_PAGE_SIZE (32)
diff --git a/drivers/net/tile/tilepro.c b/drivers/net/tile/tilepro.c
index 0825db6d883f..1e980fdd9d77 100644
--- a/drivers/net/tile/tilepro.c
+++ b/drivers/net/tile/tilepro.c
@@ -1930,7 +1930,7 @@ static int tile_net_tx(struct sk_buff *skb, struct net_device *dev)
unsigned int len = skb->len;
unsigned char *data = skb->data;
- unsigned int csum_start = skb->csum_start - skb_headroom(skb);
+ unsigned int csum_start = skb_checksum_start_offset(skb);
lepp_frag_t frags[LEPP_MAX_FRAGS];
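[Editor's note: the tilepro change swaps an open-coded expression for the skb_checksum_start_offset() helper; in the skbuff.h of this era the helper performs the same computation, so this is a readability cleanup rather than a behavioural change:

	static inline int skb_checksum_start_offset(const struct sk_buff *skb)
	{
		return skb->csum_start - skb_headroom(skb);
	}]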
diff --git a/drivers/net/tokenring/3c359.c b/drivers/net/tokenring/3c359.c
index 10800f16a231..ff32befd8443 100644
--- a/drivers/net/tokenring/3c359.c
+++ b/drivers/net/tokenring/3c359.c
@@ -208,7 +208,7 @@ static void print_rx_state(struct net_device *dev)
* passing/getting the next value from the nic. As with all requests
* on this nic it has to be done in two stages, a) tell the nic which
* memory address you want to access and b) pass/get the value from the nic.
- * With the EEProm, you have to wait before and inbetween access a) and b).
+ * With the EEProm, you have to wait before and between access a) and b).
* As this is only read at initialization time and the wait period is very
* small we shouldn't have to worry about scheduling issues.
*/
@@ -1251,7 +1251,7 @@ static netdev_tx_t xl_xmit(struct sk_buff *skb, struct net_device *dev)
/*
* The NIC has told us that a packet has been downloaded onto the card, we must
* find out which packet it has done, clear the skb and information for the packet
- * then advance around the ring for all tranmitted packets
+ * then advance around the ring for all transmitted packets
*/
static void xl_dn_comp(struct net_device *dev)
@@ -1568,7 +1568,7 @@ static void xl_arb_cmd(struct net_device *dev)
if (lan_status_diff & LSC_SOFT_ERR)
printk(KERN_WARNING "%s: Adapter transmitted Soft Error Report Mac Frame\n",dev->name);
if (lan_status_diff & LSC_TRAN_BCN)
- printk(KERN_INFO "%s: We are tranmitting the beacon, aaah\n",dev->name);
+ printk(KERN_INFO "%s: We are transmitting the beacon, aaah\n",dev->name);
if (lan_status_diff & LSC_SS)
printk(KERN_INFO "%s: Single Station on the ring\n", dev->name);
if (lan_status_diff & LSC_RING_REC)
diff --git a/drivers/net/tokenring/lanstreamer.c b/drivers/net/tokenring/lanstreamer.c
index 5bd140704533..9354ca9da576 100644
--- a/drivers/net/tokenring/lanstreamer.c
+++ b/drivers/net/tokenring/lanstreamer.c
@@ -1675,7 +1675,7 @@ drop_frame:
if (lan_status_diff & LSC_SOFT_ERR)
printk(KERN_WARNING "%s: Adapter transmitted Soft Error Report Mac Frame\n", dev->name);
if (lan_status_diff & LSC_TRAN_BCN)
- printk(KERN_INFO "%s: We are tranmitting the beacon, aaah\n", dev->name);
+ printk(KERN_INFO "%s: We are transmitting the beacon, aaah\n", dev->name);
if (lan_status_diff & LSC_SS)
printk(KERN_INFO "%s: Single Station on the ring\n", dev->name);
if (lan_status_diff & LSC_RING_REC)
diff --git a/drivers/net/tokenring/madgemc.c b/drivers/net/tokenring/madgemc.c
index 785ad1a2157b..1313aa1315f0 100644
--- a/drivers/net/tokenring/madgemc.c
+++ b/drivers/net/tokenring/madgemc.c
@@ -73,7 +73,7 @@ static void madgemc_setint(struct net_device *dev, int val);
static irqreturn_t madgemc_interrupt(int irq, void *dev_id);
/*
- * These work around paging, however they don't guarentee you're on the
+ * These work around paging, however they don't guarantee you're on the
* right page.
*/
#define SIFREADB(reg) (inb(dev->base_addr + ((reg<0x8)?reg:reg-0x8)))
@@ -387,7 +387,7 @@ getout:
* both with their own disadvantages...
*
* 1) Read in the SIFSTS register from the TMS controller. This
- * is guarenteed to be accurate, however, there's a fairly
+ * is guaranteed to be accurate, however, there's a fairly
* large performance penalty for doing so: the Madge chips
* must request the register from the Eagle, the Eagle must
* read them from its internal bus, and then take the route
@@ -454,7 +454,7 @@ static irqreturn_t madgemc_interrupt(int irq, void *dev_id)
}
/*
- * Set the card to the prefered ring speed.
+ * Set the card to the preferred ring speed.
*
* Unlike newer cards, the MC16/32 have their speed selection
* circuit connected to the Madge ASICs and not to the TMS380
@@ -727,7 +727,7 @@ static int __devexit madgemc_remove(struct device *device)
return 0;
}
-static short madgemc_adapter_ids[] __initdata = {
+static const short madgemc_adapter_ids[] __devinitconst = {
0x002d,
0x0000
};
diff --git a/drivers/net/tokenring/olympic.c b/drivers/net/tokenring/olympic.c
index 3d2fbe60b46e..e3855aeb13d4 100644
--- a/drivers/net/tokenring/olympic.c
+++ b/drivers/net/tokenring/olympic.c
@@ -86,6 +86,7 @@
#include <linux/timer.h>
#include <linux/in.h>
#include <linux/ioport.h>
+#include <linux/seq_file.h>
#include <linux/string.h>
#include <linux/proc_fs.h>
#include <linux/ptrace.h>
@@ -193,7 +194,7 @@ static void olympic_arb_cmd(struct net_device *dev);
static int olympic_change_mtu(struct net_device *dev, int mtu);
static void olympic_srb_bh(struct net_device *dev) ;
static void olympic_asb_bh(struct net_device *dev) ;
-static int olympic_proc_info(char *buffer, char **start, off_t offset, int length, int *eof, void *data) ;
+static const struct file_operations olympic_proc_ops;
static const struct net_device_ops olympic_netdev_ops = {
.ndo_open = olympic_open,
@@ -272,7 +273,7 @@ static int __devinit olympic_probe(struct pci_dev *pdev, const struct pci_device
char proc_name[20] ;
strcpy(proc_name,"olympic_") ;
strcat(proc_name,dev->name) ;
- create_proc_read_entry(proc_name,0,init_net.proc_net,olympic_proc_info,(void *)dev) ;
+ proc_create_data(proc_name, 0, init_net.proc_net, &olympic_proc_ops, dev);
printk("Olympic: Network Monitor information: /proc/%s\n",proc_name);
}
return 0 ;
@@ -1500,7 +1501,7 @@ drop_frame:
if (lan_status_diff & LSC_SOFT_ERR)
printk(KERN_WARNING "%s: Adapter transmitted Soft Error Report Mac Frame\n",dev->name);
if (lan_status_diff & LSC_TRAN_BCN)
- printk(KERN_INFO "%s: We are tranmitting the beacon, aaah\n",dev->name);
+ printk(KERN_INFO "%s: We are transmitting the beacon, aaah\n",dev->name);
if (lan_status_diff & LSC_SS)
printk(KERN_INFO "%s: Single Station on the ring\n", dev->name);
if (lan_status_diff & LSC_RING_REC)
@@ -1615,29 +1616,25 @@ static int olympic_change_mtu(struct net_device *dev, int mtu)
return 0 ;
}
-static int olympic_proc_info(char *buffer, char **start, off_t offset, int length, int *eof, void *data)
+static int olympic_proc_show(struct seq_file *m, void *v)
{
- struct net_device *dev = (struct net_device *)data ;
+ struct net_device *dev = m->private;
struct olympic_private *olympic_priv=netdev_priv(dev);
u8 __iomem *oat = (olympic_priv->olympic_lap + olympic_priv->olympic_addr_table_addr) ;
u8 __iomem *opt = (olympic_priv->olympic_lap + olympic_priv->olympic_parms_addr) ;
- int size = 0 ;
- int len=0;
- off_t begin=0;
- off_t pos=0;
u8 addr[6];
u8 addr2[6];
int i;
- size = sprintf(buffer,
+ seq_printf(m,
"IBM Pit/Pit-Phy/Olympic Chipset Token Ring Adapter %s\n",dev->name);
- size += sprintf(buffer+size, "\n%6s: Adapter Address : Node Address : Functional Addr\n",
+ seq_printf(m, "\n%6s: Adapter Address : Node Address : Functional Addr\n",
dev->name);
for (i = 0 ; i < 6 ; i++)
addr[i] = readb(oat+offsetof(struct olympic_adapter_addr_table,node_addr) + i);
- size += sprintf(buffer+size, "%6s: %pM : %pM : %02x:%02x:%02x:%02x\n",
+ seq_printf(m, "%6s: %pM : %pM : %02x:%02x:%02x:%02x\n",
dev->name,
dev->dev_addr, addr,
readb(oat+offsetof(struct olympic_adapter_addr_table,func_addr)),
@@ -1645,9 +1642,9 @@ static int olympic_proc_info(char *buffer, char **start, off_t offset, int lengt
readb(oat+offsetof(struct olympic_adapter_addr_table,func_addr)+2),
readb(oat+offsetof(struct olympic_adapter_addr_table,func_addr)+3));
- size += sprintf(buffer+size, "\n%6s: Token Ring Parameters Table:\n", dev->name);
+ seq_printf(m, "\n%6s: Token Ring Parameters Table:\n", dev->name);
- size += sprintf(buffer+size, "%6s: Physical Addr : Up Node Address : Poll Address : AccPri : Auth Src : Att Code :\n",
+ seq_printf(m, "%6s: Physical Addr : Up Node Address : Poll Address : AccPri : Auth Src : Att Code :\n",
dev->name) ;
for (i = 0 ; i < 6 ; i++)
@@ -1655,7 +1652,7 @@ static int olympic_proc_info(char *buffer, char **start, off_t offset, int lengt
for (i = 0 ; i < 6 ; i++)
addr2[i] = readb(opt+offsetof(struct olympic_parameters_table, poll_addr) + i);
- size += sprintf(buffer+size, "%6s: %02x:%02x:%02x:%02x : %pM : %pM : %04x : %04x : %04x :\n",
+ seq_printf(m, "%6s: %02x:%02x:%02x:%02x : %pM : %pM : %04x : %04x : %04x :\n",
dev->name,
readb(opt+offsetof(struct olympic_parameters_table, phys_addr)),
readb(opt+offsetof(struct olympic_parameters_table, phys_addr)+1),
@@ -1666,12 +1663,12 @@ static int olympic_proc_info(char *buffer, char **start, off_t offset, int lengt
swab16(readw(opt+offsetof(struct olympic_parameters_table, auth_source_class))),
swab16(readw(opt+offsetof(struct olympic_parameters_table, att_code))));
- size += sprintf(buffer+size, "%6s: Source Address : Bcn T : Maj. V : Lan St : Lcl Rg : Mon Err : Frame Correl : \n",
+ seq_printf(m, "%6s: Source Address : Bcn T : Maj. V : Lan St : Lcl Rg : Mon Err : Frame Correl : \n",
dev->name) ;
for (i = 0 ; i < 6 ; i++)
addr[i] = readb(opt+offsetof(struct olympic_parameters_table, source_addr) + i);
- size += sprintf(buffer+size, "%6s: %pM : %04x : %04x : %04x : %04x : %04x : %04x : \n",
+ seq_printf(m, "%6s: %pM : %04x : %04x : %04x : %04x : %04x : %04x : \n",
dev->name, addr,
swab16(readw(opt+offsetof(struct olympic_parameters_table, beacon_type))),
swab16(readw(opt+offsetof(struct olympic_parameters_table, major_vector))),
@@ -1680,12 +1677,12 @@ static int olympic_proc_info(char *buffer, char **start, off_t offset, int lengt
swab16(readw(opt+offsetof(struct olympic_parameters_table, mon_error))),
swab16(readw(opt+offsetof(struct olympic_parameters_table, frame_correl))));
- size += sprintf(buffer+size, "%6s: Beacon Details : Tx : Rx : NAUN Node Address : NAUN Node Phys : \n",
+ seq_printf(m, "%6s: Beacon Details : Tx : Rx : NAUN Node Address : NAUN Node Phys : \n",
dev->name) ;
for (i = 0 ; i < 6 ; i++)
addr[i] = readb(opt+offsetof(struct olympic_parameters_table, beacon_naun) + i);
- size += sprintf(buffer+size, "%6s: : %02x : %02x : %pM : %02x:%02x:%02x:%02x : \n",
+ seq_printf(m, "%6s: : %02x : %02x : %pM : %02x:%02x:%02x:%02x : \n",
dev->name,
swab16(readw(opt+offsetof(struct olympic_parameters_table, beacon_transmit))),
swab16(readw(opt+offsetof(struct olympic_parameters_table, beacon_receive))),
@@ -1695,19 +1692,21 @@ static int olympic_proc_info(char *buffer, char **start, off_t offset, int lengt
readb(opt+offsetof(struct olympic_parameters_table, beacon_phys)+2),
readb(opt+offsetof(struct olympic_parameters_table, beacon_phys)+3));
- len=size;
- pos=begin+size;
- if (pos<offset) {
- len=0;
- begin=pos;
- }
- *start=buffer+(offset-begin); /* Start of wanted data */
- len-=(offset-begin); /* Start slop */
- if(len>length)
- len=length; /* Ending slop */
- return len;
+ return 0;
}
+static int olympic_proc_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, olympic_proc_show, PDE(inode)->data);
+}
+
+static const struct file_operations olympic_proc_ops = {
+ .open = olympic_proc_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
static void __devexit olympic_remove_one(struct pci_dev *pdev)
{
struct net_device *dev = pci_get_drvdata(pdev) ;
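[Editor's note: the olympic conversion follows the standard create_proc_read_entry-to-seq_file recipe. The old read handler had to do offset/length bookkeeping by hand (the removed begin/pos/len logic); seq_file handles partial reads and buffer sizing for it. Reduced to its minimum for a hypothetical driver, the pattern is:

	static int foo_proc_show(struct seq_file *m, void *v)
	{
		struct net_device *dev = m->private; /* set by single_open() */

		seq_printf(m, "%s: adapter statistics would go here\n", dev->name);
		return 0;
	}

	static int foo_proc_open(struct inode *inode, struct file *file)
	{
		/* PDE(inode)->data is whatever was passed to proc_create_data() */
		return single_open(file, foo_proc_show, PDE(inode)->data);
	}

single_open() stores its third argument in seq_file->private, which is how olympic_proc_show() recovers the net_device without any global state.]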
diff --git a/drivers/net/tokenring/smctr.c b/drivers/net/tokenring/smctr.c
index 63db5a6762ae..d9044aba7afa 100644
--- a/drivers/net/tokenring/smctr.c
+++ b/drivers/net/tokenring/smctr.c
@@ -393,7 +393,7 @@ static int smctr_alloc_shared_memory(struct net_device *dev)
tp->rx_bdb_end[NON_MAC_QUEUE] = (BDBlock *)smctr_malloc(dev, 0);
/* Allocate MAC transmit buffers.
- * MAC Tx Buffers doen't have to be on an ODD Boundry.
+ * MAC Tx Buffers don't have to be on an ODD Boundary.
*/
tp->tx_buff_head[MAC_QUEUE]
= (__u16 *)smctr_malloc(dev, tp->tx_buff_size[MAC_QUEUE]);
@@ -415,7 +415,7 @@ static int smctr_alloc_shared_memory(struct net_device *dev)
/* Allocate Non-MAC transmit buffers.
* ?? For maximum Netware performance, put Tx Buffers on
- * ODD Boundry and then restore malloc to Even Boundrys.
+ * ODD Boundary and then restore malloc to Even Boundaries.
*/
smctr_malloc(dev, 1L);
tp->tx_buff_head[NON_MAC_QUEUE]
@@ -1311,7 +1311,7 @@ static unsigned int smctr_get_num_rx_bdbs(struct net_device *dev)
mem_used += sizeof(BDBlock) * tp->num_rx_bdbs[MAC_QUEUE];
/* Allocate MAC transmit buffers.
- * MAC transmit buffers don't have to be on an ODD Boundry.
+ * MAC transmit buffers don't have to be on an ODD Boundary.
*/
mem_used += tp->tx_buff_size[MAC_QUEUE];
@@ -1325,7 +1325,7 @@ static unsigned int smctr_get_num_rx_bdbs(struct net_device *dev)
/* Allocate Non-MAC transmit buffers.
* For maximum Netware performance, put Tx Buffers on
- * ODD Boundry,and then restore malloc to Even Boundrys.
+ * ODD Boundary, and then restore malloc to Even Boundaries.
*/
mem_used += 1L;
mem_used += tp->tx_buff_size[NON_MAC_QUEUE];
@@ -3069,8 +3069,8 @@ static int smctr_load_node_addr(struct net_device *dev)
* disabled.!?
*
* NOTE 2: If the monitor_state is MS_BEACON_TEST_STATE and the receive_mask
- * has any multi-cast or promiscous bits set, the receive_mask needs to
- * be changed to clear the multi-cast or promiscous mode bits, the lobe_test
+ * has any multi-cast or promiscuous bits set, the receive_mask needs to
+ * be changed to clear the multi-cast or promiscuous mode bits, the lobe_test
* run, and then the receive mask set back to its original value if the test
* is successful.
*/
diff --git a/drivers/net/tokenring/tms380tr.h b/drivers/net/tokenring/tms380tr.h
index 60b30ee38dcb..e5a617c586c2 100644
--- a/drivers/net/tokenring/tms380tr.h
+++ b/drivers/net/tokenring/tms380tr.h
@@ -442,7 +442,7 @@ typedef struct {
#define PASS_FIRST_BUF_ONLY 0x0100 /* Passes only first internal buffer
* of each received frame; FrameSize
* of RPLs must contain internal
- * BUFFER_SIZE bits for promiscous mode.
+ * BUFFER_SIZE bits for promiscuous mode.
*/
#define ENABLE_FULL_DUPLEX_SELECTION 0x2000
/* Enable the use of full-duplex
diff --git a/drivers/net/tsi108_eth.h b/drivers/net/tsi108_eth.h
index 5a77ae6c5f36..5fee7d78dc6d 100644
--- a/drivers/net/tsi108_eth.h
+++ b/drivers/net/tsi108_eth.h
@@ -305,9 +305,9 @@
#define TSI108_TX_CRC (1 << 5) /* Generate CRC for this packet */
#define TSI108_TX_INT (1 << 14) /* Generate an IRQ after frag. processed */
#define TSI108_TX_RETRY (0xf << 16) /* 4 bit field indicating num. of retries */
-#define TSI108_TX_COL (1 << 20) /* Set if a collision occured */
-#define TSI108_TX_LCOL (1 << 24) /* Set if a late collision occured */
-#define TSI108_TX_UNDER (1 << 25) /* Set if a FIFO underrun occured */
+#define TSI108_TX_COL (1 << 20) /* Set if a collision occurred */
+#define TSI108_TX_LCOL (1 << 24) /* Set if a late collision occurred */
+#define TSI108_TX_UNDER (1 << 25) /* Set if a FIFO underrun occurred */
#define TSI108_TX_RLIM (1 << 26) /* Set if the retry limit was reached */
#define TSI108_TX_OK (1 << 30) /* Set if the frame TX was successful */
#define TSI108_TX_OWN (1 << 31) /* Set if the device owns the descriptor */
@@ -332,7 +332,7 @@ typedef struct {
#define TSI108_RX_RUNT (1 << 4)/* Packet is less than minimum size */
#define TSI108_RX_HASH (1 << 7)/* Hash table match */
#define TSI108_RX_BAD (1 << 8) /* Bad frame */
-#define TSI108_RX_OVER (1 << 9) /* FIFO overrun occured */
+#define TSI108_RX_OVER (1 << 9) /* FIFO overrun occurred */
#define TSI108_RX_TRUNC (1 << 11) /* Packet truncated due to excess length */
#define TSI108_RX_CRC (1 << 12) /* Packet had a CRC error */
#define TSI108_RX_INT (1 << 13) /* Generate an IRQ after frag. processed */
diff --git a/drivers/net/tulip/21142.c b/drivers/net/tulip/21142.c
index 007d8e75666d..092c3faa882a 100644
--- a/drivers/net/tulip/21142.c
+++ b/drivers/net/tulip/21142.c
@@ -122,8 +122,8 @@ void t21142_start_nway(struct net_device *dev)
tp->nway = tp->mediasense = 1;
tp->nwayset = tp->lpar = 0;
if (tulip_debug > 1)
- printk(KERN_DEBUG "%s: Restarting 21143 autonegotiation, csr14=%08x\n",
- dev->name, csr14);
+ netdev_dbg(dev, "Restarting 21143 autonegotiation, csr14=%08x\n",
+ csr14);
iowrite32(0x0001, ioaddr + CSR13);
udelay(100);
iowrite32(csr14, ioaddr + CSR14);
@@ -206,14 +206,14 @@ void t21142_lnk_change(struct net_device *dev, int csr5)
#if 0 /* Restart shouldn't be needed. */
iowrite32(tp->csr6 | RxOn, ioaddr + CSR6);
if (tulip_debug > 2)
- printk(KERN_DEBUG "%s: Restarting Tx and Rx, CSR5 is %08x\n",
- dev->name, ioread32(ioaddr + CSR5));
+ netdev_dbg(dev, " Restarting Tx and Rx, CSR5 is %08x\n",
+ ioread32(ioaddr + CSR5));
#endif
tulip_start_rxtx(tp);
if (tulip_debug > 2)
- printk(KERN_DEBUG "%s: Setting CSR6 %08x/%x CSR12 %08x\n",
- dev->name, tp->csr6, ioread32(ioaddr + CSR6),
- ioread32(ioaddr + CSR12));
+ netdev_dbg(dev, " Setting CSR6 %08x/%x CSR12 %08x\n",
+ tp->csr6, ioread32(ioaddr + CSR6),
+ ioread32(ioaddr + CSR12));
} else if ((tp->nwayset && (csr5 & 0x08000000) &&
(dev->if_port == 3 || dev->if_port == 5) &&
(csr12 & 2) == 2) ||
diff --git a/drivers/net/tulip/Makefile b/drivers/net/tulip/Makefile
index 200cbf7c815c..5e8be38b45bb 100644
--- a/drivers/net/tulip/Makefile
+++ b/drivers/net/tulip/Makefile
@@ -2,6 +2,8 @@
# Makefile for the Linux "Tulip" family network device drivers.
#
+ccflags-$(CONFIG_NET_TULIP) := -DDEBUG
+
obj-$(CONFIG_PCMCIA_XIRCOM) += xircom_cb.o
obj-$(CONFIG_DM9102) += dmfe.o
obj-$(CONFIG_WINBOND_840) += winbond-840.o
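[Editor's note: the -DDEBUG addition is what keeps the converted messages visible: netdev_dbg()/netif_dbg() compile to nothing unless DEBUG is defined or CONFIG_DYNAMIC_DEBUG is enabled, so moving the tulip drivers away from explicit printk(KERN_DEBUG ...) would otherwise silently drop their debug output. netif_dbg() also folds in the per-driver msg_enable test the old code spelled out by hand; conceptually it behaves like the following simplified sketch (the real macro in netdevice.h differs in detail):

	#define netif_dbg(priv, type, netdev, fmt, args...)	\
	do {							\
		if (netif_msg_##type(priv))			\
			netdev_dbg((netdev), fmt, ##args);	\
	} while (0)

so netif_dbg(de, rx_err, de->dev, ...) expands to the netif_msg_rx_err(de) check plus a netdev_dbg() call, matching the removed open-coded form.]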
diff --git a/drivers/net/tulip/de2104x.c b/drivers/net/tulip/de2104x.c
index b13c6b040be3..e2f692351180 100644
--- a/drivers/net/tulip/de2104x.c
+++ b/drivers/net/tulip/de2104x.c
@@ -27,6 +27,8 @@
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#define DRV_NAME "de2104x"
#define DRV_VERSION "0.7"
#define DRV_RELDATE "Mar 17, 2004"
@@ -51,7 +53,7 @@
/* These identify the driver base version and may not be removed. */
static char version[] =
-KERN_INFO DRV_NAME " PCI Ethernet driver v" DRV_VERSION " (" DRV_RELDATE ")\n";
+"PCI Ethernet driver v" DRV_VERSION " (" DRV_RELDATE ")";
MODULE_AUTHOR("Jeff Garzik <jgarzik@pobox.com>");
MODULE_DESCRIPTION("Intel/Digital 21040/1 series PCI Ethernet driver");
@@ -73,8 +75,6 @@ static int rx_copybreak = 100;
module_param (rx_copybreak, int, 0);
MODULE_PARM_DESC (rx_copybreak, "de2104x Breakpoint at which Rx packets are copied");
-#define PFX DRV_NAME ": "
-
#define DE_DEF_MSG_ENABLE (NETIF_MSG_DRV | \
NETIF_MSG_PROBE | \
NETIF_MSG_LINK | \
@@ -377,18 +377,16 @@ static u16 t21041_csr15[] = { 0x0008, 0x0006, 0x000E, 0x0008, 0x0008, };
static void de_rx_err_acct (struct de_private *de, unsigned rx_tail,
u32 status, u32 len)
{
- if (netif_msg_rx_err (de))
- printk (KERN_DEBUG
- "%s: rx err, slot %d status 0x%x len %d\n",
- de->dev->name, rx_tail, status, len);
+ netif_dbg(de, rx_err, de->dev,
+ "rx err, slot %d status 0x%x len %d\n",
+ rx_tail, status, len);
if ((status & 0x38000300) != 0x0300) {
/* Ignore earlier buffers. */
if ((status & 0xffff) != 0x7fff) {
- if (netif_msg_rx_err(de))
- dev_warn(&de->dev->dev,
- "Oversized Ethernet frame spanned multiple buffers, status %08x!\n",
- status);
+ netif_warn(de, rx_err, de->dev,
+ "Oversized Ethernet frame spanned multiple buffers, status %08x!\n",
+ status);
de->net_stats.rx_length_errors++;
}
} else if (status & RxError) {
@@ -435,10 +433,9 @@ static void de_rx (struct de_private *de)
copying_skb = (len <= rx_copybreak);
- if (unlikely(netif_msg_rx_status(de)))
- printk(KERN_DEBUG "%s: rx slot %d status 0x%x len %d copying? %d\n",
- de->dev->name, rx_tail, status, len,
- copying_skb);
+ netif_dbg(de, rx_status, de->dev,
+ "rx slot %d status 0x%x len %d copying? %d\n",
+ rx_tail, status, len, copying_skb);
buflen = copying_skb ? (len + RX_OFFSET) : de->rx_buf_sz;
copy_skb = dev_alloc_skb (buflen);
@@ -491,7 +488,7 @@ rx_next:
}
if (!rx_work)
- dev_warn(&de->dev->dev, "rx work limit reached\n");
+ netdev_warn(de->dev, "rx work limit reached\n");
de->rx_tail = rx_tail;
}
@@ -506,10 +503,9 @@ static irqreturn_t de_interrupt (int irq, void *dev_instance)
if ((!(status & (IntrOK|IntrErr))) || (status == 0xFFFF))
return IRQ_NONE;
- if (netif_msg_intr(de))
- printk(KERN_DEBUG "%s: intr, status %08x mode %08x desc %u/%u/%u\n",
- dev->name, status, dr32(MacMode),
- de->rx_tail, de->tx_head, de->tx_tail);
+ netif_dbg(de, intr, dev, "intr, status %08x mode %08x desc %u/%u/%u\n",
+ status, dr32(MacMode),
+ de->rx_tail, de->tx_head, de->tx_tail);
dw32(MacStatus, status);
@@ -534,9 +530,9 @@ static irqreturn_t de_interrupt (int irq, void *dev_instance)
pci_read_config_word(de->pdev, PCI_STATUS, &pci_status);
pci_write_config_word(de->pdev, PCI_STATUS, pci_status);
- dev_err(&de->dev->dev,
- "PCI bus error, status=%08x, PCI status=%04x\n",
- status, pci_status);
+ netdev_err(de->dev,
+ "PCI bus error, status=%08x, PCI status=%04x\n",
+ status, pci_status);
}
return IRQ_HANDLED;
@@ -572,9 +568,9 @@ static void de_tx (struct de_private *de)
if (status & LastFrag) {
if (status & TxError) {
- if (netif_msg_tx_err(de))
- printk(KERN_DEBUG "%s: tx err, status 0x%x\n",
- de->dev->name, status);
+ netif_dbg(de, tx_err, de->dev,
+ "tx err, status 0x%x\n",
+ status);
de->net_stats.tx_errors++;
if (status & TxOWC)
de->net_stats.tx_window_errors++;
@@ -587,9 +583,8 @@ static void de_tx (struct de_private *de)
} else {
de->net_stats.tx_packets++;
de->net_stats.tx_bytes += skb->len;
- if (netif_msg_tx_done(de))
- printk(KERN_DEBUG "%s: tx done, slot %d\n",
- de->dev->name, tx_tail);
+ netif_dbg(de, tx_done, de->dev,
+ "tx done, slot %d\n", tx_tail);
}
dev_kfree_skb_irq(skb);
}
@@ -646,9 +641,8 @@ static netdev_tx_t de_start_xmit (struct sk_buff *skb,
wmb();
de->tx_head = NEXT_TX(entry);
- if (netif_msg_tx_queued(de))
- printk(KERN_DEBUG "%s: tx queued, slot %d, skblen %d\n",
- dev->name, entry, skb->len);
+ netif_dbg(de, tx_queued, dev, "tx queued, slot %d, skblen %d\n",
+ entry, skb->len);
if (tx_free == 0)
netif_stop_queue(dev);
@@ -873,7 +867,7 @@ static void de_stop_rxtx (struct de_private *de)
udelay(100);
}
- dev_warn(&de->dev->dev, "timeout expired stopping DMA\n");
+ netdev_warn(de->dev, "timeout expired, stopping DMA\n");
}
static inline void de_start_rxtx (struct de_private *de)
@@ -907,9 +901,8 @@ static void de_link_up(struct de_private *de)
{
if (!netif_carrier_ok(de->dev)) {
netif_carrier_on(de->dev);
- if (netif_msg_link(de))
- dev_info(&de->dev->dev, "link up, media %s\n",
- media_name[de->media_type]);
+ netif_info(de, link, de->dev, "link up, media %s\n",
+ media_name[de->media_type]);
}
}
@@ -917,8 +910,7 @@ static void de_link_down(struct de_private *de)
{
if (netif_carrier_ok(de->dev)) {
netif_carrier_off(de->dev);
- if (netif_msg_link(de))
- dev_info(&de->dev->dev, "link down\n");
+ netif_info(de, link, de->dev, "link down\n");
}
}
@@ -928,8 +920,7 @@ static void de_set_media (struct de_private *de)
u32 macmode = dr32(MacMode);
if (de_is_running(de))
- dev_warn(&de->dev->dev,
- "chip is running while changing media!\n");
+ netdev_warn(de->dev, "chip is running while changing media!\n");
if (de->de21040)
dw32(CSR11, FULL_DUPLEX_MAGIC);
@@ -948,18 +939,13 @@ static void de_set_media (struct de_private *de)
else
macmode &= ~FullDuplex;
- if (netif_msg_link(de))
- dev_info(&de->dev->dev, "set link %s\n", media_name[media]);
- if (netif_msg_hw(de)) {
- dev_info(&de->dev->dev, "mode 0x%x, sia 0x%x,0x%x,0x%x,0x%x\n",
- dr32(MacMode), dr32(SIAStatus),
- dr32(CSR13), dr32(CSR14), dr32(CSR15));
-
- dev_info(&de->dev->dev,
- "set mode 0x%x, set sia 0x%x,0x%x,0x%x\n",
- macmode, de->media[media].csr13,
- de->media[media].csr14, de->media[media].csr15);
- }
+ netif_info(de, link, de->dev, "set link %s\n", media_name[media]);
+ netif_info(de, hw, de->dev, "mode 0x%x, sia 0x%x,0x%x,0x%x,0x%x\n",
+ dr32(MacMode), dr32(SIAStatus),
+ dr32(CSR13), dr32(CSR14), dr32(CSR15));
+ netif_info(de, hw, de->dev, "set mode 0x%x, set sia 0x%x,0x%x,0x%x\n",
+ macmode, de->media[media].csr13,
+ de->media[media].csr14, de->media[media].csr15);
if (macmode != dr32(MacMode))
dw32(MacMode, macmode);
}
@@ -996,9 +982,8 @@ static void de21040_media_timer (unsigned long data)
if (!netif_carrier_ok(dev))
de_link_up(de);
else
- if (netif_msg_timer(de))
- dev_info(&dev->dev, "%s link ok, status %x\n",
- media_name[de->media_type], status);
+ netif_info(de, timer, dev, "%s link ok, status %x\n",
+ media_name[de->media_type], status);
return;
}
@@ -1025,9 +1010,8 @@ no_link_yet:
de->media_timer.expires = jiffies + DE_TIMER_NO_LINK;
add_timer(&de->media_timer);
- if (netif_msg_timer(de))
- dev_info(&dev->dev, "no link, trying media %s, status %x\n",
- media_name[de->media_type], status);
+ netif_info(de, timer, dev, "no link, trying media %s, status %x\n",
+ media_name[de->media_type], status);
}
static unsigned int de_ok_to_advertise (struct de_private *de, u32 new_media)
@@ -1085,11 +1069,10 @@ static void de21041_media_timer (unsigned long data)
if (!netif_carrier_ok(dev))
de_link_up(de);
else
- if (netif_msg_timer(de))
- dev_info(&dev->dev,
- "%s link ok, mode %x status %x\n",
- media_name[de->media_type],
- dr32(MacMode), status);
+ netif_info(de, timer, dev,
+ "%s link ok, mode %x status %x\n",
+ media_name[de->media_type],
+ dr32(MacMode), status);
return;
}
@@ -1163,9 +1146,8 @@ no_link_yet:
de->media_timer.expires = jiffies + DE_TIMER_NO_LINK;
add_timer(&de->media_timer);
- if (netif_msg_timer(de))
- dev_info(&dev->dev, "no link, trying media %s, status %x\n",
- media_name[de->media_type], status);
+ netif_info(de, timer, dev, "no link, trying media %s, status %x\n",
+ media_name[de->media_type], status);
}
static void de_media_interrupt (struct de_private *de, u32 status)
@@ -1401,14 +1383,13 @@ static int de_open (struct net_device *dev)
struct de_private *de = netdev_priv(dev);
int rc;
- if (netif_msg_ifup(de))
- printk(KERN_DEBUG "%s: enabling interface\n", dev->name);
+ netif_dbg(de, ifup, dev, "enabling interface\n");
de->rx_buf_sz = (dev->mtu <= 1500 ? PKT_BUF_SZ : dev->mtu + 32);
rc = de_alloc_rings(de);
if (rc) {
- dev_err(&dev->dev, "ring allocation failure, err=%d\n", rc);
+ netdev_err(dev, "ring allocation failure, err=%d\n", rc);
return rc;
}
@@ -1416,14 +1397,14 @@ static int de_open (struct net_device *dev)
rc = request_irq(dev->irq, de_interrupt, IRQF_SHARED, dev->name, dev);
if (rc) {
- dev_err(&dev->dev, "IRQ %d request failure, err=%d\n",
- dev->irq, rc);
+ netdev_err(dev, "IRQ %d request failure, err=%d\n",
+ dev->irq, rc);
goto err_out_free;
}
rc = de_init_hw(de);
if (rc) {
- dev_err(&dev->dev, "h/w init failure, err=%d\n", rc);
+ netdev_err(dev, "h/w init failure, err=%d\n", rc);
goto err_out_free_irq;
}
@@ -1444,8 +1425,7 @@ static int de_close (struct net_device *dev)
struct de_private *de = netdev_priv(dev);
unsigned long flags;
- if (netif_msg_ifdown(de))
- printk(KERN_DEBUG "%s: disabling interface\n", dev->name);
+ netif_dbg(de, ifdown, dev, "disabling interface\n");
del_timer_sync(&de->media_timer);
@@ -1466,9 +1446,9 @@ static void de_tx_timeout (struct net_device *dev)
{
struct de_private *de = netdev_priv(dev);
- printk(KERN_DEBUG "%s: NIC status %08x mode %08x sia %08x desc %u/%u/%u\n",
- dev->name, dr32(MacStatus), dr32(MacMode), dr32(SIAStatus),
- de->rx_tail, de->tx_head, de->tx_tail);
+ netdev_dbg(dev, "NIC status %08x mode %08x sia %08x desc %u/%u/%u\n",
+ dr32(MacStatus), dr32(MacMode), dr32(SIAStatus),
+ de->rx_tail, de->tx_head, de->tx_tail);
del_timer_sync(&de->media_timer);
@@ -1518,18 +1498,17 @@ static int __de_get_settings(struct de_private *de, struct ethtool_cmd *ecmd)
switch (de->media_type) {
case DE_MEDIA_AUI:
ecmd->port = PORT_AUI;
- ecmd->speed = 5;
break;
case DE_MEDIA_BNC:
ecmd->port = PORT_BNC;
- ecmd->speed = 2;
break;
default:
ecmd->port = PORT_TP;
- ecmd->speed = SPEED_10;
break;
}
+ ethtool_cmd_speed_set(ecmd, 10);
+
if (dr32(MacMode) & FullDuplex)
ecmd->duplex = DUPLEX_FULL;
else
@@ -1550,9 +1529,7 @@ static int __de_set_settings(struct de_private *de, struct ethtool_cmd *ecmd)
u32 new_media;
unsigned int media_lock;
- if (ecmd->speed != SPEED_10 && ecmd->speed != 5 && ecmd->speed != 2)
- return -EINVAL;
- if (de->de21040 && ecmd->speed == 2)
+ if (ethtool_cmd_speed(ecmd) != 10)
return -EINVAL;
if (ecmd->duplex != DUPLEX_HALF && ecmd->duplex != DUPLEX_FULL)
return -EINVAL;
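[Editor's note: the __de_get_settings/__de_set_settings hunks switch to the ethtool_cmd_speed helpers and stop reporting the fake speed codes 5 (AUI) and 2 (BNC); the link is always 10 Mb/s regardless of port. The helpers exist because struct ethtool_cmd splits the speed across two 16-bit fields; in the ethtool.h of this era they are essentially:

	static inline void ethtool_cmd_speed_set(struct ethtool_cmd *ep, __u32 speed)
	{
		ep->speed = (__u16)speed;
		ep->speed_hi = (__u16)(speed >> 16);
	}

	static inline __u32 ethtool_cmd_speed(const struct ethtool_cmd *ep)
	{
		return (ep->speed_hi << 16) | ep->speed;
	}

Drivers that only set ecmd->speed directly would truncate speeds above 65535 Mb/s, which is why the accessors are preferred even for fixed 10 Mb/s hardware.]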
@@ -1696,9 +1673,8 @@ static int de_nway_reset(struct net_device *dev)
status = dr32(SIAStatus);
dw32(SIAStatus, (status & ~NWayState) | NWayRestart);
- if (netif_msg_link(de))
- dev_info(&de->dev->dev, "link nway restart, status %x,%x\n",
- status, dr32(SIAStatus));
+ netif_info(de, link, dev, "link nway restart, status %x,%x\n",
+ status, dr32(SIAStatus));
return 0;
}
@@ -1743,7 +1719,8 @@ static void __devinit de21040_get_mac_address (struct de_private *de)
de->dev->dev_addr[i] = value;
udelay(1);
if (boguscnt <= 0)
- pr_warning(PFX "timeout reading 21040 MAC address byte %u\n", i);
+ pr_warn("timeout reading 21040 MAC address byte %u\n",
+ i);
}
}
@@ -1929,8 +1906,10 @@ static void __devinit de21041_get_srom_info (struct de_private *de)
de->media[idx].csr14,
de->media[idx].csr15);
- } else if (netif_msg_probe(de))
- pr_cont("\n");
+ } else {
+ if (netif_msg_probe(de))
+ pr_cont("\n");
+ }
if (bufp > ((void *)&ee_data[DE_EEPROM_SIZE - 3]))
break;
@@ -1999,7 +1978,7 @@ static int __devinit de_init_one (struct pci_dev *pdev,
#ifndef MODULE
if (board_idx == 0)
- printk("%s", version);
+ pr_info("%s\n", version);
#endif
/* allocate a new ethernet device structure, and fill in defaults */
@@ -2041,7 +2020,7 @@ static int __devinit de_init_one (struct pci_dev *pdev,
/* check for invalid IRQ value */
if (pdev->irq < 2) {
rc = -EIO;
- pr_err(PFX "invalid irq (%d) for pci dev %s\n",
+ pr_err("invalid irq (%d) for pci dev %s\n",
pdev->irq, pci_name(pdev));
goto err_out_res;
}
@@ -2052,12 +2031,12 @@ static int __devinit de_init_one (struct pci_dev *pdev,
pciaddr = pci_resource_start(pdev, 1);
if (!pciaddr) {
rc = -EIO;
- pr_err(PFX "no MMIO resource for pci dev %s\n", pci_name(pdev));
+ pr_err("no MMIO resource for pci dev %s\n", pci_name(pdev));
goto err_out_res;
}
if (pci_resource_len(pdev, 1) < DE_REGS_SIZE) {
rc = -EIO;
- pr_err(PFX "MMIO resource (%llx) too small on pci dev %s\n",
+ pr_err("MMIO resource (%llx) too small on pci dev %s\n",
(unsigned long long)pci_resource_len(pdev, 1),
pci_name(pdev));
goto err_out_res;
@@ -2067,7 +2046,7 @@ static int __devinit de_init_one (struct pci_dev *pdev,
regs = ioremap_nocache(pciaddr, DE_REGS_SIZE);
if (!regs) {
rc = -EIO;
- pr_err(PFX "Cannot map PCI MMIO (%llx@%lx) on pci dev %s\n",
+ pr_err("Cannot map PCI MMIO (%llx@%lx) on pci dev %s\n",
(unsigned long long)pci_resource_len(pdev, 1),
pciaddr, pci_name(pdev));
goto err_out_res;
@@ -2080,7 +2059,7 @@ static int __devinit de_init_one (struct pci_dev *pdev,
/* make sure hardware is not running */
rc = de_reset_mac(de);
if (rc) {
- pr_err(PFX "Cannot reset MAC, pci dev %s\n", pci_name(pdev));
+ pr_err("Cannot reset MAC, pci dev %s\n", pci_name(pdev));
goto err_out_iomap;
}
@@ -2100,11 +2079,11 @@ static int __devinit de_init_one (struct pci_dev *pdev,
goto err_out_iomap;
/* print info about board and interface just registered */
- dev_info(&dev->dev, "%s at 0x%lx, %pM, IRQ %d\n",
- de->de21040 ? "21040" : "21041",
- dev->base_addr,
- dev->dev_addr,
- dev->irq);
+ netdev_info(dev, "%s at 0x%lx, %pM, IRQ %d\n",
+ de->de21040 ? "21040" : "21041",
+ dev->base_addr,
+ dev->dev_addr,
+ dev->irq);
pci_set_drvdata(pdev, dev);
@@ -2192,7 +2171,7 @@ static int de_resume (struct pci_dev *pdev)
if (!netif_running(dev))
goto out_attach;
if ((retval = pci_enable_device(pdev))) {
- dev_err(&dev->dev, "pci_enable_device failed in resume\n");
+ netdev_err(dev, "pci_enable_device failed in resume\n");
goto out;
}
pci_set_master(pdev);
@@ -2221,7 +2200,7 @@ static struct pci_driver de_driver = {
static int __init de_init (void)
{
#ifdef MODULE
- printk("%s", version);
+ pr_info("%s\n", version);
#endif
return pci_register_driver(&de_driver);
}
diff --git a/drivers/net/tulip/de4x5.c b/drivers/net/tulip/de4x5.c
index 4dbd493b996b..45144d5bd11b 100644
--- a/drivers/net/tulip/de4x5.c
+++ b/drivers/net/tulip/de4x5.c
@@ -79,7 +79,7 @@
every usable DECchip board, I pinched Donald's 'next_module' field to
link my modules together.
- Upto 15 EISA cards can be supported under this driver, limited primarily
+ Up to 15 EISA cards can be supported under this driver, limited primarily
by the available IRQ lines. I have checked different configurations of
multiple depca, EtherWORKS 3 cards and de4x5 cards and have not found a
problem yet (provided you have at least depca.c v0.38) ...
@@ -517,7 +517,7 @@ struct mii_phy {
u_int mci; /* 21142 MII Connector Interrupt info */
};
-#define DE4X5_MAX_PHY 8 /* Allow upto 8 attached PHY devices per board */
+#define DE4X5_MAX_PHY 8 /* Allow up to 8 attached PHY devices per board */
struct sia_phy {
u_char mc; /* Media Code */
@@ -1436,7 +1436,7 @@ de4x5_sw_reset(struct net_device *dev)
/* Poll for setup frame completion (adapter interrupts are disabled now) */
- for (j=0, i=0;(i<500) && (j==0);i++) { /* Upto 500ms delay */
+ for (j=0, i=0;(i<500) && (j==0);i++) { /* Up to 500ms delay */
mdelay(1);
if ((s32)le32_to_cpu(lp->tx_ring[lp->tx_new].status) >= 0) j=1;
}
@@ -1995,7 +1995,7 @@ SetMulticastFilter(struct net_device *dev)
static u_char de4x5_irq[] = EISA_ALLOWED_IRQ_LIST;
-static int __init de4x5_eisa_probe (struct device *gendev)
+static int __devinit de4x5_eisa_probe (struct device *gendev)
{
struct eisa_device *edev;
u_long iobase;
@@ -2097,7 +2097,7 @@ static int __devexit de4x5_eisa_remove (struct device *device)
return 0;
}
-static struct eisa_device_id de4x5_eisa_ids[] = {
+static const struct eisa_device_id de4x5_eisa_ids[] __devinitconst = {
{ "DEC4250", 0 }, /* 0 is the board name index... */
{ "" }
};
diff --git a/drivers/net/tulip/dmfe.c b/drivers/net/tulip/dmfe.c
index 7064e035757a..468512731966 100644
--- a/drivers/net/tulip/dmfe.c
+++ b/drivers/net/tulip/dmfe.c
@@ -295,8 +295,7 @@ enum dmfe_CR6_bits {
/* Global variable declaration ----------------------------- */
static int __devinitdata printed_version;
static const char version[] __devinitconst =
- KERN_INFO DRV_NAME ": Davicom DM9xxx net driver, version "
- DRV_VERSION " (" DRV_RELDATE ")\n";
+ "Davicom DM9xxx net driver, version " DRV_VERSION " (" DRV_RELDATE ")";
static int dmfe_debug;
static unsigned char dmfe_media_mode = DMFE_AUTO;
@@ -381,7 +380,7 @@ static int __devinit dmfe_init_one (struct pci_dev *pdev,
DMFE_DBUG(0, "dmfe_init_one()", 0);
if (!printed_version++)
- printk(version);
+ pr_info("%s\n", version);
/*
* SPARC on-board DM910x chips should be handled by the main
@@ -406,7 +405,7 @@ static int __devinit dmfe_init_one (struct pci_dev *pdev,
SET_NETDEV_DEV(dev, &pdev->dev);
if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) {
- pr_warning("32-bit PCI DMA not available\n");
+ pr_warn("32-bit PCI DMA not available\n");
err = -ENODEV;
goto err_out_free;
}
@@ -1224,7 +1223,7 @@ static void dmfe_timer(unsigned long data)
/* If chip reports that link is failed it could be because external
- PHY link status pin is not conected correctly to chip
+ PHY link status pin is not connected correctly to chip
To be sure ask PHY too.
*/
@@ -2203,7 +2202,7 @@ static int __init dmfe_init_module(void)
{
int rc;
- printk(version);
+ pr_info("%s\n", version);
printed_version = 1;
DMFE_DBUG(0, "init_module() ", debug);
diff --git a/drivers/net/tulip/eeprom.c b/drivers/net/tulip/eeprom.c
index 3031ed9c4a1a..fa5eee925f25 100644
--- a/drivers/net/tulip/eeprom.c
+++ b/drivers/net/tulip/eeprom.c
@@ -115,7 +115,7 @@ static void __devinit tulip_build_fake_mediatable(struct tulip_private *tp)
0x02, /* phy reset sequence length */
0x01, 0x00, /* phy reset sequence */
0x00, 0x78, /* media capabilities */
- 0x00, 0xe0, /* nway advertisment */
+ 0x00, 0xe0, /* nway advertisement */
0x00, 0x05, /* fdx bit map */
0x00, 0x06 /* ttm bit map */
};
@@ -222,8 +222,8 @@ subsequent_board:
/* there is no phy information, don't even try to build mtable */
if (count == 0) {
if (tulip_debug > 0)
- pr_warning("%s: no phy info, aborting mtable build\n",
- dev->name);
+ pr_warn("%s: no phy info, aborting mtable build\n",
+ dev->name);
return;
}
diff --git a/drivers/net/tulip/interrupt.c b/drivers/net/tulip/interrupt.c
index 0013642903ee..5350d753e0ff 100644
--- a/drivers/net/tulip/interrupt.c
+++ b/drivers/net/tulip/interrupt.c
@@ -125,12 +125,12 @@ int tulip_poll(struct napi_struct *napi, int budget)
#endif
if (tulip_debug > 4)
- printk(KERN_DEBUG " In tulip_rx(), entry %d %08x\n",
- entry, tp->rx_ring[entry].status);
+ netdev_dbg(dev, " In tulip_rx(), entry %d %08x\n",
+ entry, tp->rx_ring[entry].status);
do {
if (ioread32(tp->base_addr + CSR5) == 0xffffffff) {
- printk(KERN_DEBUG " In tulip_poll(), hardware disappeared\n");
+ netdev_dbg(dev, " In tulip_poll(), hardware disappeared\n");
break;
}
/* Acknowledge current RX interrupt sources. */
@@ -145,9 +145,9 @@ int tulip_poll(struct napi_struct *napi, int budget)
if (tp->dirty_rx + RX_RING_SIZE == tp->cur_rx)
break;
- if (tulip_debug > 5)
- printk(KERN_DEBUG "%s: In tulip_rx(), entry %d %08x\n",
- dev->name, entry, status);
+ if (tulip_debug > 5)
+ netdev_dbg(dev, "In tulip_rx(), entry %d %08x\n",
+ entry, status);
if (++work_done >= budget)
goto not_done;
@@ -184,9 +184,9 @@ int tulip_poll(struct napi_struct *napi, int budget)
}
} else {
/* There was a fatal error. */
- if (tulip_debug > 2)
- printk(KERN_DEBUG "%s: Receive error, Rx status %08x\n",
- dev->name, status);
+ if (tulip_debug > 2)
+ netdev_dbg(dev, "Receive error, Rx status %08x\n",
+ status);
dev->stats.rx_errors++; /* end of a packet.*/
if (pkt_len > 1518 ||
(status & RxDescRunt))
@@ -367,16 +367,16 @@ static int tulip_rx(struct net_device *dev)
int received = 0;
if (tulip_debug > 4)
- printk(KERN_DEBUG " In tulip_rx(), entry %d %08x\n",
- entry, tp->rx_ring[entry].status);
+ netdev_dbg(dev, "In tulip_rx(), entry %d %08x\n",
+ entry, tp->rx_ring[entry].status);
/* If we own the next entry, it is a new packet. Send it up. */
while ( ! (tp->rx_ring[entry].status & cpu_to_le32(DescOwned))) {
s32 status = le32_to_cpu(tp->rx_ring[entry].status);
short pkt_len;
if (tulip_debug > 5)
- printk(KERN_DEBUG "%s: In tulip_rx(), entry %d %08x\n",
- dev->name, entry, status);
+ netdev_dbg(dev, "In tulip_rx(), entry %d %08x\n",
+ entry, status);
if (--rx_work_limit < 0)
break;
@@ -404,16 +404,16 @@ static int tulip_rx(struct net_device *dev)
/* Ignore earlier buffers. */
if ((status & 0xffff) != 0x7fff) {
if (tulip_debug > 1)
- dev_warn(&dev->dev,
- "Oversized Ethernet frame spanned multiple buffers, status %08x!\n",
- status);
+ netdev_warn(dev,
+ "Oversized Ethernet frame spanned multiple buffers, status %08x!\n",
+ status);
dev->stats.rx_length_errors++;
}
} else {
/* There was a fatal error. */
if (tulip_debug > 2)
- printk(KERN_DEBUG "%s: Receive error, Rx status %08x\n",
- dev->name, status);
+ netdev_dbg(dev, "Receive error, Rx status %08x\n",
+ status);
dev->stats.rx_errors++; /* end of a packet.*/
if (pkt_len > 1518 ||
(status & RxDescRunt))
@@ -573,8 +573,8 @@ irqreturn_t tulip_interrupt(int irq, void *dev_instance)
#endif /* CONFIG_TULIP_NAPI */
if (tulip_debug > 4)
- printk(KERN_DEBUG "%s: interrupt csr5=%#8.8x new csr5=%#8.8x\n",
- dev->name, csr5, ioread32(ioaddr + CSR5));
+ netdev_dbg(dev, "interrupt csr5=%#8.8x new csr5=%#8.8x\n",
+ csr5, ioread32(ioaddr + CSR5));
if (csr5 & (TxNoBuf | TxDied | TxIntr | TimerInt)) {
@@ -605,8 +605,8 @@ irqreturn_t tulip_interrupt(int irq, void *dev_instance)
/* There was a major error, log it. */
#ifndef final_version
if (tulip_debug > 1)
- printk(KERN_DEBUG "%s: Transmit error, Tx status %08x\n",
- dev->name, status);
+ netdev_dbg(dev, "Transmit error, Tx status %08x\n",
+ status);
#endif
dev->stats.tx_errors++;
if (status & 0x4104)
@@ -804,8 +804,8 @@ irqreturn_t tulip_interrupt(int irq, void *dev_instance)
}
if (tulip_debug > 4)
- printk(KERN_DEBUG "%s: exiting interrupt, csr5=%#04x\n",
- dev->name, ioread32(ioaddr + CSR5));
+ netdev_dbg(dev, "exiting interrupt, csr5=%#04x\n",
+ ioread32(ioaddr + CSR5));
return IRQ_HANDLED;
}
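
One behavioural note on the printk(KERN_DEBUG ...) -> netdev_dbg() conversions in interrupt.c above: KERN_DEBUG messages are always built into the kernel, whereas netdev_dbg()/pr_debug() compile away unless DEBUG is defined for the file or CONFIG_DYNAMIC_DEBUG is enabled (in which case individual call sites can be switched on at run time). A simplified, hedged sketch of the guard (the real definition lives in <linux/netdevice.h>):

    #if defined(DEBUG)
    #define netdev_dbg(dev, fmt, ...) \
            netdev_printk(KERN_DEBUG, dev, fmt, ##__VA_ARGS__)
    #elif defined(CONFIG_DYNAMIC_DEBUG)
    #define netdev_dbg(dev, fmt, ...) \
            dynamic_netdev_dbg(dev, fmt, ##__VA_ARGS__)
    #else
    #define netdev_dbg(dev, fmt, ...) do { } while (0)  /* compiled out */
    #endif
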
diff --git a/drivers/net/tulip/media.c b/drivers/net/tulip/media.c
index a0c770ee4b64..4bd13922875d 100644
--- a/drivers/net/tulip/media.c
+++ b/drivers/net/tulip/media.c
@@ -182,8 +182,8 @@ void tulip_select_media(struct net_device *dev, int startup)
switch (mleaf->type) {
case 0: /* 21140 non-MII xcvr. */
if (tulip_debug > 1)
- printk(KERN_DEBUG "%s: Using a 21140 non-MII transceiver with control setting %02x\n",
- dev->name, p[1]);
+ netdev_dbg(dev, "Using a 21140 non-MII transceiver with control setting %02x\n",
+ p[1]);
dev->if_port = p[0];
if (startup)
iowrite32(mtable->csr12dir | 0x100, ioaddr + CSR12);
@@ -204,15 +204,14 @@ void tulip_select_media(struct net_device *dev, int startup)
struct medialeaf *rleaf = &mtable->mleaf[mtable->has_reset];
unsigned char *rst = rleaf->leafdata;
if (tulip_debug > 1)
- printk(KERN_DEBUG "%s: Resetting the transceiver\n",
- dev->name);
+ netdev_dbg(dev, "Resetting the transceiver\n");
for (i = 0; i < rst[0]; i++)
iowrite32(get_u16(rst + 1 + (i<<1)) << 16, ioaddr + CSR15);
}
if (tulip_debug > 1)
- printk(KERN_DEBUG "%s: 21143 non-MII %s transceiver control %04x/%04x\n",
- dev->name, medianame[dev->if_port],
- setup[0], setup[1]);
+ netdev_dbg(dev, "21143 non-MII %s transceiver control %04x/%04x\n",
+ medianame[dev->if_port],
+ setup[0], setup[1]);
if (p[0] & 0x40) { /* SIA (CSR13-15) setup values are provided. */
csr13val = setup[0];
csr14val = setup[1];
@@ -239,8 +238,8 @@ void tulip_select_media(struct net_device *dev, int startup)
if (startup) iowrite32(csr13val, ioaddr + CSR13);
}
if (tulip_debug > 1)
- printk(KERN_DEBUG "%s: Setting CSR15 to %08x/%08x\n",
- dev->name, csr15dir, csr15val);
+ netdev_dbg(dev, "Setting CSR15 to %08x/%08x\n",
+ csr15dir, csr15val);
if (mleaf->type == 4)
new_csr6 = 0x82020000 | ((setup[2] & 0x71) << 18);
else
@@ -316,9 +315,9 @@ void tulip_select_media(struct net_device *dev, int startup)
if (tp->mii_advertise == 0)
tp->mii_advertise = tp->advertising[phy_num];
if (tulip_debug > 1)
- printk(KERN_DEBUG "%s: Advertising %04x on MII %d\n",
- dev->name, tp->mii_advertise,
- tp->phys[phy_num]);
+ netdev_dbg(dev, " Advertising %04x on MII %d\n",
+ tp->mii_advertise,
+ tp->phys[phy_num]);
tulip_mdio_write(dev, tp->phys[phy_num], 4, tp->mii_advertise);
}
break;
@@ -335,8 +334,7 @@ void tulip_select_media(struct net_device *dev, int startup)
struct medialeaf *rleaf = &mtable->mleaf[mtable->has_reset];
unsigned char *rst = rleaf->leafdata;
if (tulip_debug > 1)
- printk(KERN_DEBUG "%s: Resetting the transceiver\n",
- dev->name);
+ netdev_dbg(dev, "Resetting the transceiver\n");
for (i = 0; i < rst[0]; i++)
iowrite32(get_u16(rst + 1 + (i<<1)) << 16, ioaddr + CSR15);
}
@@ -344,20 +342,21 @@ void tulip_select_media(struct net_device *dev, int startup)
break;
}
default:
- printk(KERN_DEBUG "%s: Invalid media table selection %d\n",
- dev->name, mleaf->type);
+ netdev_dbg(dev, " Invalid media table selection %d\n",
+ mleaf->type);
new_csr6 = 0x020E0000;
}
if (tulip_debug > 1)
- printk(KERN_DEBUG "%s: Using media type %s, CSR12 is %02x\n",
- dev->name, medianame[dev->if_port],
+ netdev_dbg(dev, "Using media type %s, CSR12 is %02x\n",
+ medianame[dev->if_port],
ioread32(ioaddr + CSR12) & 0xff);
} else if (tp->chip_id == LC82C168) {
if (startup && ! tp->medialock)
dev->if_port = tp->mii_cnt ? 11 : 0;
if (tulip_debug > 1)
- printk(KERN_DEBUG "%s: PNIC PHY status is %3.3x, media %s\n",
- dev->name, ioread32(ioaddr + 0xB8), medianame[dev->if_port]);
+ netdev_dbg(dev, "PNIC PHY status is %3.3x, media %s\n",
+ ioread32(ioaddr + 0xB8),
+ medianame[dev->if_port]);
if (tp->mii_cnt) {
new_csr6 = 0x810C0000;
iowrite32(0x0001, ioaddr + CSR15);
@@ -388,9 +387,9 @@ void tulip_select_media(struct net_device *dev, int startup)
} else
new_csr6 = 0x03860000;
if (tulip_debug > 1)
- printk(KERN_DEBUG "%s: No media description table, assuming %s transceiver, CSR12 %02x\n",
- dev->name, medianame[dev->if_port],
- ioread32(ioaddr + CSR12));
+ netdev_dbg(dev, "No media description table, assuming %s transceiver, CSR12 %02x\n",
+ medianame[dev->if_port],
+ ioread32(ioaddr + CSR12));
}
tp->csr6 = new_csr6 | (tp->csr6 & 0xfdff) | (tp->full_duplex ? 0x0200 : 0);
@@ -504,8 +503,8 @@ void __devinit tulip_find_mii (struct net_device *dev, int board_idx)
/* Fixup for DLink with miswired PHY. */
if (mii_advert != to_advert) {
- printk(KERN_DEBUG "tulip%d: Advertising %04x on PHY %d, previously advertising %04x\n",
- board_idx, to_advert, phy, mii_advert);
+ pr_debug("tulip%d: Advertising %04x on PHY %d, previously advertising %04x\n",
+ board_idx, to_advert, phy, mii_advert);
tulip_mdio_write (dev, phy, 4, to_advert);
}
diff --git a/drivers/net/tulip/pnic.c b/drivers/net/tulip/pnic.c
index a63e64b6863d..aa4d9dad0395 100644
--- a/drivers/net/tulip/pnic.c
+++ b/drivers/net/tulip/pnic.c
@@ -40,8 +40,8 @@ void pnic_do_nway(struct net_device *dev)
new_csr6 |= 0x00000200;
}
if (tulip_debug > 1)
- printk(KERN_DEBUG "%s: PNIC autonegotiated status %08x, %s\n",
- dev->name, phy_reg, medianame[dev->if_port]);
+ netdev_dbg(dev, "PNIC autonegotiated status %08x, %s\n",
+ phy_reg, medianame[dev->if_port]);
if (tp->csr6 != new_csr6) {
tp->csr6 = new_csr6;
/* Restart Tx */
@@ -58,8 +58,8 @@ void pnic_lnk_change(struct net_device *dev, int csr5)
int phy_reg = ioread32(ioaddr + 0xB8);
if (tulip_debug > 1)
- printk(KERN_DEBUG "%s: PNIC link changed state %08x, CSR5 %08x\n",
- dev->name, phy_reg, csr5);
+ netdev_dbg(dev, "PNIC link changed state %08x, CSR5 %08x\n",
+ phy_reg, csr5);
if (ioread32(ioaddr + CSR5) & TPLnkFail) {
iowrite32((ioread32(ioaddr + CSR7) & ~TPLnkFail) | TPLnkPass, ioaddr + CSR7);
/* If we use an external MII, then we mustn't use the
@@ -114,8 +114,8 @@ void pnic_timer(unsigned long data)
int csr5 = ioread32(ioaddr + CSR5);
if (tulip_debug > 1)
- printk(KERN_DEBUG "%s: PNIC timer PHY status %08x, %s CSR5 %08x\n",
- dev->name, phy_reg, medianame[dev->if_port], csr5);
+ netdev_dbg(dev, "PNIC timer PHY status %08x, %s CSR5 %08x\n",
+ phy_reg, medianame[dev->if_port], csr5);
if (phy_reg & 0x04000000) { /* Remote link fault */
iowrite32(0x0201F078, ioaddr + 0xB8);
next_tick = 1*HZ;
@@ -125,11 +125,11 @@ void pnic_timer(unsigned long data)
next_tick = 60*HZ;
} else if (csr5 & TPLnkFail) { /* 100baseTx link beat */
if (tulip_debug > 1)
- printk(KERN_DEBUG "%s: %s link beat failed, CSR12 %04x, CSR5 %08x, PHY %03x\n",
- dev->name, medianame[dev->if_port],
- csr12,
- ioread32(ioaddr + CSR5),
- ioread32(ioaddr + 0xB8));
+ netdev_dbg(dev, "%s link beat failed, CSR12 %04x, CSR5 %08x, PHY %03x\n",
+ medianame[dev->if_port],
+ csr12,
+ ioread32(ioaddr + CSR5),
+ ioread32(ioaddr + 0xB8));
next_tick = 3*HZ;
if (tp->medialock) {
} else if (tp->nwayset && (dev->if_port & 1)) {
diff --git a/drivers/net/tulip/pnic2.c b/drivers/net/tulip/pnic2.c
index 4690c8e69207..93358ee4d830 100644
--- a/drivers/net/tulip/pnic2.c
+++ b/drivers/net/tulip/pnic2.c
@@ -125,8 +125,8 @@ void pnic2_start_nway(struct net_device *dev)
csr14 |= 0x00001184;
if (tulip_debug > 1)
- printk(KERN_DEBUG "%s: Restarting PNIC2 autonegotiation, csr14=%08x\n",
- dev->name, csr14);
+ netdev_dbg(dev, "Restarting PNIC2 autonegotiation, csr14=%08x\n",
+ csr14);
/* tell pnic2_lnk_change we are doing an nway negotiation */
dev->if_port = 0;
@@ -137,8 +137,7 @@ void pnic2_start_nway(struct net_device *dev)
tp->csr6 = ioread32(ioaddr + CSR6);
if (tulip_debug > 1)
- printk(KERN_DEBUG "%s: On Entry to Nway, csr6=%08x\n",
- dev->name, tp->csr6);
+ netdev_dbg(dev, "On Entry to Nway, csr6=%08x\n", tp->csr6);
/* mask off any bits not to touch
* comment at top of file explains mask value
@@ -271,9 +270,10 @@ void pnic2_lnk_change(struct net_device *dev, int csr5)
iowrite32(1, ioaddr + CSR13);
if (tulip_debug > 2)
- printk(KERN_DEBUG "%s: Setting CSR6 %08x/%x CSR12 %08x\n",
- dev->name, tp->csr6,
- ioread32(ioaddr + CSR6), ioread32(ioaddr + CSR12));
+ netdev_dbg(dev, "Setting CSR6 %08x/%x CSR12 %08x\n",
+ tp->csr6,
+ ioread32(ioaddr + CSR6),
+ ioread32(ioaddr + CSR12));
/* now the following actually writes out the
* new csr6 values
@@ -324,7 +324,7 @@ void pnic2_lnk_change(struct net_device *dev, int csr5)
/* Link blew? Maybe restart NWay. */
if (tulip_debug > 2)
- printk(KERN_DEBUG "%s: Ugh! Link blew?\n", dev->name);
+ netdev_dbg(dev, "Ugh! Link blew?\n");
del_timer_sync(&tp->timer);
pnic2_start_nway(dev);
diff --git a/drivers/net/tulip/timer.c b/drivers/net/tulip/timer.c
index 36c2725ec886..2017faf2d0e6 100644
--- a/drivers/net/tulip/timer.c
+++ b/drivers/net/tulip/timer.c
@@ -28,11 +28,11 @@ void tulip_media_task(struct work_struct *work)
unsigned long flags;
if (tulip_debug > 2) {
- printk(KERN_DEBUG "%s: Media selection tick, %s, status %08x mode %08x SIA %08x %08x %08x %08x\n",
- dev->name, medianame[dev->if_port],
- ioread32(ioaddr + CSR5), ioread32(ioaddr + CSR6),
- csr12, ioread32(ioaddr + CSR13),
- ioread32(ioaddr + CSR14), ioread32(ioaddr + CSR15));
+ netdev_dbg(dev, "Media selection tick, %s, status %08x mode %08x SIA %08x %08x %08x %08x\n",
+ medianame[dev->if_port],
+ ioread32(ioaddr + CSR5), ioread32(ioaddr + CSR6),
+ csr12, ioread32(ioaddr + CSR13),
+ ioread32(ioaddr + CSR14), ioread32(ioaddr + CSR15));
}
switch (tp->chip_id) {
case DC21140:
@@ -48,9 +48,9 @@ void tulip_media_task(struct work_struct *work)
Assume this is a generic MII or SYM transceiver. */
next_tick = 60*HZ;
if (tulip_debug > 2)
- printk(KERN_DEBUG "%s: network media monitor CSR6 %08x CSR12 0x%02x\n",
- dev->name,
- ioread32(ioaddr + CSR6), csr12 & 0xff);
+ netdev_dbg(dev, "network media monitor CSR6 %08x CSR12 0x%02x\n",
+ ioread32(ioaddr + CSR6),
+ csr12 & 0xff);
break;
}
mleaf = &tp->mtable->mleaf[tp->cur_index];
@@ -62,8 +62,8 @@ void tulip_media_task(struct work_struct *work)
s8 bitnum = p[offset];
if (p[offset+1] & 0x80) {
if (tulip_debug > 1)
- printk(KERN_DEBUG "%s: Transceiver monitor tick CSR12=%#02x, no media sense\n",
- dev->name, csr12);
+ netdev_dbg(dev, "Transceiver monitor tick CSR12=%#02x, no media sense\n",
+ csr12);
if (mleaf->type == 4) {
if (mleaf->media == 3 && (csr12 & 0x02))
goto select_next_media;
@@ -71,17 +71,16 @@ void tulip_media_task(struct work_struct *work)
break;
}
if (tulip_debug > 2)
- printk(KERN_DEBUG "%s: Transceiver monitor tick: CSR12=%#02x bit %d is %d, expecting %d\n",
- dev->name, csr12, (bitnum >> 1) & 7,
- (csr12 & (1 << ((bitnum >> 1) & 7))) != 0,
- (bitnum >= 0));
+ netdev_dbg(dev, "Transceiver monitor tick: CSR12=%#02x bit %d is %d, expecting %d\n",
+ csr12, (bitnum >> 1) & 7,
+ (csr12 & (1 << ((bitnum >> 1) & 7))) != 0,
+ (bitnum >= 0));
/* Check that the specified bit has the proper value. */
if ((bitnum < 0) !=
((csr12 & (1 << ((bitnum >> 1) & 7))) != 0)) {
if (tulip_debug > 2)
- printk(KERN_DEBUG "%s: Link beat detected for %s\n",
- dev->name,
- medianame[mleaf->media & MEDIA_MASK]);
+ netdev_dbg(dev, "Link beat detected for %s\n",
+ medianame[mleaf->media & MEDIA_MASK]);
if ((p[2] & 0x61) == 0x01) /* Bogus Znyx board. */
goto actually_mii;
netif_carrier_on(dev);
@@ -99,10 +98,9 @@ void tulip_media_task(struct work_struct *work)
if (tulip_media_cap[dev->if_port] & MediaIsFD)
goto select_next_media; /* Skip FD entries. */
if (tulip_debug > 1)
- printk(KERN_DEBUG "%s: No link beat on media %s, trying transceiver type %s\n",
- dev->name,
- medianame[mleaf->media & MEDIA_MASK],
- medianame[tp->mtable->mleaf[tp->cur_index].media]);
+ netdev_dbg(dev, "No link beat on media %s, trying transceiver type %s\n",
+ medianame[mleaf->media & MEDIA_MASK],
+ medianame[tp->mtable->mleaf[tp->cur_index].media]);
tulip_select_media(dev, 0);
/* Restart the transmit process. */
tulip_restart_rxtx(tp);
@@ -166,10 +164,9 @@ void comet_timer(unsigned long data)
int next_tick = 60*HZ;
if (tulip_debug > 1)
- printk(KERN_DEBUG "%s: Comet link status %04x partner capability %04x\n",
- dev->name,
- tulip_mdio_read(dev, tp->phys[0], 1),
- tulip_mdio_read(dev, tp->phys[0], 5));
+ netdev_dbg(dev, "Comet link status %04x partner capability %04x\n",
+ tulip_mdio_read(dev, tp->phys[0], 1),
+ tulip_mdio_read(dev, tp->phys[0], 5));
/* mod_timer synchronizes us with potential add_timer calls
* from interrupts.
*/
diff --git a/drivers/net/tulip/tulip.h b/drivers/net/tulip/tulip.h
index ed66a16711dc..9db528967da9 100644
--- a/drivers/net/tulip/tulip.h
+++ b/drivers/net/tulip/tulip.h
@@ -547,11 +547,9 @@ static inline void tulip_stop_rxtx(struct tulip_private *tp)
udelay(10);
if (!i)
- printk(KERN_DEBUG "%s: tulip_stop_rxtx() failed"
- " (CSR5 0x%x CSR6 0x%x)\n",
- pci_name(tp->pdev),
- ioread32(ioaddr + CSR5),
- ioread32(ioaddr + CSR6));
+ netdev_dbg(tp->dev, "tulip_stop_rxtx() failed (CSR5 0x%x CSR6 0x%x)\n",
+ ioread32(ioaddr + CSR5),
+ ioread32(ioaddr + CSR6));
}
}
diff --git a/drivers/net/tulip/tulip_core.c b/drivers/net/tulip/tulip_core.c
index 5c01e260f1ba..82f87647207e 100644
--- a/drivers/net/tulip/tulip_core.c
+++ b/drivers/net/tulip/tulip_core.c
@@ -12,6 +12,7 @@
Please submit bugs to http://bugzilla.kernel.org/ .
*/
+#define pr_fmt(fmt) "tulip: " fmt
#define DRV_NAME "tulip"
#ifdef CONFIG_TULIP_NAPI
@@ -119,8 +120,6 @@ module_param(csr0, int, 0);
module_param_array(options, int, NULL, 0);
module_param_array(full_duplex, int, NULL, 0);
-#define PFX DRV_NAME ": "
-
#ifdef TULIP_DEBUG
int tulip_debug = TULIP_DEBUG;
#else
@@ -331,8 +330,7 @@ static void tulip_up(struct net_device *dev)
udelay(100);
if (tulip_debug > 1)
- printk(KERN_DEBUG "%s: tulip_up(), irq==%d\n",
- dev->name, dev->irq);
+ netdev_dbg(dev, "tulip_up(), irq==%d\n", dev->irq);
iowrite32(tp->rx_ring_dma, ioaddr + CSR3);
iowrite32(tp->tx_ring_dma, ioaddr + CSR4);
@@ -499,10 +497,10 @@ media_picked:
iowrite32(0, ioaddr + CSR2); /* Rx poll demand */
if (tulip_debug > 2) {
- printk(KERN_DEBUG "%s: Done tulip_up(), CSR0 %08x, CSR5 %08x CSR6 %08x\n",
- dev->name, ioread32(ioaddr + CSR0),
- ioread32(ioaddr + CSR5),
- ioread32(ioaddr + CSR6));
+ netdev_dbg(dev, "Done tulip_up(), CSR0 %08x, CSR5 %08x CSR6 %08x\n",
+ ioread32(ioaddr + CSR0),
+ ioread32(ioaddr + CSR5),
+ ioread32(ioaddr + CSR6));
}
/* Set the timer to switch to check for link beat and perhaps switch
@@ -843,8 +841,7 @@ static int tulip_close (struct net_device *dev)
tulip_down (dev);
if (tulip_debug > 1)
- dev_printk(KERN_DEBUG, &dev->dev,
- "Shutting down ethercard, status was %02x\n",
+ netdev_dbg(dev, "Shutting down ethercard, status was %02x\n",
ioread32 (ioaddr + CSR5));
free_irq (dev->irq, dev);
@@ -1207,7 +1204,7 @@ static void __devinit tulip_mwi_config (struct pci_dev *pdev,
u32 csr0;
if (tulip_debug > 3)
- printk(KERN_DEBUG "%s: tulip_mwi_config()\n", pci_name(pdev));
+ netdev_dbg(dev, "tulip_mwi_config()\n");
tp->csr0 = csr0 = 0;
@@ -1269,8 +1266,8 @@ static void __devinit tulip_mwi_config (struct pci_dev *pdev,
out:
tp->csr0 = csr0;
if (tulip_debug > 2)
- printk(KERN_DEBUG "%s: MWI config cacheline=%d, csr0=%08x\n",
- pci_name(pdev), cache, csr0);
+ netdev_dbg(dev, "MWI config cacheline=%d, csr0=%08x\n",
+ cache, csr0);
}
#endif
@@ -1340,13 +1337,13 @@ static int __devinit tulip_init_one (struct pci_dev *pdev,
*/
if (pdev->subsystem_vendor == PCI_VENDOR_ID_LMC) {
- pr_err(PFX "skipping LMC card\n");
+ pr_err("skipping LMC card\n");
return -ENODEV;
} else if (pdev->subsystem_vendor == PCI_VENDOR_ID_SBE &&
(pdev->subsystem_device == PCI_SUBDEVICE_ID_SBE_T3E3 ||
pdev->subsystem_device == PCI_SUBDEVICE_ID_SBE_2T3E3_P0 ||
pdev->subsystem_device == PCI_SUBDEVICE_ID_SBE_2T3E3_P1)) {
- pr_err(PFX "skipping SBE T3E3 port\n");
+ pr_err("skipping SBE T3E3 port\n");
return -ENODEV;
}
@@ -1362,13 +1359,13 @@ static int __devinit tulip_init_one (struct pci_dev *pdev,
if (pdev->vendor == 0x1282 && pdev->device == 0x9100 &&
pdev->revision < 0x30) {
- pr_info(PFX "skipping early DM9100 with Crc bug (use dmfe)\n");
+ pr_info("skipping early DM9100 with Crc bug (use dmfe)\n");
return -ENODEV;
}
dp = pci_device_to_OF_node(pdev);
if (!(dp && of_get_property(dp, "local-mac-address", NULL))) {
- pr_info(PFX "skipping DM910x expansion card (use dmfe)\n");
+ pr_info("skipping DM910x expansion card (use dmfe)\n");
return -ENODEV;
}
}
@@ -1415,16 +1412,14 @@ static int __devinit tulip_init_one (struct pci_dev *pdev,
i = pci_enable_device(pdev);
if (i) {
- pr_err(PFX "Cannot enable tulip board #%d, aborting\n",
- board_idx);
+ pr_err("Cannot enable tulip board #%d, aborting\n", board_idx);
return i;
}
/* The chip will fail to enter a low-power state later unless
* first explicitly commanded into D0 */
if (pci_set_power_state(pdev, PCI_D0)) {
- printk (KERN_NOTICE PFX
- "Failed to set power state to D0\n");
+ pr_notice("Failed to set power state to D0\n");
}
irq = pdev->irq;
@@ -1432,13 +1427,13 @@ static int __devinit tulip_init_one (struct pci_dev *pdev,
/* alloc_etherdev ensures aligned and zeroed private structures */
dev = alloc_etherdev (sizeof (*tp));
if (!dev) {
- pr_err(PFX "ether device alloc failed, aborting\n");
+ pr_err("ether device alloc failed, aborting\n");
return -ENOMEM;
}
SET_NETDEV_DEV(dev, &pdev->dev);
if (pci_resource_len (pdev, 0) < tulip_tbl[chip_idx].io_size) {
- pr_err(PFX "%s: I/O region (0x%llx@0x%llx) too small, aborting\n",
+ pr_err("%s: I/O region (0x%llx@0x%llx) too small, aborting\n",
pci_name(pdev),
(unsigned long long)pci_resource_len (pdev, 0),
(unsigned long long)pci_resource_start (pdev, 0));
@@ -1483,7 +1478,8 @@ static int __devinit tulip_init_one (struct pci_dev *pdev,
if (sig == 0x09811317) {
tp->flags |= COMET_PM;
tp->wolinfo.supported = WAKE_PHY | WAKE_MAGIC;
- printk(KERN_INFO "tulip_init_one: Enabled WOL support for AN983B\n");
+ pr_info("%s: Enabled WOL support for AN983B\n",
+ __func__);
}
}
tp->pdev = pdev;
@@ -1879,7 +1875,7 @@ save_state:
tulip_set_wolopts(pdev, tp->wolinfo.wolopts);
rc = pci_enable_wake(pdev, pstate, tp->wolinfo.wolopts);
if (rc)
- printk("tulip: pci_enable_wake failed (%d)\n", rc);
+ pr_err("pci_enable_wake failed (%d)\n", rc);
}
pci_set_power_state(pdev, pstate);
@@ -1905,12 +1901,12 @@ static int tulip_resume(struct pci_dev *pdev)
return 0;
if ((retval = pci_enable_device(pdev))) {
- pr_err(PFX "pci_enable_device failed in resume\n");
+ pr_err("pci_enable_device failed in resume\n");
return retval;
}
if ((retval = request_irq(dev->irq, tulip_interrupt, IRQF_SHARED, dev->name, dev))) {
- pr_err(PFX "request_irq failed in resume\n");
+ pr_err("request_irq failed in resume\n");
return retval;
}
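
The pr_fmt() define added at the top of tulip_core.c is what makes the hand-rolled PFX prefix unnecessary: the pr_* helpers expand pr_fmt() around their format string, so every pr_err()/pr_info() in the file is prefixed automatically. Roughly (simplified sketch of the mechanism in <linux/printk.h>, not the exact kernel text):

    #define pr_fmt(fmt) "tulip: " fmt                       /* per-file, defined before the #includes */
    #define pr_err(fmt, ...) printk(KERN_ERR pr_fmt(fmt), ##__VA_ARGS__)

    pr_err("skipping LMC card\n");                          /* emits "tulip: skipping LMC card" */
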
diff --git a/drivers/net/tulip/uli526x.c b/drivers/net/tulip/uli526x.c
index 74217dbf0143..9e63f406f72d 100644
--- a/drivers/net/tulip/uli526x.c
+++ b/drivers/net/tulip/uli526x.c
@@ -209,8 +209,7 @@ enum uli526x_CR6_bits {
/* Global variable declaration ----------------------------- */
static int __devinitdata printed_version;
static const char version[] __devinitconst =
- KERN_INFO DRV_NAME ": ULi M5261/M5263 net driver, version "
- DRV_VERSION " (" DRV_RELDATE ")\n";
+ "ULi M5261/M5263 net driver, version " DRV_VERSION " (" DRV_RELDATE ")";
static int uli526x_debug;
static unsigned char uli526x_media_mode = ULI526X_AUTO;
@@ -283,7 +282,7 @@ static int __devinit uli526x_init_one (struct pci_dev *pdev,
ULI526X_DBUG(0, "uli526x_init_one()", 0);
if (!printed_version++)
- printk(version);
+ pr_info("%s\n", version);
/* Init network device */
dev = alloc_etherdev(sizeof(*db));
@@ -292,7 +291,7 @@ static int __devinit uli526x_init_one (struct pci_dev *pdev,
SET_NETDEV_DEV(dev, &pdev->dev);
if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) {
- pr_warning("32-bit PCI DMA not available\n");
+ pr_warn("32-bit PCI DMA not available\n");
err = -ENODEV;
goto err_out_free;
}
@@ -390,9 +389,9 @@ static int __devinit uli526x_init_one (struct pci_dev *pdev,
if (err)
goto err_out_res;
- dev_info(&dev->dev, "ULi M%04lx at pci%s, %pM, irq %d\n",
- ent->driver_data >> 16, pci_name(pdev),
- dev->dev_addr, dev->irq);
+ netdev_info(dev, "ULi M%04lx at pci%s, %pM, irq %d\n",
+ ent->driver_data >> 16, pci_name(pdev),
+ dev->dev_addr, dev->irq);
pci_set_master(pdev);
@@ -524,7 +523,7 @@ static void uli526x_init(struct net_device *dev)
}
}
if(phy_tmp == 32)
- pr_warning("Can not find the phy address!!!");
+ pr_warn("Can not find the phy address!!!\n");
/* Parse SROM and media mode */
db->media_mode = uli526x_media_mode;
@@ -590,7 +589,7 @@ static netdev_tx_t uli526x_start_xmit(struct sk_buff *skb,
/* Too large packet check */
if (skb->len > MAX_PACKET_SIZE) {
- pr_err("big packet = %d\n", (u16)skb->len);
+ netdev_err(dev, "big packet = %d\n", (u16)skb->len);
dev_kfree_skb(skb);
return NETDEV_TX_OK;
}
@@ -600,7 +599,7 @@ static netdev_tx_t uli526x_start_xmit(struct sk_buff *skb,
/* No Tx resource check, it never happens normally */
if (db->tx_packet_cnt >= TX_FREE_DESC_CNT) {
spin_unlock_irqrestore(&db->lock, flags);
- pr_err("No Tx resource %ld\n", db->tx_packet_cnt);
+ netdev_err(dev, "No Tx resource %ld\n", db->tx_packet_cnt);
return NETDEV_TX_BUSY;
}
@@ -667,15 +666,6 @@ static int uli526x_stop(struct net_device *dev)
/* free allocated rx buffer */
uli526x_free_rxbuffer(db);
-#if 0
- /* show statistic counter */
- printk(DRV_NAME ": FU:%lx EC:%lx LC:%lx NC:%lx LOC:%lx TXJT:%lx RESET:%lx RCR8:%lx FAL:%lx TT:%lx\n",
- db->tx_fifo_underrun, db->tx_excessive_collision,
- db->tx_late_collision, db->tx_no_carrier, db->tx_loss_carrier,
- db->tx_jabber_timeout, db->reset_count, db->reset_cr8,
- db->reset_fatal, db->reset_TXtimeout);
-#endif
-
return 0;
}
@@ -755,7 +745,6 @@ static void uli526x_free_tx_pkt(struct net_device *dev,
txptr = db->tx_remove_ptr;
while(db->tx_packet_cnt) {
tdes0 = le32_to_cpu(txptr->tdes0);
- /* printk(DRV_NAME ": tdes0=%x\n", tdes0); */
if (tdes0 & 0x80000000)
break;
@@ -765,7 +754,6 @@ static void uli526x_free_tx_pkt(struct net_device *dev,
/* Transmit statistic counter */
if ( tdes0 != 0x7fffffff ) {
- /* printk(DRV_NAME ": tdes0=%x\n", tdes0); */
dev->stats.collisions += (tdes0 >> 3) & 0xf;
dev->stats.tx_bytes += le32_to_cpu(txptr->tdes1) & 0x7ff;
if (tdes0 & TDES0_ERR_MASK) {
@@ -838,7 +826,6 @@ static void uli526x_rx_packet(struct net_device *dev, struct uli526x_board_info
/* error summary bit check */
if (rdes0 & 0x8000) {
/* This is a error packet */
- //printk(DRV_NAME ": rdes0: %lx\n", rdes0);
dev->stats.rx_errors++;
if (rdes0 & 1)
dev->stats.rx_fifo_errors++;
@@ -945,12 +932,12 @@ ULi_ethtool_gset(struct uli526x_board_info *db, struct ethtool_cmd *ecmd)
ecmd->transceiver = XCVR_EXTERNAL;
- ecmd->speed = 10;
+ ethtool_cmd_speed_set(ecmd, SPEED_10);
ecmd->duplex = DUPLEX_HALF;
if(db->op_mode==ULI526X_100MHF || db->op_mode==ULI526X_100MFD)
{
- ecmd->speed = 100;
+ ethtool_cmd_speed_set(ecmd, SPEED_100);
}
if(db->op_mode==ULI526X_10MFD || db->op_mode==ULI526X_100MFD)
{
@@ -958,7 +945,7 @@ ULi_ethtool_gset(struct uli526x_board_info *db, struct ethtool_cmd *ecmd)
}
if(db->link_failed)
{
- ecmd->speed = -1;
+ ethtool_cmd_speed_set(ecmd, -1);
ecmd->duplex = -1;
}
@@ -1024,7 +1011,6 @@ static void uli526x_timer(unsigned long data)
struct net_device *dev = (struct net_device *) data;
struct uli526x_board_info *db = netdev_priv(dev);
unsigned long flags;
- u8 TmpSpeed=10;
//ULI526X_DBUG(0, "uli526x_timer()", 0);
spin_lock_irqsave(&db->lock, flags);
@@ -1047,8 +1033,7 @@ static void uli526x_timer(unsigned long data)
if ( time_after(jiffies, dev_trans_start(dev) + ULI526X_TX_TIMEOUT) ) {
db->reset_TXtimeout++;
db->wait_reset = 1;
- printk( "%s: Tx timeout - resetting\n",
- dev->name);
+ netdev_err(dev, " Tx timeout - resetting\n");
}
}
@@ -1070,7 +1055,7 @@ static void uli526x_timer(unsigned long data)
/* Link Failed */
ULI526X_DBUG(0, "Link Failed", tmp_cr12);
netif_carrier_off(dev);
- pr_info("%s NIC Link is Down\n",dev->name);
+ netdev_info(dev, "NIC Link is Down\n");
db->link_failed = 1;
/* For Force 10/100M Half/Full mode: Enable Auto-Nego mode */
@@ -1096,18 +1081,13 @@ static void uli526x_timer(unsigned long data)
if(db->link_failed==0)
{
- if(db->op_mode==ULI526X_100MHF || db->op_mode==ULI526X_100MFD)
- {
- TmpSpeed = 100;
- }
- if(db->op_mode==ULI526X_10MFD || db->op_mode==ULI526X_100MFD)
- {
- pr_info("%s NIC Link is Up %d Mbps Full duplex\n",dev->name,TmpSpeed);
- }
- else
- {
- pr_info("%s NIC Link is Up %d Mbps Half duplex\n",dev->name,TmpSpeed);
- }
+ netdev_info(dev, "NIC Link is Up %d Mbps %s duplex\n",
+ (db->op_mode == ULI526X_100MHF ||
+ db->op_mode == ULI526X_100MFD)
+ ? 100 : 10,
+ (db->op_mode == ULI526X_10MFD ||
+ db->op_mode == ULI526X_100MFD)
+ ? "Full" : "Half");
netif_carrier_on(dev);
}
/* SHOW_MEDIA_TYPE(db->op_mode); */
@@ -1116,7 +1096,7 @@ static void uli526x_timer(unsigned long data)
{
if(db->init==1)
{
- pr_info("%s NIC Link is Down\n",dev->name);
+ netdev_info(dev, "NIC Link is Down\n");
netif_carrier_off(dev);
}
}
@@ -1242,7 +1222,7 @@ static int uli526x_resume(struct pci_dev *pdev)
err = pci_set_power_state(pdev, PCI_D0);
if (err) {
- dev_warn(&dev->dev, "Could not put device into D0\n");
+ netdev_warn(dev, "Could not put device into D0\n");
return err;
}
@@ -1443,7 +1423,7 @@ static void send_filter_frame(struct net_device *dev, int mc_cnt)
update_cr6(db->cr6_data, dev->base_addr);
dev->trans_start = jiffies;
} else
- pr_err("No Tx resource - Send_filter_frame!\n");
+ netdev_err(dev, "No Tx resource - Send_filter_frame!\n");
}
@@ -1540,7 +1520,6 @@ static u8 uli526x_sense_speed(struct uli526x_board_info * db)
else
phy_mode = 0x1000;
- /* printk(DRV_NAME ": Phy_mode %x ",phy_mode); */
switch (phy_mode) {
case 0x1000: db->op_mode = ULI526X_10MHF; break;
case 0x2000: db->op_mode = ULI526X_10MFD; break;
@@ -1829,7 +1808,7 @@ MODULE_PARM_DESC(mode, "ULi M5261/M5263: Bit 0: 10/100Mbps, bit 2: duplex, bit 8
static int __init uli526x_init_module(void)
{
- printk(version);
+ pr_info("%s\n", version);
printed_version = 1;
ULI526X_DBUG(0, "init_module() ", debug);
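
The uli526x.c ethtool hunks above replace direct assignments to ecmd->speed with ethtool_cmd_speed_set(), which also fills the speed_hi half of the 32-bit speed so values that do not fit in 16 bits (and the -1 "unknown" value) survive the round trip. The helper is roughly equivalent to this hedged sketch (see <linux/ethtool.h> for the real one):

    static inline void ethtool_cmd_speed_set(struct ethtool_cmd *ep, __u32 speed)
    {
            ep->speed = (__u16)speed;             /* low 16 bits */
            ep->speed_hi = (__u16)(speed >> 16);  /* high 16 bits */
    }

    ethtool_cmd_speed_set(ecmd, SPEED_100);       /* instead of ecmd->speed = 100 */
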
diff --git a/drivers/net/tulip/winbond-840.c b/drivers/net/tulip/winbond-840.c
index f0b231035dee..862eadf07191 100644
--- a/drivers/net/tulip/winbond-840.c
+++ b/drivers/net/tulip/winbond-840.c
@@ -44,6 +44,8 @@
* Wake-On-LAN
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#define DRV_NAME "winbond-840"
#define DRV_VERSION "1.01-e"
#define DRV_RELDATE "Sep-11-2006"
@@ -139,7 +141,7 @@ static int full_duplex[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};
/* These identify the driver base version and may not be removed. */
static const char version[] __initconst =
- KERN_INFO DRV_NAME ".c:v" DRV_VERSION " (2.4 port) "
+ "v" DRV_VERSION " (2.4 port) "
DRV_RELDATE " Donald Becker <becker@scyld.com>\n"
" http://www.scyld.com/network/drivers.html\n";
@@ -375,8 +377,8 @@ static int __devinit w840_probe1 (struct pci_dev *pdev,
irq = pdev->irq;
if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) {
- pr_warning("Winbond-840: Device %s disabled due to DMA limitations\n",
- pci_name(pdev));
+ pr_warn("Device %s disabled due to DMA limitations\n",
+ pci_name(pdev));
return -EIO;
}
dev = alloc_etherdev(sizeof(*np));
@@ -643,8 +645,7 @@ static int netdev_open(struct net_device *dev)
goto out_err;
if (debug > 1)
- printk(KERN_DEBUG "%s: w89c840_open() irq %d\n",
- dev->name, dev->irq);
+ netdev_dbg(dev, "w89c840_open() irq %d\n", dev->irq);
if((i=alloc_ringdesc(dev)))
goto out_err;
@@ -656,7 +657,7 @@ static int netdev_open(struct net_device *dev)
netif_start_queue(dev);
if (debug > 2)
- printk(KERN_DEBUG "%s: Done netdev_open()\n", dev->name);
+ netdev_dbg(dev, "Done netdev_open()\n");
/* Set the timer to check for link beat. */
init_timer(&np->timer);
@@ -785,9 +786,9 @@ static void netdev_timer(unsigned long data)
void __iomem *ioaddr = np->base_addr;
if (debug > 2)
- printk(KERN_DEBUG "%s: Media selection timer tick, status %08x config %08x\n",
- dev->name, ioread32(ioaddr + IntrStatus),
- ioread32(ioaddr + NetworkConfig));
+ netdev_dbg(dev, "Media selection timer tick, status %08x config %08x\n",
+ ioread32(ioaddr + IntrStatus),
+ ioread32(ioaddr + NetworkConfig));
spin_lock_irq(&np->lock);
update_csr6(dev, update_link(dev));
spin_unlock_irq(&np->lock);
@@ -1054,8 +1055,8 @@ static netdev_tx_t start_tx(struct sk_buff *skb, struct net_device *dev)
spin_unlock_irq(&np->lock);
if (debug > 4) {
- printk(KERN_DEBUG "%s: Transmit frame #%d queued in slot %d\n",
- dev->name, np->cur_tx, entry);
+ netdev_dbg(dev, "Transmit frame #%d queued in slot %d\n",
+ np->cur_tx, entry);
}
return NETDEV_TX_OK;
}
@@ -1072,8 +1073,8 @@ static void netdev_tx_done(struct net_device *dev)
if (tx_status & 0x8000) { /* There was an error, log it. */
#ifndef final_version
if (debug > 1)
- printk(KERN_DEBUG "%s: Transmit error, Tx status %08x\n",
- dev->name, tx_status);
+ netdev_dbg(dev, "Transmit error, Tx status %08x\n",
+ tx_status);
#endif
np->stats.tx_errors++;
if (tx_status & 0x0104) np->stats.tx_aborted_errors++;
@@ -1085,8 +1086,8 @@ static void netdev_tx_done(struct net_device *dev)
} else {
#ifndef final_version
if (debug > 3)
- printk(KERN_DEBUG "%s: Transmit slot %d ok, Tx status %08x\n",
- dev->name, entry, tx_status);
+ netdev_dbg(dev, "Transmit slot %d ok, Tx status %08x\n",
+ entry, tx_status);
#endif
np->stats.tx_bytes += np->tx_skbuff[entry]->len;
np->stats.collisions += (tx_status >> 3) & 15;
@@ -1129,8 +1130,7 @@ static irqreturn_t intr_handler(int irq, void *dev_instance)
iowrite32(intr_status & 0x001ffff, ioaddr + IntrStatus);
if (debug > 4)
- printk(KERN_DEBUG "%s: Interrupt, status %04x\n",
- dev->name, intr_status);
+ netdev_dbg(dev, "Interrupt, status %04x\n", intr_status);
if ((intr_status & (NormalIntr|AbnormalIntr)) == 0)
break;
@@ -1171,8 +1171,8 @@ static irqreturn_t intr_handler(int irq, void *dev_instance)
} while (1);
if (debug > 3)
- printk(KERN_DEBUG "%s: exiting interrupt, status=%#4.4x\n",
- dev->name, ioread32(ioaddr + IntrStatus));
+ netdev_dbg(dev, "exiting interrupt, status=%#4.4x\n",
+ ioread32(ioaddr + IntrStatus));
return IRQ_RETVAL(handled);
}
@@ -1185,8 +1185,8 @@ static int netdev_rx(struct net_device *dev)
int work_limit = np->dirty_rx + RX_RING_SIZE - np->cur_rx;
if (debug > 4) {
- printk(KERN_DEBUG " In netdev_rx(), entry %d status %04x\n",
- entry, np->rx_ring[entry].status);
+ netdev_dbg(dev, " In netdev_rx(), entry %d status %04x\n",
+ entry, np->rx_ring[entry].status);
}
/* If EOP is set on the next entry, it's a new packet. Send it up. */
@@ -1195,8 +1195,8 @@ static int netdev_rx(struct net_device *dev)
s32 status = desc->status;
if (debug > 4)
- printk(KERN_DEBUG " netdev_rx() status was %08x\n",
- status);
+ netdev_dbg(dev, " netdev_rx() status was %08x\n",
+ status);
if (status < 0)
break;
if ((status & 0x38008300) != 0x0300) {
@@ -1211,8 +1211,8 @@ static int netdev_rx(struct net_device *dev)
} else if (status & 0x8000) {
/* There was a fatal error. */
if (debug > 2)
- printk(KERN_DEBUG "%s: Receive error, Rx status %08x\n",
- dev->name, status);
+ netdev_dbg(dev, "Receive error, Rx status %08x\n",
+ status);
np->stats.rx_errors++; /* end of a packet.*/
if (status & 0x0890) np->stats.rx_length_errors++;
if (status & 0x004C) np->stats.rx_frame_errors++;
@@ -1225,8 +1225,8 @@ static int netdev_rx(struct net_device *dev)
#ifndef final_version
if (debug > 4)
- printk(KERN_DEBUG " netdev_rx() normal Rx pkt length %d status %x\n",
- pkt_len, status);
+ netdev_dbg(dev, " netdev_rx() normal Rx pkt length %d status %x\n",
+ pkt_len, status);
#endif
/* Check if the packet is long enough to accept without copying
to a minimally-sized skbuff. */
@@ -1251,10 +1251,10 @@ static int netdev_rx(struct net_device *dev)
#ifndef final_version /* Remove after testing. */
/* You will want this info for the initial debug. */
if (debug > 5)
- printk(KERN_DEBUG " Rx data %pM %pM %02x%02x %pI4\n",
- &skb->data[0], &skb->data[6],
- skb->data[12], skb->data[13],
- &skb->data[14]);
+ netdev_dbg(dev, " Rx data %pM %pM %02x%02x %pI4\n",
+ &skb->data[0], &skb->data[6],
+ skb->data[12], skb->data[13],
+ &skb->data[14]);
#endif
skb->protocol = eth_type_trans(skb, dev);
netif_rx(skb);
@@ -1292,8 +1292,7 @@ static void netdev_error(struct net_device *dev, int intr_status)
void __iomem *ioaddr = np->base_addr;
if (debug > 2)
- printk(KERN_DEBUG "%s: Abnormal event, %08x\n",
- dev->name, intr_status);
+ netdev_dbg(dev, "Abnormal event, %08x\n", intr_status);
if (intr_status == 0xffffffff)
return;
spin_lock(&np->lock);
@@ -1313,8 +1312,7 @@ static void netdev_error(struct net_device *dev, int intr_status)
new = 127; /* load full packet before starting */
new = (np->csr6 & ~(0x7F << 14)) | (new<<14);
#endif
- printk(KERN_DEBUG "%s: Tx underflow, new csr6 %08x\n",
- dev->name, new);
+ netdev_dbg(dev, "Tx underflow, new csr6 %08x\n", new);
update_csr6(dev, new);
}
if (intr_status & RxDied) { /* Missed a Rx frame. */
@@ -1487,13 +1485,12 @@ static int netdev_close(struct net_device *dev)
netif_stop_queue(dev);
if (debug > 1) {
- printk(KERN_DEBUG "%s: Shutting down ethercard, status was %08x Config %08x\n",
- dev->name, ioread32(ioaddr + IntrStatus),
- ioread32(ioaddr + NetworkConfig));
- printk(KERN_DEBUG "%s: Queue pointers were Tx %d / %d, Rx %d / %d\n",
- dev->name,
- np->cur_tx, np->dirty_tx,
- np->cur_rx, np->dirty_rx);
+ netdev_dbg(dev, "Shutting down ethercard, status was %08x Config %08x\n",
+ ioread32(ioaddr + IntrStatus),
+ ioread32(ioaddr + NetworkConfig));
+ netdev_dbg(dev, "Queue pointers were Tx %d / %d, Rx %d / %d\n",
+ np->cur_tx, np->dirty_tx,
+ np->cur_rx, np->dirty_rx);
}
/* Stop the chip's Tx and Rx processes. */
diff --git a/drivers/net/tulip/xircom_cb.c b/drivers/net/tulip/xircom_cb.c
index 5a73752be2ca..988b8eb24d37 100644
--- a/drivers/net/tulip/xircom_cb.c
+++ b/drivers/net/tulip/xircom_cb.c
@@ -37,15 +37,6 @@
#include <asm/irq.h>
#endif
-#ifdef DEBUG
-#define enter(x) printk("Enter: %s, %s line %i\n",x,__FILE__,__LINE__)
-#define leave(x) printk("Leave: %s, %s line %i\n",x,__FILE__,__LINE__)
-#else
-#define enter(x) do {} while (0)
-#define leave(x) do {} while (0)
-#endif
-
-
MODULE_DESCRIPTION("Xircom Cardbus ethernet driver");
MODULE_AUTHOR("Arjan van de Ven <arjanv@redhat.com>");
MODULE_LICENSE("GPL");
@@ -161,7 +152,7 @@ static struct pci_driver xircom_ops = {
};
-#ifdef DEBUG
+#if defined DEBUG && DEBUG > 1
static void print_binary(unsigned int number)
{
int i,i2;
@@ -176,7 +167,7 @@ static void print_binary(unsigned int number)
if ((i&3)==0)
buffer[i2++]=' ';
}
- printk("%s\n",buffer);
+ pr_debug("%s\n",buffer);
}
#endif
@@ -205,7 +196,6 @@ static int __devinit xircom_probe(struct pci_dev *pdev, const struct pci_device_
struct xircom_private *private;
unsigned long flags;
unsigned short tmp16;
- enter("xircom_probe");
/* First do the PCI initialisation */
@@ -272,8 +262,8 @@ static int __devinit xircom_probe(struct pci_dev *pdev, const struct pci_device_
goto reg_fail;
}
- dev_info(&dev->dev, "Xircom cardbus revision %i at irq %i\n",
- pdev->revision, pdev->irq);
+ netdev_info(dev, "Xircom cardbus revision %i at irq %i\n",
+ pdev->revision, pdev->irq);
/* start the transmitter to get a heartbeat */
/* TODO: send 2 dummy packets here */
transceiver_voodoo(private);
@@ -285,7 +275,6 @@ static int __devinit xircom_probe(struct pci_dev *pdev, const struct pci_device_
trigger_receive(private);
- leave("xircom_probe");
return 0;
reg_fail:
@@ -310,7 +299,6 @@ static void __devexit xircom_remove(struct pci_dev *pdev)
struct net_device *dev = pci_get_drvdata(pdev);
struct xircom_private *card = netdev_priv(dev);
- enter("xircom_remove");
pci_free_consistent(pdev,8192,card->rx_buffer,card->rx_dma_handle);
pci_free_consistent(pdev,8192,card->tx_buffer,card->tx_dma_handle);
@@ -318,7 +306,6 @@ static void __devexit xircom_remove(struct pci_dev *pdev)
unregister_netdev(dev);
free_netdev(dev);
pci_set_drvdata(pdev, NULL);
- leave("xircom_remove");
}
static irqreturn_t xircom_interrupt(int irq, void *dev_instance)
@@ -328,17 +315,15 @@ static irqreturn_t xircom_interrupt(int irq, void *dev_instance)
unsigned int status;
int i;
- enter("xircom_interrupt\n");
-
spin_lock(&card->lock);
status = inl(card->io_port+CSR5);
-#ifdef DEBUG
+#if defined DEBUG && DEBUG > 1
print_binary(status);
- printk("tx status 0x%08x 0x%08x\n",
- card->tx_buffer[0], card->tx_buffer[4]);
- printk("rx status 0x%08x 0x%08x\n",
- card->rx_buffer[0], card->rx_buffer[4]);
+ pr_debug("tx status 0x%08x 0x%08x\n",
+ card->tx_buffer[0], card->tx_buffer[4]);
+ pr_debug("rx status 0x%08x 0x%08x\n",
+ card->rx_buffer[0], card->rx_buffer[4]);
#endif
/* Handle shared irq and hotplug */
if (status == 0 || status == 0xffffffff) {
@@ -348,9 +333,9 @@ static irqreturn_t xircom_interrupt(int irq, void *dev_instance)
if (link_status_changed(card)) {
int newlink;
- printk(KERN_DEBUG "xircom_cb: Link status has changed\n");
+ netdev_dbg(dev, "Link status has changed\n");
newlink = link_status(card);
- dev_info(&dev->dev, "Link is %i mbit\n", newlink);
+ netdev_info(dev, "Link is %d mbit\n", newlink);
if (newlink)
netif_carrier_on(dev);
else
@@ -369,9 +354,7 @@ static irqreturn_t xircom_interrupt(int irq, void *dev_instance)
for (i=0;i<NUMDESCRIPTORS;i++)
investigate_read_descriptor(dev,card,i,bufferoffsets[i]);
-
spin_unlock(&card->lock);
- leave("xircom_interrupt");
return IRQ_HANDLED;
}
@@ -382,7 +365,6 @@ static netdev_tx_t xircom_start_xmit(struct sk_buff *skb,
unsigned long flags;
int nextdescriptor;
int desc;
- enter("xircom_start_xmit");
card = netdev_priv(dev);
spin_lock_irqsave(&card->lock,flags);
@@ -424,13 +406,10 @@ static netdev_tx_t xircom_start_xmit(struct sk_buff *skb,
netif_stop_queue(dev);
}
card->transmit_used = nextdescriptor;
- leave("xircom-start_xmit - sent");
spin_unlock_irqrestore(&card->lock,flags);
return NETDEV_TX_OK;
}
-
-
/* Uh oh... no free descriptor... drop the packet */
netif_stop_queue(dev);
spin_unlock_irqrestore(&card->lock,flags);
@@ -446,18 +425,16 @@ static int xircom_open(struct net_device *dev)
{
struct xircom_private *xp = netdev_priv(dev);
int retval;
- enter("xircom_open");
- pr_info("xircom cardbus adaptor found, registering as %s, using irq %i\n",
- dev->name, dev->irq);
+
+ netdev_info(dev, "xircom cardbus adaptor found, using irq %i\n",
+ dev->irq);
retval = request_irq(dev->irq, xircom_interrupt, IRQF_SHARED, dev->name, dev);
- if (retval) {
- leave("xircom_open - No IRQ");
+ if (retval)
return retval;
- }
xircom_up(xp);
xp->open = 1;
- leave("xircom_open");
+
return 0;
}
@@ -466,7 +443,6 @@ static int xircom_close(struct net_device *dev)
struct xircom_private *card;
unsigned long flags;
- enter("xircom_close");
card = netdev_priv(dev);
netif_stop_queue(dev); /* we don't want new packets */
@@ -486,8 +462,6 @@ static int xircom_close(struct net_device *dev)
card->open = 0;
free_irq(dev->irq,dev);
- leave("xircom_close");
-
return 0;
}
@@ -507,8 +481,6 @@ static void initialize_card(struct xircom_private *card)
{
unsigned int val;
unsigned long flags;
- enter("initialize_card");
-
spin_lock_irqsave(&card->lock, flags);
@@ -534,8 +506,6 @@ static void initialize_card(struct xircom_private *card)
deactivate_transmitter(card);
spin_unlock_irqrestore(&card->lock, flags);
-
- leave("initialize_card");
}
/*
@@ -547,12 +517,9 @@ ignored; I chose zero.
static void trigger_transmit(struct xircom_private *card)
{
unsigned int val;
- enter("trigger_transmit");
val = 0;
outl(val, card->io_port + CSR1);
-
- leave("trigger_transmit");
}
/*
@@ -565,12 +532,9 @@ ignored; I chose zero.
static void trigger_receive(struct xircom_private *card)
{
unsigned int val;
- enter("trigger_receive");
val = 0;
outl(val, card->io_port + CSR2);
-
- leave("trigger_receive");
}
/*
@@ -581,8 +545,6 @@ static void setup_descriptors(struct xircom_private *card)
{
u32 address;
int i;
- enter("setup_descriptors");
-
BUG_ON(card->rx_buffer == NULL);
BUG_ON(card->tx_buffer == NULL);
@@ -636,8 +598,6 @@ static void setup_descriptors(struct xircom_private *card)
/* write the transmit descriptor ring to the card */
address = card->tx_dma_handle;
outl(address, card->io_port + CSR4); /* xmit descr list address */
-
- leave("setup_descriptors");
}
/*
@@ -647,13 +607,10 @@ valid by setting the address in the card to 0x00.
static void remove_descriptors(struct xircom_private *card)
{
unsigned int val;
- enter("remove_descriptors");
val = 0;
outl(val, card->io_port + CSR3); /* Receive descriptor address */
outl(val, card->io_port + CSR4); /* Send descriptor address */
-
- leave("remove_descriptors");
}
/*
@@ -665,21 +622,17 @@ This function also clears the status-bit.
static int link_status_changed(struct xircom_private *card)
{
unsigned int val;
- enter("link_status_changed");
val = inl(card->io_port + CSR5); /* Status register */
- if ((val & (1 << 27)) == 0) { /* no change */
- leave("link_status_changed - nochange");
+ if ((val & (1 << 27)) == 0) /* no change */
return 0;
- }
/* clear the event by writing a 1 to the bit in the
status register. */
val = (1 << 27);
outl(val, card->io_port + CSR5);
- leave("link_status_changed - changed");
return 1;
}
@@ -691,16 +644,12 @@ in a non-stopped state.
static int transmit_active(struct xircom_private *card)
{
unsigned int val;
- enter("transmit_active");
val = inl(card->io_port + CSR5); /* Status register */
- if ((val & (7 << 20)) == 0) { /* transmitter disabled */
- leave("transmit_active - inactive");
+ if ((val & (7 << 20)) == 0) /* transmitter disabled */
return 0;
- }
- leave("transmit_active - active");
return 1;
}
@@ -711,17 +660,12 @@ in a non-stopped state.
static int receive_active(struct xircom_private *card)
{
unsigned int val;
- enter("receive_active");
-
val = inl(card->io_port + CSR5); /* Status register */
- if ((val & (7 << 17)) == 0) { /* receiver disabled */
- leave("receive_active - inactive");
+ if ((val & (7 << 17)) == 0) /* receiver disabled */
return 0;
- }
- leave("receive_active - active");
return 1;
}
@@ -739,8 +683,6 @@ static void activate_receiver(struct xircom_private *card)
{
unsigned int val;
int counter;
- enter("activate_receiver");
-
val = inl(card->io_port + CSR6); /* Operation mode */
@@ -761,7 +703,7 @@ static void activate_receiver(struct xircom_private *card)
udelay(50);
counter--;
if (counter <= 0)
- pr_err("Receiver failed to deactivate\n");
+ netdev_err(card->dev, "Receiver failed to deactivate\n");
}
/* enable the receiver */
@@ -778,10 +720,9 @@ static void activate_receiver(struct xircom_private *card)
udelay(50);
counter--;
if (counter <= 0)
- pr_err("Receiver failed to re-activate\n");
+ netdev_err(card->dev,
+ "Receiver failed to re-activate\n");
}
-
- leave("activate_receiver");
}
/*
@@ -795,7 +736,6 @@ static void deactivate_receiver(struct xircom_private *card)
{
unsigned int val;
int counter;
- enter("deactivate_receiver");
val = inl(card->io_port + CSR6); /* Operation mode */
val = val & ~2; /* disable the receiver */
@@ -809,11 +749,8 @@ static void deactivate_receiver(struct xircom_private *card)
udelay(50);
counter--;
if (counter <= 0)
- pr_err("Receiver failed to deactivate\n");
+ netdev_err(card->dev, "Receiver failed to deactivate\n");
}
-
-
- leave("deactivate_receiver");
}
@@ -831,8 +768,6 @@ static void activate_transmitter(struct xircom_private *card)
{
unsigned int val;
int counter;
- enter("activate_transmitter");
-
val = inl(card->io_port + CSR6); /* Operation mode */
@@ -852,7 +787,8 @@ static void activate_transmitter(struct xircom_private *card)
udelay(50);
counter--;
if (counter <= 0)
- pr_err("Transmitter failed to deactivate\n");
+ netdev_err(card->dev,
+ "Transmitter failed to deactivate\n");
}
/* enable the transmitter */
@@ -869,10 +805,9 @@ static void activate_transmitter(struct xircom_private *card)
udelay(50);
counter--;
if (counter <= 0)
- pr_err("Transmitter failed to re-activate\n");
+ netdev_err(card->dev,
+ "Transmitter failed to re-activate\n");
}
-
- leave("activate_transmitter");
}
/*
@@ -886,7 +821,6 @@ static void deactivate_transmitter(struct xircom_private *card)
{
unsigned int val;
int counter;
- enter("deactivate_transmitter");
val = inl(card->io_port + CSR6); /* Operation mode */
val = val & ~2; /* disable the transmitter */
@@ -900,11 +834,9 @@ static void deactivate_transmitter(struct xircom_private *card)
udelay(50);
counter--;
if (counter <= 0)
- pr_err("Transmitter failed to deactivate\n");
+ netdev_err(card->dev,
+ "Transmitter failed to deactivate\n");
}
-
-
- leave("deactivate_transmitter");
}
@@ -916,13 +848,10 @@ must be called with the lock held and interrupts disabled.
static void enable_transmit_interrupt(struct xircom_private *card)
{
unsigned int val;
- enter("enable_transmit_interrupt");
val = inl(card->io_port + CSR7); /* Interrupt enable register */
val |= 1; /* enable the transmit interrupt */
outl(val, card->io_port + CSR7);
-
- leave("enable_transmit_interrupt");
}
@@ -934,13 +863,10 @@ must be called with the lock held and interrupts disabled.
static void enable_receive_interrupt(struct xircom_private *card)
{
unsigned int val;
- enter("enable_receive_interrupt");
val = inl(card->io_port + CSR7); /* Interrupt enable register */
val = val | (1 << 6); /* enable the receive interrupt */
outl(val, card->io_port + CSR7);
-
- leave("enable_receive_interrupt");
}
/*
@@ -951,13 +877,10 @@ must be called with the lock held and interrupts disabled.
static void enable_link_interrupt(struct xircom_private *card)
{
unsigned int val;
- enter("enable_link_interrupt");
val = inl(card->io_port + CSR7); /* Interrupt enable register */
val = val | (1 << 27); /* enable the link status change interrupt */
outl(val, card->io_port + CSR7);
-
- leave("enable_link_interrupt");
}
@@ -970,12 +893,9 @@ must be called with the lock held and interrupts disabled.
static void disable_all_interrupts(struct xircom_private *card)
{
unsigned int val;
- enter("enable_all_interrupts");
val = 0; /* disable all interrupts */
outl(val, card->io_port + CSR7);
-
- leave("disable_all_interrupts");
}
/*
@@ -986,7 +906,6 @@ must be called with the lock held and interrupts disabled.
static void enable_common_interrupts(struct xircom_private *card)
{
unsigned int val;
- enter("enable_link_interrupt");
val = inl(card->io_port + CSR7); /* Interrupt enable register */
val |= (1<<16); /* Normal Interrupt Summary */
@@ -998,8 +917,6 @@ static void enable_common_interrupts(struct xircom_private *card)
val |= (1<<2); /* Transmit Buffer Unavailable */
val |= (1<<1); /* Transmit Process Stopped */
outl(val, card->io_port + CSR7);
-
- leave("enable_link_interrupt");
}
/*
@@ -1010,13 +927,11 @@ must be called with the lock held and interrupts disabled.
static int enable_promisc(struct xircom_private *card)
{
unsigned int val;
- enter("enable_promisc");
val = inl(card->io_port + CSR6);
val = val | (1 << 6);
outl(val, card->io_port + CSR6);
- leave("enable_promisc");
return 1;
}
@@ -1031,7 +946,6 @@ Must be called in locked state with interrupts disabled
static int link_status(struct xircom_private *card)
{
unsigned int val;
- enter("link_status");
val = inb(card->io_port + CSR12);
@@ -1042,7 +956,6 @@ static int link_status(struct xircom_private *card)
/* If we get here -> no link at all */
- leave("link_status");
return 0;
}
@@ -1061,8 +974,6 @@ static void read_mac_address(struct xircom_private *card)
unsigned long flags;
int i;
- enter("read_mac_address");
-
spin_lock_irqsave(&card->lock, flags);
outl(1 << 12, card->io_port + CSR9); /* enable boot rom access */
@@ -1090,7 +1001,6 @@ static void read_mac_address(struct xircom_private *card)
}
spin_unlock_irqrestore(&card->lock, flags);
pr_debug(" %pM\n", card->dev->dev_addr);
- leave("read_mac_address");
}
@@ -1103,8 +1013,6 @@ static void transceiver_voodoo(struct xircom_private *card)
{
unsigned long flags;
- enter("transceiver_voodoo");
-
/* disable all powermanagement */
pci_write_config_dword(card->pdev, PCI_POWERMGMT, 0x0000);
@@ -1122,7 +1030,6 @@ static void transceiver_voodoo(struct xircom_private *card)
spin_unlock_irqrestore(&card->lock, flags);
netif_start_queue(card->dev);
- leave("transceiver_voodoo");
}
@@ -1131,8 +1038,6 @@ static void xircom_up(struct xircom_private *card)
unsigned long flags;
int i;
- enter("xircom_up");
-
/* disable all powermanagement */
pci_write_config_dword(card->pdev, PCI_POWERMGMT, 0x0000);
@@ -1156,87 +1061,84 @@ static void xircom_up(struct xircom_private *card)
trigger_receive(card);
trigger_transmit(card);
netif_start_queue(card->dev);
- leave("xircom_up");
}
/* Bufferoffset is in BYTES */
-static void investigate_read_descriptor(struct net_device *dev,struct xircom_private *card, int descnr, unsigned int bufferoffset)
+static void
+investigate_read_descriptor(struct net_device *dev, struct xircom_private *card,
+ int descnr, unsigned int bufferoffset)
{
- int status;
-
- enter("investigate_read_descriptor");
- status = le32_to_cpu(card->rx_buffer[4*descnr]);
+ int status;
- if ((status > 0)) { /* packet received */
+ status = le32_to_cpu(card->rx_buffer[4*descnr]);
- /* TODO: discard error packets */
+ if (status > 0) { /* packet received */
- short pkt_len = ((status >> 16) & 0x7ff) - 4; /* minus 4, we don't want the CRC */
- struct sk_buff *skb;
+ /* TODO: discard error packets */
- if (pkt_len > 1518) {
- pr_err("Packet length %i is bogus\n", pkt_len);
- pkt_len = 1518;
- }
+ short pkt_len = ((status >> 16) & 0x7ff) - 4;
+ /* minus 4, we don't want the CRC */
+ struct sk_buff *skb;
- skb = dev_alloc_skb(pkt_len + 2);
- if (skb == NULL) {
- dev->stats.rx_dropped++;
- goto out;
- }
- skb_reserve(skb, 2);
- skb_copy_to_linear_data(skb, (unsigned char*)&card->rx_buffer[bufferoffset / 4], pkt_len);
- skb_put(skb, pkt_len);
- skb->protocol = eth_type_trans(skb, dev);
- netif_rx(skb);
- dev->stats.rx_packets++;
- dev->stats.rx_bytes += pkt_len;
-
- out:
- /* give the buffer back to the card */
- card->rx_buffer[4*descnr] = cpu_to_le32(0x80000000);
- trigger_receive(card);
+ if (pkt_len > 1518) {
+ netdev_err(dev, "Packet length %i is bogus\n", pkt_len);
+ pkt_len = 1518;
}
- leave("investigate_read_descriptor");
-
+ skb = dev_alloc_skb(pkt_len + 2);
+ if (skb == NULL) {
+ dev->stats.rx_dropped++;
+ goto out;
+ }
+ skb_reserve(skb, 2);
+ skb_copy_to_linear_data(skb,
+ &card->rx_buffer[bufferoffset / 4],
+ pkt_len);
+ skb_put(skb, pkt_len);
+ skb->protocol = eth_type_trans(skb, dev);
+ netif_rx(skb);
+ dev->stats.rx_packets++;
+ dev->stats.rx_bytes += pkt_len;
+
+out:
+ /* give the buffer back to the card */
+ card->rx_buffer[4*descnr] = cpu_to_le32(0x80000000);
+ trigger_receive(card);
+ }
}
/* Bufferoffset is in BYTES */
-static void investigate_write_descriptor(struct net_device *dev, struct xircom_private *card, int descnr, unsigned int bufferoffset)
+static void
+investigate_write_descriptor(struct net_device *dev,
+ struct xircom_private *card,
+ int descnr, unsigned int bufferoffset)
{
- int status;
-
- enter("investigate_write_descriptor");
+ int status;
- status = le32_to_cpu(card->tx_buffer[4*descnr]);
+ status = le32_to_cpu(card->tx_buffer[4*descnr]);
#if 0
- if (status & 0x8000) { /* Major error */
- pr_err("Major transmit error status %x\n", status);
- card->tx_buffer[4*descnr] = 0;
- netif_wake_queue (dev);
- }
+ if (status & 0x8000) { /* Major error */
+ pr_err("Major transmit error status %x\n", status);
+ card->tx_buffer[4*descnr] = 0;
+ netif_wake_queue (dev);
+ }
#endif
- if (status > 0) { /* bit 31 is 0 when done */
- if (card->tx_skb[descnr]!=NULL) {
- dev->stats.tx_bytes += card->tx_skb[descnr]->len;
- dev_kfree_skb_irq(card->tx_skb[descnr]);
- }
- card->tx_skb[descnr] = NULL;
- /* Bit 8 in the status field is 1 if there was a collision */
- if (status&(1<<8))
- dev->stats.collisions++;
- card->tx_buffer[4*descnr] = 0; /* descriptor is free again */
- netif_wake_queue (dev);
- dev->stats.tx_packets++;
+ if (status > 0) { /* bit 31 is 0 when done */
+ if (card->tx_skb[descnr]!=NULL) {
+ dev->stats.tx_bytes += card->tx_skb[descnr]->len;
+ dev_kfree_skb_irq(card->tx_skb[descnr]);
}
-
- leave("investigate_write_descriptor");
-
+ card->tx_skb[descnr] = NULL;
+ /* Bit 8 in the status field is 1 if there was a collision */
+ if (status & (1 << 8))
+ dev->stats.collisions++;
+ card->tx_buffer[4*descnr] = 0; /* descriptor is free again */
+ netif_wake_queue (dev);
+ dev->stats.tx_packets++;
+ }
}
-
static int __init xircom_init(void)
{
return pci_register_driver(&xircom_ops);
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index f5e9ac00a07b..74e94054ab1a 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -123,6 +123,9 @@ struct tun_struct {
gid_t group;
struct net_device *dev;
+ u32 set_features;
+#define TUN_USER_FEATURES (NETIF_F_HW_CSUM|NETIF_F_TSO_ECN|NETIF_F_TSO| \
+ NETIF_F_TSO6|NETIF_F_UFO)
struct fasync_struct *fasync;
struct tap_filter txflt;
@@ -451,12 +454,20 @@ tun_net_change_mtu(struct net_device *dev, int new_mtu)
return 0;
}
+static u32 tun_net_fix_features(struct net_device *dev, u32 features)
+{
+ struct tun_struct *tun = netdev_priv(dev);
+
+ return (features & tun->set_features) | (features & ~TUN_USER_FEATURES);
+}
+
static const struct net_device_ops tun_netdev_ops = {
.ndo_uninit = tun_net_uninit,
.ndo_open = tun_net_open,
.ndo_stop = tun_net_close,
.ndo_start_xmit = tun_net_xmit,
.ndo_change_mtu = tun_net_change_mtu,
+ .ndo_fix_features = tun_net_fix_features,
};
static const struct net_device_ops tap_netdev_ops = {
@@ -465,6 +476,7 @@ static const struct net_device_ops tap_netdev_ops = {
.ndo_stop = tun_net_close,
.ndo_start_xmit = tun_net_xmit,
.ndo_change_mtu = tun_net_change_mtu,
+ .ndo_fix_features = tun_net_fix_features,
.ndo_set_multicast_list = tun_net_mclist,
.ndo_set_mac_address = eth_mac_addr,
.ndo_validate_addr = eth_validate_addr,
@@ -628,8 +640,7 @@ static __inline__ ssize_t tun_get_user(struct tun_struct *tun,
kfree_skb(skb);
return -EINVAL;
}
- } else if (tun->flags & TUN_NOCHECKSUM)
- skb->ip_summed = CHECKSUM_UNNECESSARY;
+ }
switch (tun->flags & TUN_TYPE_MASK) {
case TUN_TUN_DEV:
@@ -1088,11 +1099,9 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
tun_net_init(dev);
- if (strchr(dev->name, '%')) {
- err = dev_alloc_name(dev, dev->name);
- if (err < 0)
- goto err_free_sk;
- }
+ dev->hw_features = NETIF_F_SG | NETIF_F_FRAGLIST |
+ TUN_USER_FEATURES;
+ dev->features = dev->hw_features;
err = register_netdevice(tun->dev);
if (err < 0)
@@ -1158,18 +1167,12 @@ static int tun_get_iff(struct net *net, struct tun_struct *tun,
/* This is like a cut-down ethtool ops, except done via tun fd so no
* privs required. */
-static int set_offload(struct net_device *dev, unsigned long arg)
+static int set_offload(struct tun_struct *tun, unsigned long arg)
{
- u32 old_features, features;
-
- old_features = dev->features;
- /* Unset features, set them as we chew on the arg. */
- features = (old_features & ~(NETIF_F_HW_CSUM|NETIF_F_SG|NETIF_F_FRAGLIST
- |NETIF_F_TSO_ECN|NETIF_F_TSO|NETIF_F_TSO6
- |NETIF_F_UFO));
+ u32 features = 0;
if (arg & TUN_F_CSUM) {
- features |= NETIF_F_HW_CSUM|NETIF_F_SG|NETIF_F_FRAGLIST;
+ features |= NETIF_F_HW_CSUM;
arg &= ~TUN_F_CSUM;
if (arg & (TUN_F_TSO4|TUN_F_TSO6)) {
@@ -1195,9 +1198,8 @@ static int set_offload(struct net_device *dev, unsigned long arg)
if (arg)
return -EINVAL;
- dev->features = features;
- if (old_features != dev->features)
- netdev_features_change(dev);
+ tun->set_features = features;
+ netdev_update_features(tun->dev);
return 0;
}
@@ -1262,12 +1264,9 @@ static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
case TUNSETNOCSUM:
/* Disable/Enable checksum */
- if (arg)
- tun->flags |= TUN_NOCHECKSUM;
- else
- tun->flags &= ~TUN_NOCHECKSUM;
- tun_debug(KERN_INFO, tun, "checksum %s\n",
+ /* [unimplemented] */
+ tun_debug(KERN_INFO, tun, "ignored: set checksum %s\n",
arg ? "disabled" : "enabled");
break;
@@ -1316,7 +1315,7 @@ static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
break;
#endif
case TUNSETOFFLOAD:
- ret = set_offload(tun->dev, arg);
+ ret = set_offload(tun, arg);
break;
case TUNSETTXFILTER:
@@ -1548,7 +1547,7 @@ static int tun_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
cmd->supported = 0;
cmd->advertising = 0;
- cmd->speed = SPEED_10;
+ ethtool_cmd_speed_set(cmd, SPEED_10);
cmd->duplex = DUPLEX_FULL;
cmd->port = PORT_TP;
cmd->phy_address = 0;
@@ -1595,30 +1594,12 @@ static void tun_set_msglevel(struct net_device *dev, u32 value)
#endif
}
-static u32 tun_get_rx_csum(struct net_device *dev)
-{
- struct tun_struct *tun = netdev_priv(dev);
- return (tun->flags & TUN_NOCHECKSUM) == 0;
-}
-
-static int tun_set_rx_csum(struct net_device *dev, u32 data)
-{
- struct tun_struct *tun = netdev_priv(dev);
- if (data)
- tun->flags &= ~TUN_NOCHECKSUM;
- else
- tun->flags |= TUN_NOCHECKSUM;
- return 0;
-}
-
static const struct ethtool_ops tun_ethtool_ops = {
.get_settings = tun_get_settings,
.get_drvinfo = tun_get_drvinfo,
.get_msglevel = tun_get_msglevel,
.set_msglevel = tun_set_msglevel,
.get_link = ethtool_op_get_link,
- .get_rx_csum = tun_get_rx_csum,
- .set_rx_csum = tun_set_rx_csum
};
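
The tun hunks above stop tracking checksum state in private TUN_NOCHECKSUM flags and instead route the TUNSETOFFLOAD request through the netdev feature framework: the offloads granted by userspace are kept in tun->set_features and re-applied by the core through .ndo_fix_features whenever netdev_update_features() runs. The fragment below is a minimal, hypothetical sketch of that pattern, not the tun code itself; the example_* names are invented, while the .ndo_fix_features hook, the NETIF_F_* bits and netdev_update_features() are the real API of this kernel generation.

	#include <linux/netdevice.h>

	/* hypothetical set of bits userspace is allowed to toggle */
	#define EXAMPLE_USER_FEATURES	(NETIF_F_HW_CSUM | NETIF_F_TSO)

	struct example_priv {
		u32 allowed;	/* subset of EXAMPLE_USER_FEATURES granted so far */
	};

	static u32 example_fix_features(struct net_device *dev, u32 features)
	{
		struct example_priv *p = netdev_priv(dev);

		/* leave bits outside the user-controlled set alone, clamp the rest */
		return (features & ~EXAMPLE_USER_FEATURES) | (features & p->allowed);
	}

	/* After changing p->allowed (e.g. from an ioctl handler) the driver calls
	 * netdev_update_features(dev); the core then recomputes dev->features via
	 * .ndo_fix_features and notifies userspace if anything changed. */
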
diff --git a/drivers/net/typhoon.c b/drivers/net/typhoon.c
index 7fa5ec2de942..3de4283344e9 100644
--- a/drivers/net/typhoon.c
+++ b/drivers/net/typhoon.c
@@ -846,7 +846,7 @@ typhoon_start_tx(struct sk_buff *skb, struct net_device *dev)
if(typhoon_num_free_tx(txRing) < (numDesc + 2)) {
netif_stop_queue(dev);
- /* A Tx complete IRQ could have gotten inbetween, making
+ /* A Tx complete IRQ could have gotten between, making
* the ring free again. Only need to recheck here, since
* Tx is serialized.
*/
@@ -1050,7 +1050,7 @@ typhoon_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
/* need to get stats to make these link speed/duplex valid */
typhoon_do_get_stats(tp);
- cmd->speed = tp->speed;
+ ethtool_cmd_speed_set(cmd, tp->speed);
cmd->duplex = tp->duplex;
cmd->phy_address = 0;
cmd->transceiver = XCVR_INTERNAL;
@@ -1068,25 +1068,26 @@ static int
typhoon_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
struct typhoon *tp = netdev_priv(dev);
+ u32 speed = ethtool_cmd_speed(cmd);
struct cmd_desc xp_cmd;
__le16 xcvr;
int err;
err = -EINVAL;
- if(cmd->autoneg == AUTONEG_ENABLE) {
+ if (cmd->autoneg == AUTONEG_ENABLE) {
xcvr = TYPHOON_XCVR_AUTONEG;
} else {
- if(cmd->duplex == DUPLEX_HALF) {
- if(cmd->speed == SPEED_10)
+ if (cmd->duplex == DUPLEX_HALF) {
+ if (speed == SPEED_10)
xcvr = TYPHOON_XCVR_10HALF;
- else if(cmd->speed == SPEED_100)
+ else if (speed == SPEED_100)
xcvr = TYPHOON_XCVR_100HALF;
else
goto out;
- } else if(cmd->duplex == DUPLEX_FULL) {
- if(cmd->speed == SPEED_10)
+ } else if (cmd->duplex == DUPLEX_FULL) {
+ if (speed == SPEED_10)
xcvr = TYPHOON_XCVR_10FULL;
- else if(cmd->speed == SPEED_100)
+ else if (speed == SPEED_100)
xcvr = TYPHOON_XCVR_100FULL;
else
goto out;
@@ -1105,7 +1106,7 @@ typhoon_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
tp->speed = 0xff; /* invalid */
tp->duplex = 0xff; /* invalid */
} else {
- tp->speed = cmd->speed;
+ tp->speed = speed;
tp->duplex = cmd->duplex;
}
@@ -1144,28 +1145,6 @@ typhoon_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
return 0;
}
-static u32
-typhoon_get_rx_csum(struct net_device *dev)
-{
- /* For now, we don't allow turning off RX checksums.
- */
- return 1;
-}
-
-static int
-typhoon_set_flags(struct net_device *dev, u32 data)
-{
- /* There's no way to turn off the RX VLAN offloading and stripping
- * on the current 3XP firmware -- it does not respect the offload
- * settings -- so we only allow the user to toggle the TX processing.
- */
- if (!(data & ETH_FLAG_RXVLAN))
- return -EINVAL;
-
- return ethtool_op_set_flags(dev, data,
- ETH_FLAG_RXVLAN | ETH_FLAG_TXVLAN);
-}
-
static void
typhoon_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
{
@@ -1187,13 +1166,7 @@ static const struct ethtool_ops typhoon_ethtool_ops = {
.get_wol = typhoon_get_wol,
.set_wol = typhoon_set_wol,
.get_link = ethtool_op_get_link,
- .get_rx_csum = typhoon_get_rx_csum,
- .set_tx_csum = ethtool_op_set_tx_csum,
- .set_sg = ethtool_op_set_sg,
- .set_tso = ethtool_op_set_tso,
.get_ringparam = typhoon_get_ringparam,
- .set_flags = typhoon_set_flags,
- .get_flags = ethtool_op_get_flags,
};
static int
@@ -2482,10 +2455,15 @@ typhoon_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
/* We can handle scatter gather, up to 16 entries, and
* we can do IP checksumming (only version 4, doh...)
+ *
+ * There's no way to turn off the RX VLAN offloading and stripping
+ * on the current 3XP firmware -- it does not respect the offload
+ * settings -- so we only allow the user to toggle the TX processing.
*/
- dev->features |= NETIF_F_SG | NETIF_F_IP_CSUM;
- dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
- dev->features |= NETIF_F_TSO;
+ dev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO |
+ NETIF_F_HW_VLAN_TX;
+ dev->features = dev->hw_features |
+ NETIF_F_HW_VLAN_RX | NETIF_F_RXCSUM;
if(register_netdev(dev) < 0) {
err_msg = "unable to register netdev";
diff --git a/drivers/net/ucc_geth.h b/drivers/net/ucc_geth.h
index 055b87ab4f07..d12fcad145e9 100644
--- a/drivers/net/ucc_geth.h
+++ b/drivers/net/ucc_geth.h
@@ -80,7 +80,7 @@ struct ucc_geth {
frames) received that were between 128
(Including FCS length==4) and 255 octets */
u32 txok; /* Total number of octets residing in frames
- that where involved in successfull
+ that where involved in successful
transmission */
u16 txcf; /* Total number of PAUSE control frames
transmitted by this MAC */
@@ -759,7 +759,7 @@ struct ucc_geth_hardware_statistics {
frames) received that were between 128
(Including FCS length==4) and 255 octets */
u32 txok; /* Total number of octets residing in frames
- that where involved in successfull
+ that where involved in successful
transmission */
u16 txcf; /* Total number of PAUSE control frames
transmitted by this MAC */
diff --git a/drivers/net/ucc_geth_ethtool.c b/drivers/net/ucc_geth_ethtool.c
index 6f92e48f02d3..537fbc0a4401 100644
--- a/drivers/net/ucc_geth_ethtool.c
+++ b/drivers/net/ucc_geth_ethtool.c
@@ -410,7 +410,6 @@ static const struct ethtool_ops uec_ethtool_ops = {
.set_ringparam = uec_set_ringparam,
.get_pauseparam = uec_get_pauseparam,
.set_pauseparam = uec_set_pauseparam,
- .set_sg = ethtool_op_set_sg,
.get_sset_count = uec_get_sset_count,
.get_strings = uec_get_strings,
.get_ethtool_stats = uec_get_ethtool_stats,
diff --git a/drivers/net/usb/Kconfig b/drivers/net/usb/Kconfig
index 3ec22c307797..9d4f9117260f 100644
--- a/drivers/net/usb/Kconfig
+++ b/drivers/net/usb/Kconfig
@@ -258,7 +258,7 @@ config USB_NET_NET1080
optionally with LEDs that indicate traffic
config USB_NET_PLUSB
- tristate "Prolific PL-2301/2302 based cables"
+ tristate "Prolific PL-2301/2302/25A1 based cables"
# if the handshake/init/reset problems, from original 'plusb',
# are ever resolved ... then remove "experimental"
depends on USB_USBNET && EXPERIMENTAL
diff --git a/drivers/net/usb/asix.c b/drivers/net/usb/asix.c
index 6140b56cce53..6998aa6b7bb7 100644
--- a/drivers/net/usb/asix.c
+++ b/drivers/net/usb/asix.c
@@ -847,7 +847,7 @@ static void ax88172_set_multicast(struct net_device *net)
static int ax88172_link_reset(struct usbnet *dev)
{
u8 mode;
- struct ethtool_cmd ecmd;
+ struct ethtool_cmd ecmd = { .cmd = ETHTOOL_GSET };
mii_check_media(&dev->mii, 1, 1);
mii_ethtool_gset(&dev->mii, &ecmd);
@@ -856,8 +856,8 @@ static int ax88172_link_reset(struct usbnet *dev)
if (ecmd.duplex != DUPLEX_FULL)
mode |= ~AX88172_MEDIUM_FD;
- netdev_dbg(dev->net, "ax88172_link_reset() speed: %d duplex: %d setting mode to 0x%04x\n",
- ecmd.speed, ecmd.duplex, mode);
+ netdev_dbg(dev->net, "ax88172_link_reset() speed: %u duplex: %d setting mode to 0x%04x\n",
+ ethtool_cmd_speed(&ecmd), ecmd.duplex, mode);
asix_write_medium_mode(dev, mode);
@@ -947,20 +947,20 @@ static const struct ethtool_ops ax88772_ethtool_ops = {
static int ax88772_link_reset(struct usbnet *dev)
{
u16 mode;
- struct ethtool_cmd ecmd;
+ struct ethtool_cmd ecmd = { .cmd = ETHTOOL_GSET };
mii_check_media(&dev->mii, 1, 1);
mii_ethtool_gset(&dev->mii, &ecmd);
mode = AX88772_MEDIUM_DEFAULT;
- if (ecmd.speed != SPEED_100)
+ if (ethtool_cmd_speed(&ecmd) != SPEED_100)
mode &= ~AX_MEDIUM_PS;
if (ecmd.duplex != DUPLEX_FULL)
mode &= ~AX_MEDIUM_FD;
- netdev_dbg(dev->net, "ax88772_link_reset() speed: %d duplex: %d setting mode to 0x%04x\n",
- ecmd.speed, ecmd.duplex, mode);
+ netdev_dbg(dev->net, "ax88772_link_reset() speed: %u duplex: %d setting mode to 0x%04x\n",
+ ethtool_cmd_speed(&ecmd), ecmd.duplex, mode);
asix_write_medium_mode(dev, mode);
@@ -1173,18 +1173,20 @@ static int marvell_led_status(struct usbnet *dev, u16 speed)
static int ax88178_link_reset(struct usbnet *dev)
{
u16 mode;
- struct ethtool_cmd ecmd;
+ struct ethtool_cmd ecmd = { .cmd = ETHTOOL_GSET };
struct asix_data *data = (struct asix_data *)&dev->data;
+ u32 speed;
netdev_dbg(dev->net, "ax88178_link_reset()\n");
mii_check_media(&dev->mii, 1, 1);
mii_ethtool_gset(&dev->mii, &ecmd);
mode = AX88178_MEDIUM_DEFAULT;
+ speed = ethtool_cmd_speed(&ecmd);
- if (ecmd.speed == SPEED_1000)
+ if (speed == SPEED_1000)
mode |= AX_MEDIUM_GM;
- else if (ecmd.speed == SPEED_100)
+ else if (speed == SPEED_100)
mode |= AX_MEDIUM_PS;
else
mode &= ~(AX_MEDIUM_PS | AX_MEDIUM_GM);
@@ -1196,13 +1198,13 @@ static int ax88178_link_reset(struct usbnet *dev)
else
mode &= ~AX_MEDIUM_FD;
- netdev_dbg(dev->net, "ax88178_link_reset() speed: %d duplex: %d setting mode to 0x%04x\n",
- ecmd.speed, ecmd.duplex, mode);
+ netdev_dbg(dev->net, "ax88178_link_reset() speed: %u duplex: %d setting mode to 0x%04x\n",
+ speed, ecmd.duplex, mode);
asix_write_medium_mode(dev, mode);
if (data->phymode == PHY_MODE_MARVELL && data->ledmode)
- marvell_led_status(dev, ecmd.speed);
+ marvell_led_status(dev, speed);
return 0;
}
diff --git a/drivers/net/usb/catc.c b/drivers/net/usb/catc.c
index 97687d335903..d7221c4a5dcf 100644
--- a/drivers/net/usb/catc.c
+++ b/drivers/net/usb/catc.c
@@ -686,7 +686,7 @@ static int catc_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
cmd->supported = SUPPORTED_10baseT_Half | SUPPORTED_TP;
cmd->advertising = ADVERTISED_10baseT_Half | ADVERTISED_TP;
- cmd->speed = SPEED_10;
+ ethtool_cmd_speed_set(cmd, SPEED_10);
cmd->duplex = DUPLEX_HALF;
cmd->port = PORT_TP;
cmd->phy_address = 0;
diff --git a/drivers/net/usb/cdc_eem.c b/drivers/net/usb/cdc_eem.c
index 8f128541656d..882f53f708df 100644
--- a/drivers/net/usb/cdc_eem.c
+++ b/drivers/net/usb/cdc_eem.c
@@ -190,7 +190,7 @@ static int eem_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
/*
* EEM packet header format:
- * b0..14: EEM type dependant (Data or Command)
+ * b0..14: EEM type dependent (Data or Command)
* b15: bmType
*/
header = get_unaligned_le16(skb->data);
diff --git a/drivers/net/usb/cdc_ether.c b/drivers/net/usb/cdc_ether.c
index 341f7056a800..c924ea2bce07 100644
--- a/drivers/net/usb/cdc_ether.c
+++ b/drivers/net/usb/cdc_ether.c
@@ -460,7 +460,7 @@ static const struct driver_info cdc_info = {
.manage_power = cdc_manage_power,
};
-static const struct driver_info mbm_info = {
+static const struct driver_info wwan_info = {
.description = "Mobile Broadband Network Device",
.flags = FLAG_WWAN,
.bind = usbnet_cdc_bind,
@@ -471,6 +471,7 @@ static const struct driver_info mbm_info = {
/*-------------------------------------------------------------------------*/
+#define HUAWEI_VENDOR_ID 0x12D1
static const struct usb_device_id products [] = {
/*
@@ -566,7 +567,7 @@ static const struct usb_device_id products [] = {
{
USB_DEVICE_AND_INTERFACE_INFO(0x1004, 0x61aa, USB_CLASS_COMM,
USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE),
- .driver_info = 0,
+ .driver_info = (unsigned long)&wwan_info,
},
/*
@@ -587,8 +588,17 @@ static const struct usb_device_id products [] = {
}, {
USB_INTERFACE_INFO(USB_CLASS_COMM, USB_CDC_SUBCLASS_MDLM,
USB_CDC_PROTO_NONE),
- .driver_info = (unsigned long)&mbm_info,
+ .driver_info = (unsigned long)&wwan_info,
+}, {
+ /* Various Huawei modems with a network port like the UMG1831 */
+ .match_flags = USB_DEVICE_ID_MATCH_VENDOR
+ | USB_DEVICE_ID_MATCH_INT_INFO,
+ .idVendor = HUAWEI_VENDOR_ID,
+ .bInterfaceClass = USB_CLASS_COMM,
+ .bInterfaceSubClass = USB_CDC_SUBCLASS_ETHERNET,
+ .bInterfaceProtocol = 255,
+ .driver_info = (unsigned long)&wwan_info,
},
{ }, // END
};
diff --git a/drivers/net/usb/cdc_ncm.c b/drivers/net/usb/cdc_ncm.c
index 967371f04454..4ab557d0287d 100644
--- a/drivers/net/usb/cdc_ncm.c
+++ b/drivers/net/usb/cdc_ncm.c
@@ -54,7 +54,7 @@
#include <linux/usb/usbnet.h>
#include <linux/usb/cdc.h>
-#define DRIVER_VERSION "7-Feb-2011"
+#define DRIVER_VERSION "06-May-2011"
/* CDC NCM subclass 3.2.1 */
#define USB_CDC_NCM_NDP16_LENGTH_MIN 0x10
@@ -722,7 +722,7 @@ cdc_ncm_fill_tx_frame(struct cdc_ncm_ctx *ctx, struct sk_buff *skb)
} else {
/* reset variables */
- skb_out = alloc_skb(ctx->tx_max, GFP_ATOMIC);
+ skb_out = alloc_skb((ctx->tx_max + 1), GFP_ATOMIC);
if (skb_out == NULL) {
if (skb != NULL) {
dev_kfree_skb_any(skb);
@@ -861,8 +861,11 @@ cdc_ncm_fill_tx_frame(struct cdc_ncm_ctx *ctx, struct sk_buff *skb)
/* store last offset */
last_offset = offset;
- if ((last_offset < ctx->tx_max) && ((last_offset %
- le16_to_cpu(ctx->out_ep->desc.wMaxPacketSize)) == 0)) {
+ if (((last_offset < ctx->tx_max) && ((last_offset %
+ le16_to_cpu(ctx->out_ep->desc.wMaxPacketSize)) == 0)) ||
+ (((last_offset == ctx->tx_max) && ((ctx->tx_max %
+ le16_to_cpu(ctx->out_ep->desc.wMaxPacketSize)) == 0)) &&
+ (ctx->tx_max < le32_to_cpu(ctx->ncm_parm.dwNtbOutMaxSize)))) {
/* force short packet */
*(((u8 *)skb_out->data) + last_offset) = 0;
last_offset++;
diff --git a/drivers/net/usb/dm9601.c b/drivers/net/usb/dm9601.c
index 5002f5be47be..1d93133e9b74 100644
--- a/drivers/net/usb/dm9601.c
+++ b/drivers/net/usb/dm9601.c
@@ -599,13 +599,13 @@ static void dm9601_status(struct usbnet *dev, struct urb *urb)
static int dm9601_link_reset(struct usbnet *dev)
{
- struct ethtool_cmd ecmd;
+ struct ethtool_cmd ecmd = { .cmd = ETHTOOL_GSET };
mii_check_media(&dev->mii, 1, 1);
mii_ethtool_gset(&dev->mii, &ecmd);
- netdev_dbg(dev->net, "link_reset() speed: %d duplex: %d\n",
- ecmd.speed, ecmd.duplex);
+ netdev_dbg(dev->net, "link_reset() speed: %u duplex: %d\n",
+ ethtool_cmd_speed(&ecmd), ecmd.duplex);
return 0;
}
diff --git a/drivers/net/usb/ipheth.c b/drivers/net/usb/ipheth.c
index 7d42f9a2c068..81126ff85e05 100644
--- a/drivers/net/usb/ipheth.c
+++ b/drivers/net/usb/ipheth.c
@@ -65,6 +65,7 @@
#define IPHETH_USBINTF_PROTO 1
#define IPHETH_BUF_SIZE 1516
+#define IPHETH_IP_ALIGN 2 /* padding at front of URB */
#define IPHETH_TX_TIMEOUT (5 * HZ)
#define IPHETH_INTFNUM 2
@@ -202,18 +203,21 @@ static void ipheth_rcvbulk_callback(struct urb *urb)
return;
}
- len = urb->actual_length;
- buf = urb->transfer_buffer;
+ if (urb->actual_length <= IPHETH_IP_ALIGN) {
+ dev->net->stats.rx_length_errors++;
+ return;
+ }
+ len = urb->actual_length - IPHETH_IP_ALIGN;
+ buf = urb->transfer_buffer + IPHETH_IP_ALIGN;
- skb = dev_alloc_skb(NET_IP_ALIGN + len);
+ skb = dev_alloc_skb(len);
if (!skb) {
err("%s: dev_alloc_skb: -ENOMEM", __func__);
dev->net->stats.rx_dropped++;
return;
}
- skb_reserve(skb, NET_IP_ALIGN);
- memcpy(skb_put(skb, len), buf + NET_IP_ALIGN, len - NET_IP_ALIGN);
+ memcpy(skb_put(skb, len), buf, len);
skb->dev = dev->net;
skb->protocol = eth_type_trans(skb, dev->net);
diff --git a/drivers/net/usb/kaweth.c b/drivers/net/usb/kaweth.c
index 7dc84971f26f..ad0298f9b5f9 100644
--- a/drivers/net/usb/kaweth.c
+++ b/drivers/net/usb/kaweth.c
@@ -1221,7 +1221,7 @@ static void kaweth_disconnect(struct usb_interface *intf)
usb_set_intfdata(intf, NULL);
if (!kaweth) {
- dev_warn(&intf->dev, "unregistering non-existant device\n");
+ dev_warn(&intf->dev, "unregistering non-existent device\n");
return;
}
netdev = kaweth->net;
diff --git a/drivers/net/usb/plusb.c b/drivers/net/usb/plusb.c
index 823c53751307..217aec8a768f 100644
--- a/drivers/net/usb/plusb.c
+++ b/drivers/net/usb/plusb.c
@@ -45,6 +45,14 @@
* seems to get wedged under load. Prolific docs are weak, and
* don't identify differences between PL2301 and PL2302, much less
* anything to explain the different PL2302 versions observed.
+ *
+ * NOTE: pl2501 has several modes, including pl2301 and pl2302
+ * compatibility. Some docs suggest the difference between 2301
+ * and 2302 is only to make MS-Windows use a different driver...
+ *
+ * pl25a1 glue based on patch from Tony Gibbs. Prolific "docs" on
+ * this chip are as usual incomplete about what control messages
+ * are supported.
*/
/*
@@ -86,16 +94,20 @@ pl_set_QuickLink_features(struct usbnet *dev, int val)
static int pl_reset(struct usbnet *dev)
{
+ int status;
+
/* some units seem to need this reset, others reject it utterly.
* FIXME be more like "naplink" or windows drivers.
*/
- (void) pl_set_QuickLink_features(dev,
+ status = pl_set_QuickLink_features(dev,
PL_S_EN|PL_RESET_OUT|PL_RESET_IN|PL_PEER_E);
+ if (status != 0 && netif_msg_probe(dev))
+ netif_dbg(dev, link, dev->net, "pl_reset --> %d\n", status);
return 0;
}
static const struct driver_info prolific_info = {
- .description = "Prolific PL-2301/PL-2302",
+ .description = "Prolific PL-2301/PL-2302/PL-25A1",
.flags = FLAG_POINTTOPOINT | FLAG_NO_SETINT,
/* some PL-2302 versions seem to fail usb_set_interface() */
.reset = pl_reset,
@@ -111,6 +123,7 @@ static const struct driver_info prolific_info = {
static const struct usb_device_id products [] = {
+/* full speed cables */
{
USB_DEVICE(0x067b, 0x0000), // PL-2301
.driver_info = (unsigned long) &prolific_info,
@@ -119,6 +132,15 @@ static const struct usb_device_id products [] = {
.driver_info = (unsigned long) &prolific_info,
},
+/* high speed cables */
+{
+ USB_DEVICE(0x067b, 0x25a1), /* PL-25A1, no eeprom */
+ .driver_info = (unsigned long) &prolific_info,
+}, {
+ USB_DEVICE(0x050d, 0x258a), /* Belkin F5U258/F5U279 (PL-25A1) */
+ .driver_info = (unsigned long) &prolific_info,
+},
+
{ }, // END
};
MODULE_DEVICE_TABLE(usb, products);
@@ -134,16 +156,16 @@ static struct usb_driver plusb_driver = {
static int __init plusb_init(void)
{
- return usb_register(&plusb_driver);
+ return usb_register(&plusb_driver);
}
module_init(plusb_init);
static void __exit plusb_exit(void)
{
- usb_deregister(&plusb_driver);
+ usb_deregister(&plusb_driver);
}
module_exit(plusb_exit);
MODULE_AUTHOR("David Brownell");
-MODULE_DESCRIPTION("Prolific PL-2301/2302 USB Host to Host Link Driver");
+MODULE_DESCRIPTION("Prolific PL-2301/2302/25A1 USB Host to Host Link Driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/net/usb/rndis_host.c b/drivers/net/usb/rndis_host.c
index 5994a25c56ac..255d6a424a6b 100644
--- a/drivers/net/usb/rndis_host.c
+++ b/drivers/net/usb/rndis_host.c
@@ -104,8 +104,10 @@ static void rndis_msg_indicate(struct usbnet *dev, struct rndis_indicate *msg,
int rndis_command(struct usbnet *dev, struct rndis_msg_hdr *buf, int buflen)
{
struct cdc_state *info = (void *) &dev->data;
+ struct usb_cdc_notification notification;
int master_ifnum;
int retval;
+ int partial;
unsigned count;
__le32 rsp;
u32 xid = 0, msg_len, request_id;
@@ -133,13 +135,20 @@ int rndis_command(struct usbnet *dev, struct rndis_msg_hdr *buf, int buflen)
if (unlikely(retval < 0 || xid == 0))
return retval;
- // FIXME Seems like some devices discard responses when
- // we time out and cancel our "get response" requests...
- // so, this is fragile. Probably need to poll for status.
+ /* Some devices don't respond on the control channel until
+ * polled on the status channel, so do that first. */
+ if (dev->driver_info->data & RNDIS_DRIVER_DATA_POLL_STATUS) {
+ retval = usb_interrupt_msg(
+ dev->udev,
+ usb_rcvintpipe(dev->udev,
+ dev->status->desc.bEndpointAddress),
+ &notification, sizeof(notification), &partial,
+ RNDIS_CONTROL_TIMEOUT_MS);
+ if (unlikely(retval < 0))
+ return retval;
+ }
- /* ignore status endpoint, just poll the control channel;
- * the request probably completed immediately
- */
+ /* Poll the control channel; the request probably completed immediately */
rsp = buf->msg_type | RNDIS_MSG_COMPLETION;
for (count = 0; count < 10; count++) {
memset(buf, 0, CONTROL_BUFFER_SIZE);
@@ -581,17 +590,33 @@ static const struct driver_info rndis_info = {
.tx_fixup = rndis_tx_fixup,
};
+static const struct driver_info rndis_poll_status_info = {
+ .description = "RNDIS device (poll status before control)",
+ .flags = FLAG_ETHER | FLAG_POINTTOPOINT | FLAG_FRAMING_RN | FLAG_NO_SETINT,
+ .data = RNDIS_DRIVER_DATA_POLL_STATUS,
+ .bind = rndis_bind,
+ .unbind = rndis_unbind,
+ .status = rndis_status,
+ .rx_fixup = rndis_rx_fixup,
+ .tx_fixup = rndis_tx_fixup,
+};
+
/*-------------------------------------------------------------------------*/
static const struct usb_device_id products [] = {
{
+ /* 2Wire HomePortal 1000SW */
+ USB_DEVICE_AND_INTERFACE_INFO(0x1630, 0x0042,
+ USB_CLASS_COMM, 2 /* ACM */, 0x0ff),
+ .driver_info = (unsigned long) &rndis_poll_status_info,
+}, {
/* RNDIS is MSFT's un-official variant of CDC ACM */
USB_INTERFACE_INFO(USB_CLASS_COMM, 2 /* ACM */, 0x0ff),
.driver_info = (unsigned long) &rndis_info,
}, {
/* "ActiveSync" is an undocumented variant of RNDIS, used in WM5 */
USB_INTERFACE_INFO(USB_CLASS_MISC, 1, 1),
- .driver_info = (unsigned long) &rndis_info,
+ .driver_info = (unsigned long) &rndis_poll_status_info,
}, {
/* RNDIS for tethering */
USB_INTERFACE_INFO(USB_CLASS_WIRELESS_CONTROLLER, 1, 3),
diff --git a/drivers/net/usb/rtl8150.c b/drivers/net/usb/rtl8150.c
index e85c89c6706d..041fb7d43c4f 100644
--- a/drivers/net/usb/rtl8150.c
+++ b/drivers/net/usb/rtl8150.c
@@ -843,10 +843,11 @@ static int rtl8150_get_settings(struct net_device *netdev, struct ethtool_cmd *e
get_registers(dev, BMCR, 2, &bmcr);
get_registers(dev, ANLP, 2, &lpa);
if (bmcr & BMCR_ANENABLE) {
+ u32 speed = ((lpa & (LPA_100HALF | LPA_100FULL)) ?
+ SPEED_100 : SPEED_10);
+ ethtool_cmd_speed_set(ecmd, speed);
ecmd->autoneg = AUTONEG_ENABLE;
- ecmd->speed = (lpa & (LPA_100HALF | LPA_100FULL)) ?
- SPEED_100 : SPEED_10;
- if (ecmd->speed == SPEED_100)
+ if (speed == SPEED_100)
ecmd->duplex = (lpa & LPA_100FULL) ?
DUPLEX_FULL : DUPLEX_HALF;
else
@@ -854,8 +855,8 @@ static int rtl8150_get_settings(struct net_device *netdev, struct ethtool_cmd *e
DUPLEX_FULL : DUPLEX_HALF;
} else {
ecmd->autoneg = AUTONEG_DISABLE;
- ecmd->speed = (bmcr & BMCR_SPEED100) ?
- SPEED_100 : SPEED_10;
+ ethtool_cmd_speed_set(ecmd, ((bmcr & BMCR_SPEED100) ?
+ SPEED_100 : SPEED_10));
ecmd->duplex = (bmcr & BMCR_FULLDPLX) ?
DUPLEX_FULL : DUPLEX_HALF;
}
diff --git a/drivers/net/usb/smsc75xx.c b/drivers/net/usb/smsc75xx.c
index 753ee6eb7edd..15b3d6888ae9 100644
--- a/drivers/net/usb/smsc75xx.c
+++ b/drivers/net/usb/smsc75xx.c
@@ -65,7 +65,6 @@ struct smsc75xx_priv {
struct usbnet *dev;
u32 rfe_ctl;
u32 multicast_hash_table[DP_SEL_VHF_HASH_LEN];
- bool use_rx_csum;
struct mutex dataport_mutex;
spinlock_t rfe_ctl_lock;
struct work_struct set_multicast;
@@ -504,7 +503,7 @@ static int smsc75xx_update_flowcontrol(struct usbnet *dev, u8 duplex,
static int smsc75xx_link_reset(struct usbnet *dev)
{
struct mii_if_info *mii = &dev->mii;
- struct ethtool_cmd ecmd;
+ struct ethtool_cmd ecmd = { .cmd = ETHTOOL_GSET };
u16 lcladv, rmtadv;
int ret;
@@ -520,8 +519,9 @@ static int smsc75xx_link_reset(struct usbnet *dev)
lcladv = smsc75xx_mdio_read(dev->net, mii->phy_id, MII_ADVERTISE);
rmtadv = smsc75xx_mdio_read(dev->net, mii->phy_id, MII_LPA);
- netif_dbg(dev, link, dev->net, "speed: %d duplex: %d lcladv: %04x"
- " rmtadv: %04x", ecmd.speed, ecmd.duplex, lcladv, rmtadv);
+ netif_dbg(dev, link, dev->net, "speed: %u duplex: %d lcladv: %04x"
+ " rmtadv: %04x", ethtool_cmd_speed(&ecmd),
+ ecmd.duplex, lcladv, rmtadv);
return smsc75xx_update_flowcontrol(dev, ecmd.duplex, lcladv, rmtadv);
}
@@ -548,28 +548,6 @@ static void smsc75xx_status(struct usbnet *dev, struct urb *urb)
"unexpected interrupt, intdata=0x%08X", intdata);
}
-/* Enable or disable Rx checksum offload engine */
-static int smsc75xx_set_rx_csum_offload(struct usbnet *dev)
-{
- struct smsc75xx_priv *pdata = (struct smsc75xx_priv *)(dev->data[0]);
- unsigned long flags;
- int ret;
-
- spin_lock_irqsave(&pdata->rfe_ctl_lock, flags);
-
- if (pdata->use_rx_csum)
- pdata->rfe_ctl |= RFE_CTL_TCPUDP_CKM | RFE_CTL_IP_CKM;
- else
- pdata->rfe_ctl &= ~(RFE_CTL_TCPUDP_CKM | RFE_CTL_IP_CKM);
-
- spin_unlock_irqrestore(&pdata->rfe_ctl_lock, flags);
-
- ret = smsc75xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl);
- check_warn_return(ret, "Error writing RFE_CTL");
-
- return 0;
-}
-
static int smsc75xx_ethtool_get_eeprom_len(struct net_device *net)
{
return MAX_EEPROM_SIZE;
@@ -599,34 +577,6 @@ static int smsc75xx_ethtool_set_eeprom(struct net_device *netdev,
return smsc75xx_write_eeprom(dev, ee->offset, ee->len, data);
}
-static u32 smsc75xx_ethtool_get_rx_csum(struct net_device *netdev)
-{
- struct usbnet *dev = netdev_priv(netdev);
- struct smsc75xx_priv *pdata = (struct smsc75xx_priv *)(dev->data[0]);
-
- return pdata->use_rx_csum;
-}
-
-static int smsc75xx_ethtool_set_rx_csum(struct net_device *netdev, u32 val)
-{
- struct usbnet *dev = netdev_priv(netdev);
- struct smsc75xx_priv *pdata = (struct smsc75xx_priv *)(dev->data[0]);
-
- pdata->use_rx_csum = !!val;
-
- return smsc75xx_set_rx_csum_offload(dev);
-}
-
-static int smsc75xx_ethtool_set_tso(struct net_device *netdev, u32 data)
-{
- if (data)
- netdev->features |= NETIF_F_TSO | NETIF_F_TSO6;
- else
- netdev->features &= ~(NETIF_F_TSO | NETIF_F_TSO6);
-
- return 0;
-}
-
static const struct ethtool_ops smsc75xx_ethtool_ops = {
.get_link = usbnet_get_link,
.nway_reset = usbnet_nway_reset,
@@ -638,12 +588,6 @@ static const struct ethtool_ops smsc75xx_ethtool_ops = {
.get_eeprom_len = smsc75xx_ethtool_get_eeprom_len,
.get_eeprom = smsc75xx_ethtool_get_eeprom,
.set_eeprom = smsc75xx_ethtool_set_eeprom,
- .get_tx_csum = ethtool_op_get_tx_csum,
- .set_tx_csum = ethtool_op_set_tx_hw_csum,
- .get_rx_csum = smsc75xx_ethtool_get_rx_csum,
- .set_rx_csum = smsc75xx_ethtool_set_rx_csum,
- .get_tso = ethtool_op_get_tso,
- .set_tso = smsc75xx_ethtool_set_tso,
};
static int smsc75xx_ioctl(struct net_device *netdev, struct ifreq *rq, int cmd)
@@ -782,6 +726,30 @@ static int smsc75xx_change_mtu(struct net_device *netdev, int new_mtu)
return usbnet_change_mtu(netdev, new_mtu);
}
+/* Enable or disable Rx checksum offload engine */
+static int smsc75xx_set_features(struct net_device *netdev, u32 features)
+{
+ struct usbnet *dev = netdev_priv(netdev);
+ struct smsc75xx_priv *pdata = (struct smsc75xx_priv *)(dev->data[0]);
+ unsigned long flags;
+ int ret;
+
+ spin_lock_irqsave(&pdata->rfe_ctl_lock, flags);
+
+ if (features & NETIF_F_RXCSUM)
+ pdata->rfe_ctl |= RFE_CTL_TCPUDP_CKM | RFE_CTL_IP_CKM;
+ else
+ pdata->rfe_ctl &= ~(RFE_CTL_TCPUDP_CKM | RFE_CTL_IP_CKM);
+
+ spin_unlock_irqrestore(&pdata->rfe_ctl_lock, flags);
+ /* it's racing here! */
+
+ ret = smsc75xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl);
+ check_warn_return(ret, "Error writing RFE_CTL");
+
+ return 0;
+}
+
static int smsc75xx_reset(struct usbnet *dev)
{
struct smsc75xx_priv *pdata = (struct smsc75xx_priv *)(dev->data[0]);
@@ -960,11 +928,7 @@ static int smsc75xx_reset(struct usbnet *dev)
netif_dbg(dev, ifup, dev->net, "RFE_CTL set to 0x%08x", pdata->rfe_ctl);
/* Enable or disable checksum offload engines */
- ethtool_op_set_tx_hw_csum(dev->net, DEFAULT_TX_CSUM_ENABLE);
- ret = smsc75xx_set_rx_csum_offload(dev);
- check_warn_return(ret, "Failed to set rx csum offload: %d", ret);
-
- smsc75xx_ethtool_set_tso(dev->net, DEFAULT_TSO_ENABLE);
+ smsc75xx_set_features(dev->net, dev->net->features);
smsc75xx_set_multicast(dev->net);
@@ -1037,6 +1001,7 @@ static const struct net_device_ops smsc75xx_netdev_ops = {
.ndo_validate_addr = eth_validate_addr,
.ndo_do_ioctl = smsc75xx_ioctl,
.ndo_set_multicast_list = smsc75xx_set_multicast,
+ .ndo_set_features = smsc75xx_set_features,
};
static int smsc75xx_bind(struct usbnet *dev, struct usb_interface *intf)
@@ -1065,10 +1030,17 @@ static int smsc75xx_bind(struct usbnet *dev, struct usb_interface *intf)
INIT_WORK(&pdata->set_multicast, smsc75xx_deferred_multicast_write);
- pdata->use_rx_csum = DEFAULT_RX_CSUM_ENABLE;
+ if (DEFAULT_TX_CSUM_ENABLE) {
+ dev->net->features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
+ if (DEFAULT_TSO_ENABLE)
+ dev->net->features |= NETIF_F_SG |
+ NETIF_F_TSO | NETIF_F_TSO6;
+ }
+ if (DEFAULT_RX_CSUM_ENABLE)
+ dev->net->features |= NETIF_F_RXCSUM;
- /* We have to advertise SG otherwise TSO cannot be enabled */
- dev->net->features |= NETIF_F_SG;
+ dev->net->hw_features = NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
+ NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_RXCSUM;
/* Init all registers */
ret = smsc75xx_reset(dev);
@@ -1091,10 +1063,11 @@ static void smsc75xx_unbind(struct usbnet *dev, struct usb_interface *intf)
}
}
-static void smsc75xx_rx_csum_offload(struct sk_buff *skb, u32 rx_cmd_a,
- u32 rx_cmd_b)
+static void smsc75xx_rx_csum_offload(struct usbnet *dev, struct sk_buff *skb,
+ u32 rx_cmd_a, u32 rx_cmd_b)
{
- if (unlikely(rx_cmd_a & RX_CMD_A_LCSM)) {
+ if (!(dev->net->features & NETIF_F_RXCSUM) ||
+ unlikely(rx_cmd_a & RX_CMD_A_LCSM)) {
skb->ip_summed = CHECKSUM_NONE;
} else {
skb->csum = ntohs((u16)(rx_cmd_b >> RX_CMD_B_CSUM_SHIFT));
@@ -1104,8 +1077,6 @@ static void smsc75xx_rx_csum_offload(struct sk_buff *skb, u32 rx_cmd_a,
static int smsc75xx_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
{
- struct smsc75xx_priv *pdata = (struct smsc75xx_priv *)(dev->data[0]);
-
while (skb->len > 0) {
u32 rx_cmd_a, rx_cmd_b, align_count, size;
struct sk_buff *ax_skb;
@@ -1145,11 +1116,8 @@ static int smsc75xx_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
/* last frame in this batch */
if (skb->len == size) {
- if (pdata->use_rx_csum)
- smsc75xx_rx_csum_offload(skb, rx_cmd_a,
- rx_cmd_b);
- else
- skb->ip_summed = CHECKSUM_NONE;
+ smsc75xx_rx_csum_offload(dev, skb, rx_cmd_a,
+ rx_cmd_b);
skb_trim(skb, skb->len - 4); /* remove fcs */
skb->truesize = size + sizeof(struct sk_buff);
@@ -1167,11 +1135,8 @@ static int smsc75xx_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
ax_skb->data = packet;
skb_set_tail_pointer(ax_skb, size);
- if (pdata->use_rx_csum)
- smsc75xx_rx_csum_offload(ax_skb, rx_cmd_a,
- rx_cmd_b);
- else
- ax_skb->ip_summed = CHECKSUM_NONE;
+ smsc75xx_rx_csum_offload(dev, ax_skb, rx_cmd_a,
+ rx_cmd_b);
skb_trim(ax_skb, ax_skb->len - 4); /* remove fcs */
ax_skb->truesize = size + sizeof(struct sk_buff);
diff --git a/drivers/net/usb/smsc95xx.c b/drivers/net/usb/smsc95xx.c
index 727874d9deb6..f74f3ce71526 100644
--- a/drivers/net/usb/smsc95xx.c
+++ b/drivers/net/usb/smsc95xx.c
@@ -52,8 +52,6 @@ struct smsc95xx_priv {
u32 hash_hi;
u32 hash_lo;
spinlock_t mac_cr_lock;
- bool use_tx_csum;
- bool use_rx_csum;
};
struct usb_context {
@@ -459,7 +457,7 @@ static int smsc95xx_link_reset(struct usbnet *dev)
{
struct smsc95xx_priv *pdata = (struct smsc95xx_priv *)(dev->data[0]);
struct mii_if_info *mii = &dev->mii;
- struct ethtool_cmd ecmd;
+ struct ethtool_cmd ecmd = { .cmd = ETHTOOL_GSET };
unsigned long flags;
u16 lcladv, rmtadv;
u32 intdata;
@@ -474,8 +472,9 @@ static int smsc95xx_link_reset(struct usbnet *dev)
lcladv = smsc95xx_mdio_read(dev->net, mii->phy_id, MII_ADVERTISE);
rmtadv = smsc95xx_mdio_read(dev->net, mii->phy_id, MII_LPA);
- netif_dbg(dev, link, dev->net, "speed: %d duplex: %d lcladv: %04x rmtadv: %04x\n",
- ecmd.speed, ecmd.duplex, lcladv, rmtadv);
+ netif_dbg(dev, link, dev->net,
+ "speed: %u duplex: %d lcladv: %04x rmtadv: %04x\n",
+ ethtool_cmd_speed(&ecmd), ecmd.duplex, lcladv, rmtadv);
spin_lock_irqsave(&pdata->mac_cr_lock, flags);
if (ecmd.duplex != DUPLEX_FULL) {
@@ -517,22 +516,24 @@ static void smsc95xx_status(struct usbnet *dev, struct urb *urb)
}
/* Enable or disable Tx & Rx checksum offload engines */
-static int smsc95xx_set_csums(struct usbnet *dev)
+static int smsc95xx_set_features(struct net_device *netdev, u32 features)
{
- struct smsc95xx_priv *pdata = (struct smsc95xx_priv *)(dev->data[0]);
+ struct usbnet *dev = netdev_priv(netdev);
u32 read_buf;
- int ret = smsc95xx_read_reg(dev, COE_CR, &read_buf);
+ int ret;
+
+ ret = smsc95xx_read_reg(dev, COE_CR, &read_buf);
if (ret < 0) {
netdev_warn(dev->net, "Failed to read COE_CR: %d\n", ret);
return ret;
}
- if (pdata->use_tx_csum)
+ if (features & NETIF_F_HW_CSUM)
read_buf |= Tx_COE_EN_;
else
read_buf &= ~Tx_COE_EN_;
- if (pdata->use_rx_csum)
+ if (features & NETIF_F_RXCSUM)
read_buf |= Rx_COE_EN_;
else
read_buf &= ~Rx_COE_EN_;
@@ -576,43 +577,6 @@ static int smsc95xx_ethtool_set_eeprom(struct net_device *netdev,
return smsc95xx_write_eeprom(dev, ee->offset, ee->len, data);
}
-static u32 smsc95xx_ethtool_get_rx_csum(struct net_device *netdev)
-{
- struct usbnet *dev = netdev_priv(netdev);
- struct smsc95xx_priv *pdata = (struct smsc95xx_priv *)(dev->data[0]);
-
- return pdata->use_rx_csum;
-}
-
-static int smsc95xx_ethtool_set_rx_csum(struct net_device *netdev, u32 val)
-{
- struct usbnet *dev = netdev_priv(netdev);
- struct smsc95xx_priv *pdata = (struct smsc95xx_priv *)(dev->data[0]);
-
- pdata->use_rx_csum = !!val;
-
- return smsc95xx_set_csums(dev);
-}
-
-static u32 smsc95xx_ethtool_get_tx_csum(struct net_device *netdev)
-{
- struct usbnet *dev = netdev_priv(netdev);
- struct smsc95xx_priv *pdata = (struct smsc95xx_priv *)(dev->data[0]);
-
- return pdata->use_tx_csum;
-}
-
-static int smsc95xx_ethtool_set_tx_csum(struct net_device *netdev, u32 val)
-{
- struct usbnet *dev = netdev_priv(netdev);
- struct smsc95xx_priv *pdata = (struct smsc95xx_priv *)(dev->data[0]);
-
- pdata->use_tx_csum = !!val;
-
- ethtool_op_set_tx_hw_csum(netdev, pdata->use_tx_csum);
- return smsc95xx_set_csums(dev);
-}
-
static const struct ethtool_ops smsc95xx_ethtool_ops = {
.get_link = usbnet_get_link,
.nway_reset = usbnet_nway_reset,
@@ -624,10 +588,6 @@ static const struct ethtool_ops smsc95xx_ethtool_ops = {
.get_eeprom_len = smsc95xx_ethtool_get_eeprom_len,
.get_eeprom = smsc95xx_ethtool_get_eeprom,
.set_eeprom = smsc95xx_ethtool_set_eeprom,
- .get_tx_csum = smsc95xx_ethtool_get_tx_csum,
- .set_tx_csum = smsc95xx_ethtool_set_tx_csum,
- .get_rx_csum = smsc95xx_ethtool_get_rx_csum,
- .set_rx_csum = smsc95xx_ethtool_set_rx_csum,
};
static int smsc95xx_ioctl(struct net_device *netdev, struct ifreq *rq, int cmd)
@@ -730,7 +690,7 @@ static int smsc95xx_phy_initialize(struct usbnet *dev)
msleep(10);
bmcr = smsc95xx_mdio_read(dev->net, dev->mii.phy_id, MII_BMCR);
timeout++;
- } while ((bmcr & MII_BMCR) && (timeout < 100));
+ } while ((bmcr & BMCR_RESET) && (timeout < 100));
if (timeout >= 100) {
netdev_warn(dev->net, "timeout on PHY Reset");
@@ -755,7 +715,6 @@ static int smsc95xx_phy_initialize(struct usbnet *dev)
static int smsc95xx_reset(struct usbnet *dev)
{
struct smsc95xx_priv *pdata = (struct smsc95xx_priv *)(dev->data[0]);
- struct net_device *netdev = dev->net;
u32 read_buf, write_buf, burst_cap;
int ret = 0, timeout;
@@ -975,12 +934,7 @@ static int smsc95xx_reset(struct usbnet *dev)
}
/* Enable or disable checksum offload engines */
- ethtool_op_set_tx_hw_csum(netdev, pdata->use_tx_csum);
- ret = smsc95xx_set_csums(dev);
- if (ret < 0) {
- netdev_warn(dev->net, "Failed to set csum offload: %d\n", ret);
- return ret;
- }
+ smsc95xx_set_features(dev->net, dev->net->features);
smsc95xx_set_multicast(dev->net);
@@ -1019,6 +973,7 @@ static const struct net_device_ops smsc95xx_netdev_ops = {
.ndo_validate_addr = eth_validate_addr,
.ndo_do_ioctl = smsc95xx_ioctl,
.ndo_set_multicast_list = smsc95xx_set_multicast,
+ .ndo_set_features = smsc95xx_set_features,
};
static int smsc95xx_bind(struct usbnet *dev, struct usb_interface *intf)
@@ -1045,8 +1000,12 @@ static int smsc95xx_bind(struct usbnet *dev, struct usb_interface *intf)
spin_lock_init(&pdata->mac_cr_lock);
- pdata->use_tx_csum = DEFAULT_TX_CSUM_ENABLE;
- pdata->use_rx_csum = DEFAULT_RX_CSUM_ENABLE;
+ if (DEFAULT_TX_CSUM_ENABLE)
+ dev->net->features |= NETIF_F_HW_CSUM;
+ if (DEFAULT_RX_CSUM_ENABLE)
+ dev->net->features |= NETIF_F_RXCSUM;
+
+ dev->net->hw_features = NETIF_F_HW_CSUM | NETIF_F_RXCSUM;
smsc95xx_init_mac_address(dev);
@@ -1056,7 +1015,7 @@ static int smsc95xx_bind(struct usbnet *dev, struct usb_interface *intf)
dev->net->netdev_ops = &smsc95xx_netdev_ops;
dev->net->ethtool_ops = &smsc95xx_ethtool_ops;
dev->net->flags |= IFF_MULTICAST;
- dev->net->hard_header_len += SMSC95XX_TX_OVERHEAD;
+ dev->net->hard_header_len += SMSC95XX_TX_OVERHEAD_CSUM;
return 0;
}
@@ -1080,8 +1039,6 @@ static void smsc95xx_rx_csum_offload(struct sk_buff *skb)
static int smsc95xx_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
{
- struct smsc95xx_priv *pdata = (struct smsc95xx_priv *)(dev->data[0]);
-
while (skb->len > 0) {
u32 header, align_count;
struct sk_buff *ax_skb;
@@ -1123,7 +1080,7 @@ static int smsc95xx_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
/* last frame in this batch */
if (skb->len == size) {
- if (pdata->use_rx_csum)
+ if (dev->net->features & NETIF_F_RXCSUM)
smsc95xx_rx_csum_offload(skb);
skb_trim(skb, skb->len - 4); /* remove fcs */
skb->truesize = size + sizeof(struct sk_buff);
@@ -1141,7 +1098,7 @@ static int smsc95xx_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
ax_skb->data = packet;
skb_set_tail_pointer(ax_skb, size);
- if (pdata->use_rx_csum)
+ if (dev->net->features & NETIF_F_RXCSUM)
smsc95xx_rx_csum_offload(ax_skb);
skb_trim(ax_skb, ax_skb->len - 4); /* remove fcs */
ax_skb->truesize = size + sizeof(struct sk_buff);
@@ -1174,8 +1131,7 @@ static u32 smsc95xx_calc_csum_preamble(struct sk_buff *skb)
static struct sk_buff *smsc95xx_tx_fixup(struct usbnet *dev,
struct sk_buff *skb, gfp_t flags)
{
- struct smsc95xx_priv *pdata = (struct smsc95xx_priv *)(dev->data[0]);
- bool csum = pdata->use_tx_csum && (skb->ip_summed == CHECKSUM_PARTIAL);
+ bool csum = skb->ip_summed == CHECKSUM_PARTIAL;
int overhead = csum ? SMSC95XX_TX_OVERHEAD_CSUM : SMSC95XX_TX_OVERHEAD;
u32 tx_cmd_a, tx_cmd_b;
@@ -1313,6 +1269,21 @@ static const struct usb_device_id products[] = {
USB_DEVICE(0x0424, 0x9909),
.driver_info = (unsigned long) &smsc95xx_info,
},
+ {
+ /* SMSC LAN9530 USB Ethernet Device */
+ USB_DEVICE(0x0424, 0x9530),
+ .driver_info = (unsigned long) &smsc95xx_info,
+ },
+ {
+ /* SMSC LAN9730 USB Ethernet Device */
+ USB_DEVICE(0x0424, 0x9730),
+ .driver_info = (unsigned long) &smsc95xx_info,
+ },
+ {
+ /* SMSC LAN89530 USB Ethernet Device */
+ USB_DEVICE(0x0424, 0x9E08),
+ .driver_info = (unsigned long) &smsc95xx_info,
+ },
{ }, /* END */
};
MODULE_DEVICE_TABLE(usb, products);
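
The smsc75xx and smsc95xx hunks above follow the same hw_features conversion: the toggleable offloads move into dev->hw_features, the private use_rx_csum/use_tx_csum flags disappear in favour of checking dev->features on the hot path, and the old get/set_rx_csum ethtool hooks are replaced by a single .ndo_set_features callback that programs the hardware. Below is a minimal, hypothetical sketch of that shape; the example_* names and register bit are invented, while the hooks and NETIF_F_* bits are the real API of this kernel version.

	#include <linux/netdevice.h>

	#define EXAMPLE_RX_CSUM_EN	0x00000001	/* hypothetical device bit */

	struct example_priv {
		u32 coe_shadow;		/* hypothetical shadow of a csum-offload register */
	};

	static int example_set_features(struct net_device *netdev, u32 features)
	{
		struct example_priv *p = netdev_priv(netdev);

		if (features & NETIF_F_RXCSUM)
			p->coe_shadow |= EXAMPLE_RX_CSUM_EN;
		else
			p->coe_shadow &= ~EXAMPLE_RX_CSUM_EN;

		/* a real driver would write p->coe_shadow back to the device here */
		return 0;
	}

	static const struct net_device_ops example_netdev_ops = {
		.ndo_set_features	= example_set_features,
	};

	/* At probe/bind time the driver advertises what may be toggled and what
	 * is on by default:
	 *	netdev->hw_features = NETIF_F_RXCSUM;
	 *	netdev->features   |= NETIF_F_RXCSUM;
	 * and the receive path simply tests (netdev->features & NETIF_F_RXCSUM). */
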
diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c
index 069c1cf0fdf7..e6dd24466965 100644
--- a/drivers/net/usb/usbnet.c
+++ b/drivers/net/usb/usbnet.c
@@ -645,6 +645,7 @@ int usbnet_stop (struct net_device *net)
struct driver_info *info = dev->driver_info;
int retval;
+ clear_bit(EVENT_DEV_OPEN, &dev->flags);
netif_stop_queue (net);
netif_info(dev, ifdown, dev->net,
@@ -736,6 +737,7 @@ int usbnet_open (struct net_device *net)
}
}
+ set_bit(EVENT_DEV_OPEN, &dev->flags);
netif_start_queue (net);
netif_info(dev, ifup, dev->net,
"open: enable queueing (rx %d, tx %d) mtu %d %s framing\n",
@@ -1259,6 +1261,9 @@ void usbnet_disconnect (struct usb_interface *intf)
if (dev->driver_info->unbind)
dev->driver_info->unbind (dev, intf);
+ usb_kill_urb(dev->interrupt);
+ usb_free_urb(dev->interrupt);
+
free_netdev(net);
usb_put_dev (xdev);
}
@@ -1498,6 +1503,10 @@ int usbnet_resume (struct usb_interface *intf)
int retval;
if (!--dev->suspend_count) {
+ /* resume interrupt URBs */
+ if (dev->interrupt && test_bit(EVENT_DEV_OPEN, &dev->flags))
+ usb_submit_urb(dev->interrupt, GFP_NOIO);
+
spin_lock_irq(&dev->txq.lock);
while ((res = usb_get_from_anchor(&dev->deferred))) {
@@ -1516,9 +1525,12 @@ int usbnet_resume (struct usb_interface *intf)
smp_mb();
clear_bit(EVENT_DEV_ASLEEP, &dev->flags);
spin_unlock_irq(&dev->txq.lock);
- if (!(dev->txq.qlen >= TX_QLEN(dev)))
- netif_start_queue(dev->net);
- tasklet_schedule (&dev->bh);
+
+ if (test_bit(EVENT_DEV_OPEN, &dev->flags)) {
+ if (!(dev->txq.qlen >= TX_QLEN(dev)))
+ netif_start_queue(dev->net);
+ tasklet_schedule (&dev->bh);
+ }
}
return 0;
}
@@ -1529,9 +1541,9 @@ EXPORT_SYMBOL_GPL(usbnet_resume);
static int __init usbnet_init(void)
{
- /* compiler should optimize this out */
- BUILD_BUG_ON (sizeof (((struct sk_buff *)0)->cb)
- < sizeof (struct skb_data));
+ /* Compiler should optimize this out. */
+ BUILD_BUG_ON(
+ FIELD_SIZEOF(struct sk_buff, cb) < sizeof(struct skb_data));
random_ether_addr(node_id);
return 0;
diff --git a/drivers/net/veth.c b/drivers/net/veth.c
index 2de9b90c5f8f..8461576fa015 100644
--- a/drivers/net/veth.c
+++ b/drivers/net/veth.c
@@ -22,7 +22,6 @@
#define MIN_MTU 68 /* Min L3 MTU */
#define MAX_MTU 65535 /* Max L3 MTU (arbitrary) */
-#define MTU_PAD (ETH_HLEN + 4) /* Max difference between L2 and L3 size MTU */
struct veth_net_stats {
unsigned long rx_packets;
@@ -36,7 +35,6 @@ struct veth_net_stats {
struct veth_priv {
struct net_device *peer;
struct veth_net_stats __percpu *stats;
- unsigned ip_summed;
};
/*
@@ -53,7 +51,7 @@ static int veth_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
cmd->supported = 0;
cmd->advertising = 0;
- cmd->speed = SPEED_10000;
+ ethtool_cmd_speed_set(cmd, SPEED_10000);
cmd->duplex = DUPLEX_FULL;
cmd->port = PORT_TP;
cmd->phy_address = 0;
@@ -99,47 +97,10 @@ static void veth_get_ethtool_stats(struct net_device *dev,
data[0] = priv->peer->ifindex;
}
-static u32 veth_get_rx_csum(struct net_device *dev)
-{
- struct veth_priv *priv;
-
- priv = netdev_priv(dev);
- return priv->ip_summed == CHECKSUM_UNNECESSARY;
-}
-
-static int veth_set_rx_csum(struct net_device *dev, u32 data)
-{
- struct veth_priv *priv;
-
- priv = netdev_priv(dev);
- priv->ip_summed = data ? CHECKSUM_UNNECESSARY : CHECKSUM_NONE;
- return 0;
-}
-
-static u32 veth_get_tx_csum(struct net_device *dev)
-{
- return (dev->features & NETIF_F_NO_CSUM) != 0;
-}
-
-static int veth_set_tx_csum(struct net_device *dev, u32 data)
-{
- if (data)
- dev->features |= NETIF_F_NO_CSUM;
- else
- dev->features &= ~NETIF_F_NO_CSUM;
- return 0;
-}
-
static const struct ethtool_ops veth_ethtool_ops = {
.get_settings = veth_get_settings,
.get_drvinfo = veth_get_drvinfo,
.get_link = ethtool_op_get_link,
- .get_rx_csum = veth_get_rx_csum,
- .set_rx_csum = veth_set_rx_csum,
- .get_tx_csum = veth_get_tx_csum,
- .set_tx_csum = veth_set_tx_csum,
- .get_sg = ethtool_op_get_sg,
- .set_sg = ethtool_op_set_sg,
.get_strings = veth_get_strings,
.get_sset_count = veth_get_sset_count,
.get_ethtool_stats = veth_get_ethtool_stats,
@@ -168,8 +129,9 @@ static netdev_tx_t veth_xmit(struct sk_buff *skb, struct net_device *dev)
/* don't change ip_summed == CHECKSUM_PARTIAL, as that
will cause bad checksum on forwarded packets */
- if (skb->ip_summed == CHECKSUM_NONE)
- skb->ip_summed = rcv_priv->ip_summed;
+ if (skb->ip_summed == CHECKSUM_NONE &&
+ rcv->features & NETIF_F_RXCSUM)
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
length = skb->len;
if (dev_forward_skb(rcv, skb) != NET_RX_SUCCESS)
@@ -304,6 +266,8 @@ static void veth_setup(struct net_device *dev)
dev->ethtool_ops = &veth_ethtool_ops;
dev->features |= NETIF_F_LLTX;
dev->destructor = veth_dev_free;
+
+ dev->hw_features = NETIF_F_NO_CSUM | NETIF_F_SG | NETIF_F_RXCSUM;
}
/*
@@ -403,6 +367,17 @@ static int veth_newlink(struct net *src_net, struct net_device *dev,
if (tb[IFLA_ADDRESS] == NULL)
random_ether_addr(dev->dev_addr);
+ if (tb[IFLA_IFNAME])
+ nla_strlcpy(dev->name, tb[IFLA_IFNAME], IFNAMSIZ);
+ else
+ snprintf(dev->name, IFNAMSIZ, DRV_NAME "%%d");
+
+ if (strchr(dev->name, '%')) {
+ err = dev_alloc_name(dev, dev->name);
+ if (err < 0)
+ goto err_alloc_name;
+ }
+
err = register_netdevice(dev);
if (err < 0)
goto err_register_dev;
@@ -422,6 +397,7 @@ static int veth_newlink(struct net *src_net, struct net_device *dev,
err_register_dev:
/* nothing to do */
+err_alloc_name:
err_configure_peer:
unregister_netdevice(peer);
return err;
diff --git a/drivers/net/via-rhine.c b/drivers/net/via-rhine.c
index 5e7f069eab53..7f23ab913fd9 100644
--- a/drivers/net/via-rhine.c
+++ b/drivers/net/via-rhine.c
@@ -29,6 +29,8 @@
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#define DRV_NAME "via-rhine"
#define DRV_VERSION "1.5.0"
#define DRV_RELDATE "2010-10-09"
@@ -37,6 +39,7 @@
/* A few user-configurable values.
These may be modified when a driver module is loaded. */
+#define DEBUG
static int debug = 1; /* 1 normal messages, 0 quiet .. 7 verbose. */
static int max_interrupt_work = 20;
@@ -111,8 +114,7 @@ static const int multicast_filter_limit = 32;
/* These identify the driver base version and may not be removed. */
static const char version[] __devinitconst =
- KERN_INFO DRV_NAME ".c:v1.10-LK" DRV_VERSION " " DRV_RELDATE
- " Written by Donald Becker\n";
+ "v1.10-LK" DRV_VERSION " " DRV_RELDATE " Written by Donald Becker";
/* This driver was written to use PCI memory space. Some early versions
of the Rhine may only work correctly with I/O space accesses. */
@@ -495,14 +497,15 @@ static void rhine_set_vlan_cam_mask(void __iomem *ioaddr, u32 mask);
static void rhine_init_cam_filter(struct net_device *dev);
static void rhine_update_vcam(struct net_device *dev);
-#define RHINE_WAIT_FOR(condition) do { \
- int i=1024; \
- while (!(condition) && --i) \
- ; \
- if (debug > 1 && i < 512) \
- printk(KERN_INFO "%s: %4d cycles used @ %s:%d\n", \
- DRV_NAME, 1024-i, __func__, __LINE__); \
-} while(0)
+#define RHINE_WAIT_FOR(condition) \
+do { \
+ int i = 1024; \
+ while (!(condition) && --i) \
+ ; \
+ if (debug > 1 && i < 512) \
+ pr_info("%4d cycles used @ %s:%d\n", \
+ 1024 - i, __func__, __LINE__); \
+} while (0)
static inline u32 get_intr_status(struct net_device *dev)
{
@@ -571,8 +574,8 @@ static void rhine_power_init(struct net_device *dev)
default:
reason = "Unknown";
}
- printk(KERN_INFO "%s: Woke system up. Reason: %s.\n",
- DRV_NAME, reason);
+ netdev_info(dev, "Woke system up. Reason: %s\n",
+ reason);
}
}
}
@@ -586,8 +589,7 @@ static void rhine_chip_reset(struct net_device *dev)
IOSYNC;
if (ioread8(ioaddr + ChipCmd1) & Cmd1Reset) {
- printk(KERN_INFO "%s: Reset not complete yet. "
- "Trying harder.\n", DRV_NAME);
+ netdev_info(dev, "Reset not complete yet. Trying harder.\n");
/* Force reset */
if (rp->quirks & rqForceReset)
@@ -598,9 +600,9 @@ static void rhine_chip_reset(struct net_device *dev)
}
if (debug > 1)
- printk(KERN_INFO "%s: Reset %s.\n", dev->name,
- (ioread8(ioaddr + ChipCmd1) & Cmd1Reset) ?
- "failed" : "succeeded");
+ netdev_info(dev, "Reset %s\n",
+ (ioread8(ioaddr + ChipCmd1) & Cmd1Reset) ?
+ "failed" : "succeeded");
}
#ifdef USE_MMIO
@@ -728,9 +730,7 @@ static int __devinit rhine_init_one(struct pci_dev *pdev,
/* when built into the kernel, we only print version if device is found */
#ifndef MODULE
- static int printed_version;
- if (!printed_version++)
- printk(version);
+ pr_info_once("%s\n", version);
#endif
io_size = 256;
@@ -765,8 +765,8 @@ static int __devinit rhine_init_one(struct pci_dev *pdev,
/* this should always be supported */
rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
if (rc) {
- printk(KERN_ERR "32-bit PCI DMA addresses not supported by "
- "the card!?\n");
+ dev_err(&pdev->dev,
+ "32-bit PCI DMA addresses not supported by the card!?\n");
goto err_out;
}
@@ -774,7 +774,7 @@ static int __devinit rhine_init_one(struct pci_dev *pdev,
if ((pci_resource_len(pdev, 0) < io_size) ||
(pci_resource_len(pdev, 1) < io_size)) {
rc = -EIO;
- printk(KERN_ERR "Insufficient PCI resources, aborting\n");
+ dev_err(&pdev->dev, "Insufficient PCI resources, aborting\n");
goto err_out;
}
@@ -786,7 +786,7 @@ static int __devinit rhine_init_one(struct pci_dev *pdev,
dev = alloc_etherdev(sizeof(struct rhine_private));
if (!dev) {
rc = -ENOMEM;
- printk(KERN_ERR "alloc_etherdev failed\n");
+ dev_err(&pdev->dev, "alloc_etherdev failed\n");
goto err_out;
}
SET_NETDEV_DEV(dev, &pdev->dev);
@@ -804,8 +804,9 @@ static int __devinit rhine_init_one(struct pci_dev *pdev,
ioaddr = pci_iomap(pdev, bar, io_size);
if (!ioaddr) {
rc = -EIO;
- printk(KERN_ERR "ioremap failed for device %s, region 0x%X "
- "@ 0x%lX\n", pci_name(pdev), io_size, memaddr);
+ dev_err(&pdev->dev,
+ "ioremap failed for device %s, region 0x%X @ 0x%lX\n",
+ pci_name(pdev), io_size, memaddr);
goto err_out_free_res;
}
@@ -820,8 +821,9 @@ static int __devinit rhine_init_one(struct pci_dev *pdev,
unsigned char b = readb(ioaddr+reg);
if (a != b) {
rc = -EIO;
- printk(KERN_ERR "MMIO do not match PIO [%02x] "
- "(%02x != %02x)\n", reg, a, b);
+ dev_err(&pdev->dev,
+ "MMIO do not match PIO [%02x] (%02x != %02x)\n",
+ reg, a, b);
goto err_out_unmap;
}
}
@@ -836,13 +838,15 @@ static int __devinit rhine_init_one(struct pci_dev *pdev,
for (i = 0; i < 6; i++)
dev->dev_addr[i] = ioread8(ioaddr + StationAddr + i);
- memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
- if (!is_valid_ether_addr(dev->perm_addr)) {
- rc = -EIO;
- printk(KERN_ERR "Invalid MAC address\n");
- goto err_out_unmap;
+ if (!is_valid_ether_addr(dev->dev_addr)) {
+ /* Report it and use a random ethernet address instead */
+ netdev_err(dev, "Invalid MAC address: %pM\n", dev->dev_addr);
+ random_ether_addr(dev->dev_addr);
+ netdev_info(dev, "Using random MAC address: %pM\n",
+ dev->dev_addr);
}
+ memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
/* For Rhine-I/II, phy_id is loaded from EEPROM */
if (!phy_id)
@@ -878,14 +882,14 @@ static int __devinit rhine_init_one(struct pci_dev *pdev,
if (rc)
goto err_out_unmap;
- printk(KERN_INFO "%s: VIA %s at 0x%lx, %pM, IRQ %d.\n",
- dev->name, name,
+ netdev_info(dev, "VIA %s at 0x%lx, %pM, IRQ %d\n",
+ name,
#ifdef USE_MMIO
- memaddr,
+ memaddr,
#else
- (long)ioaddr,
+ (long)ioaddr,
#endif
- dev->dev_addr, pdev->irq);
+ dev->dev_addr, pdev->irq);
pci_set_drvdata(pdev, dev);
@@ -896,11 +900,11 @@ static int __devinit rhine_init_one(struct pci_dev *pdev,
mdio_write(dev, phy_id, MII_BMCR, mii_cmd);
if (mii_status != 0xffff && mii_status != 0x0000) {
rp->mii_if.advertising = mdio_read(dev, phy_id, 4);
- printk(KERN_INFO "%s: MII PHY found at address "
- "%d, status 0x%4.4x advertising %4.4x "
- "Link %4.4x.\n", dev->name, phy_id,
- mii_status, rp->mii_if.advertising,
- mdio_read(dev, phy_id, 5));
+ netdev_info(dev,
+ "MII PHY found at address %d, status 0x%04x advertising %04x Link %04x\n",
+ phy_id,
+ mii_status, rp->mii_if.advertising,
+ mdio_read(dev, phy_id, 5));
/* set IFF_RUNNING */
if (mii_status & BMSR_LSTATUS)
@@ -912,8 +916,7 @@ static int __devinit rhine_init_one(struct pci_dev *pdev,
}
rp->mii_if.phy_id = phy_id;
if (debug > 1 && avoid_D3)
- printk(KERN_INFO "%s: No D3 power state at shutdown.\n",
- dev->name);
+ netdev_info(dev, "No D3 power state at shutdown\n");
return 0;
@@ -938,7 +941,7 @@ static int alloc_ring(struct net_device* dev)
TX_RING_SIZE * sizeof(struct tx_desc),
&ring_dma);
if (!ring) {
- printk(KERN_ERR "Could not allocate DMA memory.\n");
+ netdev_err(dev, "Could not allocate DMA memory\n");
return -ENOMEM;
}
if (rp->quirks & rqRhineI) {
@@ -1098,8 +1101,8 @@ static void rhine_check_media(struct net_device *dev, unsigned int init_media)
iowrite8(ioread8(ioaddr + ChipCmd1) & ~Cmd1FDuplex,
ioaddr + ChipCmd1);
if (debug > 1)
- printk(KERN_INFO "%s: force_media %d, carrier %d\n", dev->name,
- rp->mii_if.force_media, netif_carrier_ok(dev));
+ netdev_info(dev, "force_media %d, carrier %d\n",
+ rp->mii_if.force_media, netif_carrier_ok(dev));
}
/* Called after status of force_media possibly changed */
@@ -1113,9 +1116,8 @@ static void rhine_set_carrier(struct mii_if_info *mii)
else /* Let MMI library update carrier status */
rhine_check_media(mii->dev, 0);
if (debug > 1)
- printk(KERN_INFO "%s: force_media %d, carrier %d\n",
- mii->dev->name, mii->force_media,
- netif_carrier_ok(mii->dev));
+ netdev_info(mii->dev, "force_media %d, carrier %d\n",
+ mii->force_media, netif_carrier_ok(mii->dev));
}
/**
@@ -1402,8 +1404,7 @@ static int rhine_open(struct net_device *dev)
return rc;
if (debug > 1)
- printk(KERN_DEBUG "%s: rhine_open() irq %d.\n",
- dev->name, rp->pdev->irq);
+ netdev_dbg(dev, "%s() irq %d\n", __func__, rp->pdev->irq);
rc = alloc_ring(dev);
if (rc) {
@@ -1415,10 +1416,9 @@ static int rhine_open(struct net_device *dev)
rhine_chip_reset(dev);
init_registers(dev);
if (debug > 2)
- printk(KERN_DEBUG "%s: Done rhine_open(), status %4.4x "
- "MII status: %4.4x.\n",
- dev->name, ioread16(ioaddr + ChipCmd),
- mdio_read(dev, rp->mii_if.phy_id, MII_BMSR));
+ netdev_dbg(dev, "%s() Done - status %04x MII status: %04x\n",
+ __func__, ioread16(ioaddr + ChipCmd),
+ mdio_read(dev, rp->mii_if.phy_id, MII_BMSR));
netif_start_queue(dev);
@@ -1461,10 +1461,9 @@ static void rhine_tx_timeout(struct net_device *dev)
struct rhine_private *rp = netdev_priv(dev);
void __iomem *ioaddr = rp->base;
- printk(KERN_WARNING "%s: Transmit timed out, status %4.4x, PHY status "
- "%4.4x, resetting...\n",
- dev->name, ioread16(ioaddr + IntrStatus),
- mdio_read(dev, rp->mii_if.phy_id, MII_BMSR));
+ netdev_warn(dev, "Transmit timed out, status %04x, PHY status %04x, resetting...\n",
+ ioread16(ioaddr + IntrStatus),
+ mdio_read(dev, rp->mii_if.phy_id, MII_BMSR));
schedule_work(&rp->reset_task);
}
@@ -1551,8 +1550,8 @@ static netdev_tx_t rhine_start_tx(struct sk_buff *skb,
spin_unlock_irqrestore(&rp->lock, flags);
if (debug > 4) {
- printk(KERN_DEBUG "%s: Transmit frame #%d queued in slot %d.\n",
- dev->name, rp->cur_tx-1, entry);
+ netdev_dbg(dev, "Transmit frame #%d queued in slot %d\n",
+ rp->cur_tx-1, entry);
}
return NETDEV_TX_OK;
}
@@ -1578,8 +1577,8 @@ static irqreturn_t rhine_interrupt(int irq, void *dev_instance)
IOSYNC;
if (debug > 4)
- printk(KERN_DEBUG "%s: Interrupt, status %8.8x.\n",
- dev->name, intr_status);
+ netdev_dbg(dev, "Interrupt, status %08x\n",
+ intr_status);
if (intr_status & (IntrRxDone | IntrRxErr | IntrRxDropped |
IntrRxWakeUp | IntrRxEmpty | IntrRxNoBuf)) {
@@ -1597,9 +1596,9 @@ static irqreturn_t rhine_interrupt(int irq, void *dev_instance)
RHINE_WAIT_FOR(!(ioread8(ioaddr+ChipCmd) & CmdTxOn));
if (debug > 2 &&
ioread8(ioaddr+ChipCmd) & CmdTxOn)
- printk(KERN_WARNING "%s: "
- "rhine_interrupt() Tx engine "
- "still on.\n", dev->name);
+ netdev_warn(dev,
+ "%s: Tx engine still on\n",
+ __func__);
}
rhine_tx(dev);
}
@@ -1611,16 +1610,15 @@ static irqreturn_t rhine_interrupt(int irq, void *dev_instance)
rhine_error(dev, intr_status);
if (--boguscnt < 0) {
- printk(KERN_WARNING "%s: Too much work at interrupt, "
- "status=%#8.8x.\n",
- dev->name, intr_status);
+ netdev_warn(dev, "Too much work at interrupt, status=%#08x\n",
+ intr_status);
break;
}
}
if (debug > 3)
- printk(KERN_DEBUG "%s: exiting interrupt, status=%8.8x.\n",
- dev->name, ioread16(ioaddr + IntrStatus));
+ netdev_dbg(dev, "exiting interrupt, status=%08x\n",
+ ioread16(ioaddr + IntrStatus));
return IRQ_RETVAL(handled);
}
@@ -1637,15 +1635,14 @@ static void rhine_tx(struct net_device *dev)
while (rp->dirty_tx != rp->cur_tx) {
txstatus = le32_to_cpu(rp->tx_ring[entry].tx_status);
if (debug > 6)
- printk(KERN_DEBUG "Tx scavenge %d status %8.8x.\n",
- entry, txstatus);
+ netdev_dbg(dev, "Tx scavenge %d status %08x\n",
+ entry, txstatus);
if (txstatus & DescOwn)
break;
if (txstatus & 0x8000) {
if (debug > 1)
- printk(KERN_DEBUG "%s: Transmit error, "
- "Tx status %8.8x.\n",
- dev->name, txstatus);
+ netdev_dbg(dev, "Transmit error, Tx status %08x\n",
+ txstatus);
dev->stats.tx_errors++;
if (txstatus & 0x0400)
dev->stats.tx_carrier_errors++;
@@ -1668,9 +1665,9 @@ static void rhine_tx(struct net_device *dev)
else
dev->stats.collisions += txstatus & 0x0F;
if (debug > 6)
- printk(KERN_DEBUG "collisions: %1.1x:%1.1x\n",
- (txstatus >> 3) & 0xF,
- txstatus & 0xF);
+ netdev_dbg(dev, "collisions: %1.1x:%1.1x\n",
+ (txstatus >> 3) & 0xF,
+ txstatus & 0xF);
dev->stats.tx_bytes += rp->tx_skbuff[entry]->len;
dev->stats.tx_packets++;
}
@@ -1703,7 +1700,7 @@ static void rhine_tx(struct net_device *dev)
static inline u16 rhine_get_vlan_tci(struct sk_buff *skb, int data_size)
{
u8 *trailer = (u8 *)skb->data + ((data_size + 3) & ~3) + 2;
- return ntohs(*(u16 *)trailer);
+ return be16_to_cpup((__be16 *)trailer);
}
/* Process up to limit frames from receive ring */
@@ -1714,9 +1711,9 @@ static int rhine_rx(struct net_device *dev, int limit)
int entry = rp->cur_rx % RX_RING_SIZE;
if (debug > 4) {
- printk(KERN_DEBUG "%s: rhine_rx(), entry %d status %8.8x.\n",
- dev->name, entry,
- le32_to_cpu(rp->rx_head_desc->rx_status));
+ netdev_dbg(dev, "%s(), entry %d status %08x\n",
+ __func__, entry,
+ le32_to_cpu(rp->rx_head_desc->rx_status));
}
/* If EOP is set on the next entry, it's a new packet. Send it up. */
@@ -1730,26 +1727,26 @@ static int rhine_rx(struct net_device *dev, int limit)
break;
if (debug > 4)
- printk(KERN_DEBUG "rhine_rx() status is %8.8x.\n",
- desc_status);
+ netdev_dbg(dev, "%s() status is %08x\n",
+ __func__, desc_status);
if ((desc_status & (RxWholePkt | RxErr)) != RxWholePkt) {
if ((desc_status & RxWholePkt) != RxWholePkt) {
- printk(KERN_WARNING "%s: Oversized Ethernet "
- "frame spanned multiple buffers, entry "
- "%#x length %d status %8.8x!\n",
- dev->name, entry, data_size,
- desc_status);
- printk(KERN_WARNING "%s: Oversized Ethernet "
- "frame %p vs %p.\n", dev->name,
- rp->rx_head_desc, &rp->rx_ring[entry]);
+ netdev_warn(dev,
+ "Oversized Ethernet frame spanned multiple buffers, "
+ "entry %#x length %d status %08x!\n",
+ entry, data_size,
+ desc_status);
+ netdev_warn(dev,
+ "Oversized Ethernet frame %p vs %p\n",
+ rp->rx_head_desc,
+ &rp->rx_ring[entry]);
dev->stats.rx_length_errors++;
} else if (desc_status & RxErr) {
/* There was an error. */
if (debug > 2)
- printk(KERN_DEBUG "rhine_rx() Rx "
- "error was %8.8x.\n",
- desc_status);
+ netdev_dbg(dev, "%s() Rx error was %08x\n",
+ __func__, desc_status);
dev->stats.rx_errors++;
if (desc_status & 0x0030)
dev->stats.rx_length_errors++;
@@ -1791,9 +1788,7 @@ static int rhine_rx(struct net_device *dev, int limit)
} else {
skb = rp->rx_skbuff[entry];
if (skb == NULL) {
- printk(KERN_ERR "%s: Inconsistent Rx "
- "descriptor chain.\n",
- dev->name);
+ netdev_err(dev, "Inconsistent Rx descriptor chain\n");
break;
}
rp->rx_skbuff[entry] = NULL;
@@ -1861,7 +1856,7 @@ static void rhine_restart_tx(struct net_device *dev) {
u32 intr_status;
/*
- * If new errors occured, we need to sort them out before doing Tx.
+ * If new errors occurred, we need to sort them out before doing Tx.
* In that case the ISR will be back here RSN anyway.
*/
intr_status = get_intr_status(dev);
@@ -1886,9 +1881,8 @@ static void rhine_restart_tx(struct net_device *dev) {
else {
/* This should never happen */
if (debug > 1)
- printk(KERN_WARNING "%s: rhine_restart_tx() "
- "Another error occured %8.8x.\n",
- dev->name, intr_status);
+ netdev_warn(dev, "%s() Another error occurred %08x\n",
+ __func__, intr_status);
}
}
@@ -1909,21 +1903,19 @@ static void rhine_error(struct net_device *dev, int intr_status)
}
if (intr_status & IntrTxAborted) {
if (debug > 1)
- printk(KERN_INFO "%s: Abort %8.8x, frame dropped.\n",
- dev->name, intr_status);
+ netdev_info(dev, "Abort %08x, frame dropped\n",
+ intr_status);
}
if (intr_status & IntrTxUnderrun) {
if (rp->tx_thresh < 0xE0)
BYTE_REG_BITS_SET((rp->tx_thresh += 0x20), 0x80, ioaddr + TxConfig);
if (debug > 1)
- printk(KERN_INFO "%s: Transmitter underrun, Tx "
- "threshold now %2.2x.\n",
- dev->name, rp->tx_thresh);
+ netdev_info(dev, "Transmitter underrun, Tx threshold now %02x\n",
+ rp->tx_thresh);
}
if (intr_status & IntrTxDescRace) {
if (debug > 2)
- printk(KERN_INFO "%s: Tx descriptor write-back race.\n",
- dev->name);
+ netdev_info(dev, "Tx descriptor write-back race\n");
}
if ((intr_status & IntrTxError) &&
(intr_status & (IntrTxAborted |
@@ -1932,9 +1924,8 @@ static void rhine_error(struct net_device *dev, int intr_status)
BYTE_REG_BITS_SET((rp->tx_thresh += 0x20), 0x80, ioaddr + TxConfig);
}
if (debug > 1)
- printk(KERN_INFO "%s: Unspecified error. Tx "
- "threshold now %2.2x.\n",
- dev->name, rp->tx_thresh);
+ netdev_info(dev, "Unspecified error. Tx threshold now %02x\n",
+ rp->tx_thresh);
}
if (intr_status & (IntrTxAborted | IntrTxUnderrun | IntrTxDescRace |
IntrTxError))
@@ -1944,8 +1935,8 @@ static void rhine_error(struct net_device *dev, int intr_status)
IntrTxError | IntrTxAborted | IntrNormalSummary |
IntrTxDescRace)) {
if (debug > 1)
- printk(KERN_ERR "%s: Something Wicked happened! "
- "%8.8x.\n", dev->name, intr_status);
+ netdev_err(dev, "Something Wicked happened! %08x\n",
+ intr_status);
}
spin_unlock(&rp->lock);
@@ -2145,9 +2136,8 @@ static int rhine_close(struct net_device *dev)
spin_lock_irq(&rp->lock);
if (debug > 1)
- printk(KERN_DEBUG "%s: Shutting down ethercard, "
- "status was %4.4x.\n",
- dev->name, ioread16(ioaddr + ChipCmd));
+ netdev_dbg(dev, "Shutting down ethercard, status was %04x\n",
+ ioread16(ioaddr + ChipCmd));
/* Switch to loopback mode to avoid hardware races. */
iowrite8(rp->tx_thresh | 0x02, ioaddr + TxConfig);
@@ -2265,12 +2255,12 @@ static int rhine_resume(struct pci_dev *pdev)
return 0;
if (request_irq(dev->irq, rhine_interrupt, IRQF_SHARED, dev->name, dev))
- printk(KERN_ERR "via-rhine %s: request_irq failed\n", dev->name);
+ netdev_err(dev, "request_irq failed\n");
ret = pci_set_power_state(pdev, PCI_D0);
if (debug > 1)
- printk(KERN_INFO "%s: Entering power state D0 %s (%d).\n",
- dev->name, ret ? "failed" : "succeeded", ret);
+ netdev_info(dev, "Entering power state D0 %s (%d)\n",
+ ret ? "failed" : "succeeded", ret);
pci_restore_state(pdev);
@@ -2326,17 +2316,15 @@ static int __init rhine_init(void)
{
/* when a module, this is printed whether or not devices are found in probe */
#ifdef MODULE
- printk(version);
+ pr_info("%s\n", version);
#endif
if (dmi_check_system(rhine_dmi_table)) {
/* these BIOSes fail at PXE boot if chip is in D3 */
avoid_D3 = 1;
- printk(KERN_WARNING "%s: Broken BIOS detected, avoid_D3 "
- "enabled.\n",
- DRV_NAME);
+ pr_warn("Broken BIOS detected, avoid_D3 enabled\n");
}
else if (avoid_D3)
- printk(KERN_INFO "%s: avoid_D3 set.\n", DRV_NAME);
+ pr_info("avoid_D3 set\n");
return pci_register_driver(&rhine_driver);
}
diff --git a/drivers/net/via-velocity.c b/drivers/net/via-velocity.c
index 0d6fec6b7d93..06daa9d6fee8 100644
--- a/drivers/net/via-velocity.c
+++ b/drivers/net/via-velocity.c
@@ -292,7 +292,7 @@ VELOCITY_PARAM(DMA_length, "DMA length");
/* IP_byte_align[] is used for IP header DWORD byte aligned
0: indicate the IP header won't be DWORD byte aligned.(Default) .
1: indicate the IP header will be DWORD byte aligned.
- In some enviroment, the IP header should be DWORD byte aligned,
+ In some environment, the IP header should be DWORD byte aligned,
or the packet will be dropped when we receive it. (eg: IPVS)
*/
VELOCITY_PARAM(IP_byte_align, "Enable IP header dword aligned");
@@ -1994,7 +1994,7 @@ static inline void velocity_rx_csum(struct rx_desc *rd, struct sk_buff *skb)
* @dev: network device
*
* Replace the current skb that is scheduled for Rx processing by a
- * shorter, immediatly allocated skb, if the received packet is small
+ * shorter, immediately allocated skb, if the received packet is small
* enough. This function returns a negative value if the received
* packet is too big or if memory is exhausted.
*/
@@ -2600,8 +2600,7 @@ static netdev_tx_t velocity_xmit(struct sk_buff *skb,
/*
* Handle hardware checksum
*/
- if ((dev->features & NETIF_F_IP_CSUM) &&
- (skb->ip_summed == CHECKSUM_PARTIAL)) {
+ if (skb->ip_summed == CHECKSUM_PARTIAL) {
const struct iphdr *ip = ip_hdr(skb);
if (ip->protocol == IPPROTO_TCP)
td_ptr->tdesc1.TCR |= TCR0_TCPCK;
@@ -2841,6 +2840,7 @@ static int __devinit velocity_found1(struct pci_dev *pdev, const struct pci_devi
dev->ethtool_ops = &velocity_ethtool_ops;
netif_napi_add(dev, &vptr->napi, velocity_poll, VELOCITY_NAPI_WEIGHT);
+ dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_SG | NETIF_F_HW_VLAN_TX;
dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_FILTER |
NETIF_F_HW_VLAN_RX | NETIF_F_IP_CSUM;
@@ -3182,7 +3182,8 @@ static void velocity_ethtool_down(struct net_device *dev)
pci_set_power_state(vptr->pdev, PCI_D3hot);
}
-static int velocity_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+static int velocity_get_settings(struct net_device *dev,
+ struct ethtool_cmd *cmd)
{
struct velocity_info *vptr = netdev_priv(dev);
struct mac_regs __iomem *regs = vptr->mac_regs;
@@ -3228,12 +3229,14 @@ static int velocity_get_settings(struct net_device *dev, struct ethtool_cmd *cmd
break;
}
}
+
if (status & VELOCITY_SPEED_1000)
- cmd->speed = SPEED_1000;
+ ethtool_cmd_speed_set(cmd, SPEED_1000);
else if (status & VELOCITY_SPEED_100)
- cmd->speed = SPEED_100;
+ ethtool_cmd_speed_set(cmd, SPEED_100);
else
- cmd->speed = SPEED_10;
+ ethtool_cmd_speed_set(cmd, SPEED_10);
+
cmd->autoneg = (status & VELOCITY_AUTONEG_ENABLE) ? AUTONEG_ENABLE : AUTONEG_DISABLE;
cmd->port = PORT_TP;
cmd->transceiver = XCVR_INTERNAL;
@@ -3247,9 +3250,11 @@ static int velocity_get_settings(struct net_device *dev, struct ethtool_cmd *cmd
return 0;
}
-static int velocity_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+static int velocity_set_settings(struct net_device *dev,
+ struct ethtool_cmd *cmd)
{
struct velocity_info *vptr = netdev_priv(dev);
+ u32 speed = ethtool_cmd_speed(cmd);
u32 curr_status;
u32 new_status = 0;
int ret = 0;
@@ -3258,9 +3263,9 @@ static int velocity_set_settings(struct net_device *dev, struct ethtool_cmd *cmd
curr_status &= (~VELOCITY_LINK_FAIL);
new_status |= ((cmd->autoneg) ? VELOCITY_AUTONEG_ENABLE : 0);
- new_status |= ((cmd->speed == SPEED_1000) ? VELOCITY_SPEED_1000 : 0);
- new_status |= ((cmd->speed == SPEED_100) ? VELOCITY_SPEED_100 : 0);
- new_status |= ((cmd->speed == SPEED_10) ? VELOCITY_SPEED_10 : 0);
+ new_status |= ((speed == SPEED_1000) ? VELOCITY_SPEED_1000 : 0);
+ new_status |= ((speed == SPEED_100) ? VELOCITY_SPEED_100 : 0);
+ new_status |= ((speed == SPEED_10) ? VELOCITY_SPEED_10 : 0);
new_status |= ((cmd->duplex == DUPLEX_FULL) ? VELOCITY_DUPLEX_FULL : 0);
if ((new_status & VELOCITY_AUTONEG_ENABLE) &&
@@ -3457,13 +3462,10 @@ static const struct ethtool_ops velocity_ethtool_ops = {
.get_settings = velocity_get_settings,
.set_settings = velocity_set_settings,
.get_drvinfo = velocity_get_drvinfo,
- .set_tx_csum = ethtool_op_set_tx_csum,
- .get_tx_csum = ethtool_op_get_tx_csum,
.get_wol = velocity_ethtool_get_wol,
.set_wol = velocity_ethtool_set_wol,
.get_msglevel = velocity_get_msglevel,
.set_msglevel = velocity_set_msglevel,
- .set_sg = ethtool_op_set_sg,
.get_link = velocity_get_link,
.get_coalesce = velocity_get_coalesce,
.set_coalesce = velocity_set_coalesce,
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 82dba5aaf423..0cb0b0632672 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -710,17 +710,6 @@ static int virtnet_close(struct net_device *dev)
return 0;
}
-static int virtnet_set_tx_csum(struct net_device *dev, u32 data)
-{
- struct virtnet_info *vi = netdev_priv(dev);
- struct virtio_device *vdev = vi->vdev;
-
- if (data && !virtio_has_feature(vdev, VIRTIO_NET_F_CSUM))
- return -ENOSYS;
-
- return ethtool_op_set_tx_hw_csum(dev, data);
-}
-
static void virtnet_set_rx_mode(struct net_device *dev)
{
struct virtnet_info *vi = netdev_priv(dev);
@@ -822,10 +811,6 @@ static void virtnet_vlan_rx_kill_vid(struct net_device *dev, u16 vid)
}
static const struct ethtool_ops virtnet_ethtool_ops = {
- .set_tx_csum = virtnet_set_tx_csum,
- .set_sg = ethtool_op_set_sg,
- .set_tso = ethtool_op_set_tso,
- .set_ufo = ethtool_op_set_ufo,
.get_link = ethtool_op_get_link,
};
@@ -912,22 +897,29 @@ static int virtnet_probe(struct virtio_device *vdev)
SET_NETDEV_DEV(dev, &vdev->dev);
/* Do we support "hardware" checksums? */
- if (csum && virtio_has_feature(vdev, VIRTIO_NET_F_CSUM)) {
+ if (virtio_has_feature(vdev, VIRTIO_NET_F_CSUM)) {
/* This opens up the world of extra features. */
- dev->features |= NETIF_F_HW_CSUM|NETIF_F_SG|NETIF_F_FRAGLIST;
- if (gso && virtio_has_feature(vdev, VIRTIO_NET_F_GSO)) {
- dev->features |= NETIF_F_TSO | NETIF_F_UFO
+ dev->hw_features |= NETIF_F_HW_CSUM|NETIF_F_SG|NETIF_F_FRAGLIST;
+ if (csum)
+ dev->features |= NETIF_F_HW_CSUM|NETIF_F_SG|NETIF_F_FRAGLIST;
+
+ if (virtio_has_feature(vdev, VIRTIO_NET_F_GSO)) {
+ dev->hw_features |= NETIF_F_TSO | NETIF_F_UFO
| NETIF_F_TSO_ECN | NETIF_F_TSO6;
}
/* Individual feature bits: what can host handle? */
- if (gso && virtio_has_feature(vdev, VIRTIO_NET_F_HOST_TSO4))
- dev->features |= NETIF_F_TSO;
- if (gso && virtio_has_feature(vdev, VIRTIO_NET_F_HOST_TSO6))
- dev->features |= NETIF_F_TSO6;
- if (gso && virtio_has_feature(vdev, VIRTIO_NET_F_HOST_ECN))
- dev->features |= NETIF_F_TSO_ECN;
- if (gso && virtio_has_feature(vdev, VIRTIO_NET_F_HOST_UFO))
- dev->features |= NETIF_F_UFO;
+ if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_TSO4))
+ dev->hw_features |= NETIF_F_TSO;
+ if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_TSO6))
+ dev->hw_features |= NETIF_F_TSO6;
+ if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_ECN))
+ dev->hw_features |= NETIF_F_TSO_ECN;
+ if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_UFO))
+ dev->hw_features |= NETIF_F_UFO;
+
+ if (gso)
+ dev->features |= dev->hw_features & (NETIF_F_ALL_TSO|NETIF_F_UFO);
+ /* (!csum && gso) case will be fixed by register_netdev() */
}
/* Configuration may specify what MAC to use. Otherwise random. */
diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c
index cc14b4a75048..fa6e2ac7475a 100644
--- a/drivers/net/vmxnet3/vmxnet3_drv.c
+++ b/drivers/net/vmxnet3/vmxnet3_drv.c
@@ -178,6 +178,7 @@ static void
vmxnet3_process_events(struct vmxnet3_adapter *adapter)
{
int i;
+ unsigned long flags;
u32 events = le32_to_cpu(adapter->shared->ecr);
if (!events)
return;
@@ -190,10 +191,10 @@ vmxnet3_process_events(struct vmxnet3_adapter *adapter)
/* Check if there is an error on xmit/recv queues */
if (events & (VMXNET3_ECR_TQERR | VMXNET3_ECR_RQERR)) {
- spin_lock(&adapter->cmd_lock);
+ spin_lock_irqsave(&adapter->cmd_lock, flags);
VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
VMXNET3_CMD_GET_QUEUE_STATUS);
- spin_unlock(&adapter->cmd_lock);
+ spin_unlock_irqrestore(&adapter->cmd_lock, flags);
for (i = 0; i < adapter->num_tx_queues; i++)
if (adapter->tqd_start[i].status.stopped)
@@ -892,7 +893,7 @@ vmxnet3_prepare_tso(struct sk_buff *skb,
* Transmits a pkt thru a given tq
* Returns:
* NETDEV_TX_OK: descriptors are setup successfully
- * NETDEV_TX_OK: error occured, the pkt is dropped
+ * NETDEV_TX_OK: error occurred, the pkt is dropped
* NETDEV_TX_BUSY: tx ring is full, queue is stopped
*
* Side-effects:
@@ -1082,7 +1083,7 @@ vmxnet3_rx_csum(struct vmxnet3_adapter *adapter,
struct sk_buff *skb,
union Vmxnet3_GenericDesc *gdesc)
{
- if (!gdesc->rcd.cnc && adapter->rxcsum) {
+ if (!gdesc->rcd.cnc && adapter->netdev->features & NETIF_F_RXCSUM) {
/* typical case: TCP/UDP over IP and both csums are correct */
if ((le32_to_cpu(gdesc->dword[3]) & VMXNET3_RCD_CSUM_OK) ==
VMXNET3_RCD_CSUM_OK) {
@@ -2081,10 +2082,10 @@ vmxnet3_setup_driver_shared(struct vmxnet3_adapter *adapter)
devRead->misc.ddLen = cpu_to_le32(sizeof(struct vmxnet3_adapter));
/* set up feature flags */
- if (adapter->rxcsum)
+ if (adapter->netdev->features & NETIF_F_RXCSUM)
devRead->misc.uptFeatures |= UPT1_F_RXCSUM;
- if (adapter->lro) {
+ if (adapter->netdev->features & NETIF_F_LRO) {
devRead->misc.uptFeatures |= UPT1_F_LRO;
devRead->misc.maxNumRxSG = cpu_to_le16(1 + MAX_SKB_FRAGS);
}
@@ -2593,9 +2594,6 @@ vmxnet3_change_mtu(struct net_device *netdev, int new_mtu)
if (new_mtu < VMXNET3_MIN_MTU || new_mtu > VMXNET3_MAX_MTU)
return -EINVAL;
- if (new_mtu > 1500 && !adapter->jumbo_frame)
- return -EINVAL;
-
netdev->mtu = new_mtu;
/*
@@ -2641,28 +2639,18 @@ vmxnet3_declare_features(struct vmxnet3_adapter *adapter, bool dma64)
{
struct net_device *netdev = adapter->netdev;
- netdev->features = NETIF_F_SG |
- NETIF_F_HW_CSUM |
- NETIF_F_HW_VLAN_TX |
- NETIF_F_HW_VLAN_RX |
- NETIF_F_HW_VLAN_FILTER |
- NETIF_F_TSO |
- NETIF_F_TSO6 |
- NETIF_F_LRO;
-
- printk(KERN_INFO "features: sg csum vlan jf tso tsoIPv6 lro");
-
- adapter->rxcsum = true;
- adapter->jumbo_frame = true;
- adapter->lro = true;
-
- if (dma64) {
+ netdev->hw_features = NETIF_F_SG | NETIF_F_RXCSUM |
+ NETIF_F_HW_CSUM | NETIF_F_HW_VLAN_TX |
+ NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_LRO;
+ if (dma64)
netdev->features |= NETIF_F_HIGHDMA;
- printk(" highDMA");
- }
+ netdev->vlan_features = netdev->hw_features & ~NETIF_F_HW_VLAN_TX;
+ netdev->features = netdev->hw_features |
+ NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER;
- netdev->vlan_features = netdev->features;
- printk("\n");
+ netdev_info(adapter->netdev,
+ "features: sg csum vlan jf tso tsoIPv6 lro%s\n",
+ dma64 ? " highDMA" : "");
}
@@ -2685,7 +2673,7 @@ vmxnet3_read_mac_addr(struct vmxnet3_adapter *adapter, u8 *mac)
* Enable MSIx vectors.
* Returns :
* 0 on successful enabling of required vectors,
- * VMXNET3_LINUX_MIN_MSIX_VECT when only minumum number of vectors required
+ * VMXNET3_LINUX_MIN_MSIX_VECT when only minimum number of vectors required
* could be enabled.
* number of vectors which can be enabled otherwise (this number is smaller
* than VMXNET3_LINUX_MIN_MSIX_VECT)
@@ -2733,13 +2721,14 @@ static void
vmxnet3_alloc_intr_resources(struct vmxnet3_adapter *adapter)
{
u32 cfg;
+ unsigned long flags;
/* intr settings */
- spin_lock(&adapter->cmd_lock);
+ spin_lock_irqsave(&adapter->cmd_lock, flags);
VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
VMXNET3_CMD_GET_CONF_INTR);
cfg = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD);
- spin_unlock(&adapter->cmd_lock);
+ spin_unlock_irqrestore(&adapter->cmd_lock, flags);
adapter->intr.type = cfg & 0x3;
adapter->intr.mask_mode = (cfg >> 2) & 0x3;
@@ -2874,6 +2863,7 @@ vmxnet3_probe_device(struct pci_dev *pdev,
.ndo_start_xmit = vmxnet3_xmit_frame,
.ndo_set_mac_address = vmxnet3_set_mac_addr,
.ndo_change_mtu = vmxnet3_change_mtu,
+ .ndo_set_features = vmxnet3_set_features,
.ndo_get_stats = vmxnet3_get_stats,
.ndo_tx_timeout = vmxnet3_tx_timeout,
.ndo_set_multicast_list = vmxnet3_set_mc,
@@ -2894,6 +2884,9 @@ vmxnet3_probe_device(struct pci_dev *pdev,
int num_tx_queues;
int num_rx_queues;
+ if (!pci_msi_enabled())
+ enable_mq = 0;
+
#ifdef VMXNET3_RSS
if (enable_mq)
num_rx_queues = min(VMXNET3_DEVICE_MAX_RX_QUEUES,
diff --git a/drivers/net/vmxnet3/vmxnet3_ethtool.c b/drivers/net/vmxnet3/vmxnet3_ethtool.c
index 51f2ef142a5b..64303eb3a5fc 100644
--- a/drivers/net/vmxnet3/vmxnet3_ethtool.c
+++ b/drivers/net/vmxnet3/vmxnet3_ethtool.c
@@ -33,40 +33,6 @@ struct vmxnet3_stat_desc {
};
-static u32
-vmxnet3_get_rx_csum(struct net_device *netdev)
-{
- struct vmxnet3_adapter *adapter = netdev_priv(netdev);
- return adapter->rxcsum;
-}
-
-
-static int
-vmxnet3_set_rx_csum(struct net_device *netdev, u32 val)
-{
- struct vmxnet3_adapter *adapter = netdev_priv(netdev);
- unsigned long flags;
-
- if (adapter->rxcsum != val) {
- adapter->rxcsum = val;
- if (netif_running(netdev)) {
- if (val)
- adapter->shared->devRead.misc.uptFeatures |=
- UPT1_F_RXCSUM;
- else
- adapter->shared->devRead.misc.uptFeatures &=
- ~UPT1_F_RXCSUM;
-
- spin_lock_irqsave(&adapter->cmd_lock, flags);
- VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
- VMXNET3_CMD_UPDATE_FEATURE);
- spin_unlock_irqrestore(&adapter->cmd_lock, flags);
- }
- }
- return 0;
-}
-
-
/* per tq stats maintained by the device */
static const struct vmxnet3_stat_desc
vmxnet3_tq_dev_stats[] = {
@@ -296,28 +262,27 @@ vmxnet3_get_strings(struct net_device *netdev, u32 stringset, u8 *buf)
}
}
-static int
-vmxnet3_set_flags(struct net_device *netdev, u32 data)
+int vmxnet3_set_features(struct net_device *netdev, u32 features)
{
struct vmxnet3_adapter *adapter = netdev_priv(netdev);
- u8 lro_requested = (data & ETH_FLAG_LRO) == 0 ? 0 : 1;
- u8 lro_present = (netdev->features & NETIF_F_LRO) == 0 ? 0 : 1;
unsigned long flags;
+ u32 changed = features ^ netdev->features;
- if (ethtool_invalid_flags(netdev, data, ETH_FLAG_LRO))
- return -EINVAL;
-
- if (lro_requested ^ lro_present) {
- /* toggle the LRO feature*/
- netdev->features ^= NETIF_F_LRO;
+ if (changed & (NETIF_F_RXCSUM|NETIF_F_LRO)) {
+ if (features & NETIF_F_RXCSUM)
+ adapter->shared->devRead.misc.uptFeatures |=
+ UPT1_F_RXCSUM;
+ else
+ adapter->shared->devRead.misc.uptFeatures &=
+ ~UPT1_F_RXCSUM;
- /* update harware LRO capability accordingly */
- if (lro_requested)
+ if (features & NETIF_F_LRO)
adapter->shared->devRead.misc.uptFeatures |=
UPT1_F_LRO;
else
adapter->shared->devRead.misc.uptFeatures &=
~UPT1_F_LRO;
+
spin_lock_irqsave(&adapter->cmd_lock, flags);
VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
VMXNET3_CMD_UPDATE_FEATURE);
@@ -459,10 +424,10 @@ vmxnet3_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
ecmd->transceiver = XCVR_INTERNAL;
if (adapter->link_speed) {
- ecmd->speed = adapter->link_speed;
+ ethtool_cmd_speed_set(ecmd, adapter->link_speed);
ecmd->duplex = DUPLEX_FULL;
} else {
- ecmd->speed = -1;
+ ethtool_cmd_speed_set(ecmd, -1);
ecmd->duplex = -1;
}
return 0;
@@ -654,17 +619,7 @@ static struct ethtool_ops vmxnet3_ethtool_ops = {
.get_wol = vmxnet3_get_wol,
.set_wol = vmxnet3_set_wol,
.get_link = ethtool_op_get_link,
- .get_rx_csum = vmxnet3_get_rx_csum,
- .set_rx_csum = vmxnet3_set_rx_csum,
- .get_tx_csum = ethtool_op_get_tx_csum,
- .set_tx_csum = ethtool_op_set_tx_hw_csum,
- .get_sg = ethtool_op_get_sg,
- .set_sg = ethtool_op_set_sg,
- .get_tso = ethtool_op_get_tso,
- .set_tso = ethtool_op_set_tso,
.get_strings = vmxnet3_get_strings,
- .get_flags = ethtool_op_get_flags,
- .set_flags = vmxnet3_set_flags,
.get_sset_count = vmxnet3_get_sset_count,
.get_ethtool_stats = vmxnet3_get_ethtool_stats,
.get_ringparam = vmxnet3_get_ringparam,
diff --git a/drivers/net/vmxnet3/vmxnet3_int.h b/drivers/net/vmxnet3/vmxnet3_int.h
index fb5d245ac878..f50d36fdf405 100644
--- a/drivers/net/vmxnet3/vmxnet3_int.h
+++ b/drivers/net/vmxnet3/vmxnet3_int.h
@@ -68,10 +68,10 @@
/*
* Version numbers
*/
-#define VMXNET3_DRIVER_VERSION_STRING "1.0.25.0-k"
+#define VMXNET3_DRIVER_VERSION_STRING "1.1.9.0-k"
/* a 32-bit int, each byte encodes a version number in VMXNET3_DRIVER_VERSION */
-#define VMXNET3_DRIVER_VERSION_NUM 0x01001900
+#define VMXNET3_DRIVER_VERSION_NUM 0x01010900
#if defined(CONFIG_PCI_MSI)
/* RSS only makes sense if MSI-X is supported. */
@@ -329,10 +329,6 @@ struct vmxnet3_adapter {
u8 __iomem *hw_addr0; /* for BAR 0 */
u8 __iomem *hw_addr1; /* for BAR 1 */
- /* feature control */
- bool rxcsum;
- bool lro;
- bool jumbo_frame;
#ifdef VMXNET3_RSS
struct UPT1_RSSConf *rss_conf;
bool rss;
@@ -404,6 +400,9 @@ void
vmxnet3_rq_destroy_all(struct vmxnet3_adapter *adapter);
int
+vmxnet3_set_features(struct net_device *netdev, u32 features);
+
+int
vmxnet3_create_queues(struct vmxnet3_adapter *adapter,
u32 tx_ring_size, u32 rx_ring_size, u32 rx_ring2_size);
diff --git a/drivers/net/vxge/vxge-config.c b/drivers/net/vxge/vxge-config.c
index e74e4b42592d..32763b2dd73f 100644
--- a/drivers/net/vxge/vxge-config.c
+++ b/drivers/net/vxge/vxge-config.c
@@ -159,16 +159,15 @@ vxge_hw_vpath_fw_api(struct __vxge_hw_virtualpath *vpath, u32 action,
u32 fw_memo, u32 offset, u64 *data0, u64 *data1,
u64 *steer_ctrl)
{
- struct vxge_hw_vpath_reg __iomem *vp_reg;
+ struct vxge_hw_vpath_reg __iomem *vp_reg = vpath->vp_reg;
enum vxge_hw_status status;
u64 val64;
- u32 retry = 0, max_retry = 100;
-
- vp_reg = vpath->vp_reg;
+ u32 retry = 0, max_retry = 3;
- if (vpath->vp_open) {
- max_retry = 3;
- spin_lock(&vpath->lock);
+ spin_lock(&vpath->lock);
+ if (!vpath->vp_open) {
+ spin_unlock(&vpath->lock);
+ max_retry = 100;
}
writeq(*data0, &vp_reg->rts_access_steer_data0);
@@ -187,7 +186,7 @@ vxge_hw_vpath_fw_api(struct __vxge_hw_virtualpath *vpath, u32 action,
VXGE_HW_DEF_DEVICE_POLL_MILLIS);
/* The __vxge_hw_device_register_poll can udelay for a significant
- * amount of time, blocking other proccess from the CPU. If it delays
+ * amount of time, blocking other process from the CPU. If it delays
* for ~5secs, an NMI error can occur. A way around this is to give up
* the processor via msleep, but this is not allowed if under lock.
* So, only allow it to sleep for ~4secs if open. Otherwise, delay for
@@ -1000,7 +999,7 @@ exit:
/**
* vxge_hw_device_hw_info_get - Get the hw information
* Returns the vpath mask that has the bits set for each vpath allocated
- * for the driver, FW version information and the first mac addresse for
+ * for the driver, FW version information, and the first mac address for
* each vpath
*/
enum vxge_hw_status __devinit
@@ -1064,9 +1063,10 @@ vxge_hw_device_hw_info_get(void __iomem *bar0,
val64 = readq(&toc->toc_vpath_pointer[i]);
+ spin_lock_init(&vpath.lock);
vpath.vp_reg = (struct vxge_hw_vpath_reg __iomem *)
(bar0 + val64);
- vpath.vp_open = 0;
+ vpath.vp_open = VXGE_HW_VP_NOT_OPEN;
status = __vxge_hw_vpath_pci_func_mode_get(&vpath, hw_info);
if (status != VXGE_HW_OK)
@@ -1090,7 +1090,7 @@ vxge_hw_device_hw_info_get(void __iomem *bar0,
val64 = readq(&toc->toc_vpath_pointer[i]);
vpath.vp_reg = (struct vxge_hw_vpath_reg __iomem *)
(bar0 + val64);
- vpath.vp_open = 0;
+ vpath.vp_open = VXGE_HW_VP_NOT_OPEN;
status = __vxge_hw_vpath_addr_get(&vpath,
hw_info->mac_addrs[i],
@@ -4646,7 +4646,27 @@ static void __vxge_hw_vp_terminate(struct __vxge_hw_device *hldev, u32 vp_id)
vpath->hldev->tim_int_mask1, vpath->vp_id);
hldev->stats.hw_dev_info_stats.vpath_info[vpath->vp_id] = NULL;
- memset(vpath, 0, sizeof(struct __vxge_hw_virtualpath));
+ /* If the whole struct __vxge_hw_virtualpath is zeroed, nothing will
+ * work after the interface is brought down.
+ */
+ spin_lock(&vpath->lock);
+ vpath->vp_open = VXGE_HW_VP_NOT_OPEN;
+ spin_unlock(&vpath->lock);
+
+ vpath->vpmgmt_reg = NULL;
+ vpath->nofl_db = NULL;
+ vpath->max_mtu = 0;
+ vpath->vsport_number = 0;
+ vpath->max_kdfc_db = 0;
+ vpath->max_nofl_db = 0;
+ vpath->ringh = NULL;
+ vpath->fifoh = NULL;
+ memset(&vpath->vpath_handles, 0, sizeof(struct list_head));
+ vpath->stats_block = 0;
+ vpath->hw_stats = NULL;
+ vpath->hw_stats_sav = NULL;
+ vpath->sw_stats = NULL;
+
exit:
return;
}
@@ -4670,7 +4690,7 @@ __vxge_hw_vp_initialize(struct __vxge_hw_device *hldev, u32 vp_id,
vpath = &hldev->virtual_paths[vp_id];
- spin_lock_init(&hldev->virtual_paths[vp_id].lock);
+ spin_lock_init(&vpath->lock);
vpath->vp_id = vp_id;
vpath->vp_open = VXGE_HW_VP_OPEN;
vpath->hldev = hldev;
@@ -5019,10 +5039,6 @@ enum vxge_hw_status vxge_hw_vpath_close(struct __vxge_hw_vpath_handle *vp)
__vxge_hw_vp_terminate(devh, vp_id);
- spin_lock(&vpath->lock);
- vpath->vp_open = VXGE_HW_VP_NOT_OPEN;
- spin_unlock(&vpath->lock);
-
vpath_close_exit:
return status;
}
diff --git a/drivers/net/vxge/vxge-config.h b/drivers/net/vxge/vxge-config.h
index 3c53aa732c9d..359b9b9f8041 100644
--- a/drivers/net/vxge/vxge-config.h
+++ b/drivers/net/vxge/vxge-config.h
@@ -412,44 +412,48 @@ struct vxge_hw_vp_config {
* See also: struct vxge_hw_tim_intr_config{}.
*/
struct vxge_hw_device_config {
- u32 dma_blockpool_initial;
- u32 dma_blockpool_max;
-#define VXGE_HW_MIN_DMA_BLOCK_POOL_SIZE 0
-#define VXGE_HW_INITIAL_DMA_BLOCK_POOL_SIZE 0
-#define VXGE_HW_INCR_DMA_BLOCK_POOL_SIZE 4
-#define VXGE_HW_MAX_DMA_BLOCK_POOL_SIZE 4096
-
-#define VXGE_HW_MAX_PAYLOAD_SIZE_512 2
-
- u32 intr_mode;
-#define VXGE_HW_INTR_MODE_IRQLINE 0
-#define VXGE_HW_INTR_MODE_MSIX 1
-#define VXGE_HW_INTR_MODE_MSIX_ONE_SHOT 2
-
-#define VXGE_HW_INTR_MODE_DEF 0
-
- u32 rth_en;
-#define VXGE_HW_RTH_DISABLE 0
-#define VXGE_HW_RTH_ENABLE 1
-#define VXGE_HW_RTH_DEFAULT 0
-
- u32 rth_it_type;
-#define VXGE_HW_RTH_IT_TYPE_SOLO_IT 0
-#define VXGE_HW_RTH_IT_TYPE_MULTI_IT 1
-#define VXGE_HW_RTH_IT_TYPE_DEFAULT 0
-
- u32 rts_mac_en;
+ u32 device_poll_millis;
+#define VXGE_HW_MIN_DEVICE_POLL_MILLIS 1
+#define VXGE_HW_MAX_DEVICE_POLL_MILLIS 100000
+#define VXGE_HW_DEF_DEVICE_POLL_MILLIS 1000
+
+ u32 dma_blockpool_initial;
+ u32 dma_blockpool_max;
+#define VXGE_HW_MIN_DMA_BLOCK_POOL_SIZE 0
+#define VXGE_HW_INITIAL_DMA_BLOCK_POOL_SIZE 0
+#define VXGE_HW_INCR_DMA_BLOCK_POOL_SIZE 4
+#define VXGE_HW_MAX_DMA_BLOCK_POOL_SIZE 4096
+
+#define VXGE_HW_MAX_PAYLOAD_SIZE_512 2
+
+ u32 intr_mode:2,
+#define VXGE_HW_INTR_MODE_IRQLINE 0
+#define VXGE_HW_INTR_MODE_MSIX 1
+#define VXGE_HW_INTR_MODE_MSIX_ONE_SHOT 2
+
+#define VXGE_HW_INTR_MODE_DEF 0
+
+ rth_en:1,
+#define VXGE_HW_RTH_DISABLE 0
+#define VXGE_HW_RTH_ENABLE 1
+#define VXGE_HW_RTH_DEFAULT 0
+
+ rth_it_type:1,
+#define VXGE_HW_RTH_IT_TYPE_SOLO_IT 0
+#define VXGE_HW_RTH_IT_TYPE_MULTI_IT 1
+#define VXGE_HW_RTH_IT_TYPE_DEFAULT 0
+
+ rts_mac_en:1,
#define VXGE_HW_RTS_MAC_DISABLE 0
#define VXGE_HW_RTS_MAC_ENABLE 1
#define VXGE_HW_RTS_MAC_DEFAULT 0
- struct vxge_hw_vp_config vp_config[VXGE_HW_MAX_VIRTUAL_PATHS];
-
- u32 device_poll_millis;
-#define VXGE_HW_MIN_DEVICE_POLL_MILLIS 1
-#define VXGE_HW_MAX_DEVICE_POLL_MILLIS 100000
-#define VXGE_HW_DEF_DEVICE_POLL_MILLIS 1000
+ hwts_en:1;
+#define VXGE_HW_HWTS_DISABLE 0
+#define VXGE_HW_HWTS_ENABLE 1
+#define VXGE_HW_HWTS_DEFAULT 1
+ struct vxge_hw_vp_config vp_config[VXGE_HW_MAX_VIRTUAL_PATHS];
};
/**
diff --git a/drivers/net/vxge/vxge-ethtool.c b/drivers/net/vxge/vxge-ethtool.c
index c5eb034107fd..92dd72d3f9de 100644
--- a/drivers/net/vxge/vxge-ethtool.c
+++ b/drivers/net/vxge/vxge-ethtool.c
@@ -33,7 +33,8 @@ static int vxge_ethtool_sset(struct net_device *dev, struct ethtool_cmd *info)
{
/* We currently only support 10Gb/FULL */
if ((info->autoneg == AUTONEG_ENABLE) ||
- (info->speed != SPEED_10000) || (info->duplex != DUPLEX_FULL))
+ (ethtool_cmd_speed(info) != SPEED_10000) ||
+ (info->duplex != DUPLEX_FULL))
return -EINVAL;
return 0;
@@ -58,10 +59,10 @@ static int vxge_ethtool_gset(struct net_device *dev, struct ethtool_cmd *info)
info->transceiver = XCVR_EXTERNAL;
if (netif_carrier_ok(dev)) {
- info->speed = SPEED_10000;
+ ethtool_cmd_speed_set(info, SPEED_10000);
info->duplex = DUPLEX_FULL;
} else {
- info->speed = -1;
+ ethtool_cmd_speed_set(info, -1);
info->duplex = -1;
}
@@ -134,22 +135,29 @@ static void vxge_ethtool_gregs(struct net_device *dev,
/**
* vxge_ethtool_idnic - To physically identify the nic on the system.
* @dev : device pointer.
- * @id : pointer to the structure with identification parameters given by
- * ethtool.
+ * @state : requested LED state
*
* Used to physically identify the NIC on the system.
- * The Link LED will blink for a time specified by the user.
- * Return value:
* 0 on success
*/
-static int vxge_ethtool_idnic(struct net_device *dev, u32 data)
+static int vxge_ethtool_idnic(struct net_device *dev,
+ enum ethtool_phys_id_state state)
{
struct vxgedev *vdev = netdev_priv(dev);
struct __vxge_hw_device *hldev = vdev->devh;
- vxge_hw_device_flick_link_led(hldev, VXGE_FLICKER_ON);
- msleep_interruptible(data ? (data * HZ) : VXGE_MAX_FLICKER_TIME);
- vxge_hw_device_flick_link_led(hldev, VXGE_FLICKER_OFF);
+ switch (state) {
+ case ETHTOOL_ID_ACTIVE:
+ vxge_hw_device_flick_link_led(hldev, VXGE_FLICKER_ON);
+ break;
+
+ case ETHTOOL_ID_INACTIVE:
+ vxge_hw_device_flick_link_led(hldev, VXGE_FLICKER_OFF);
+ break;
+
+ default:
+ return -EINVAL;
+ }
return 0;
}
@@ -1064,35 +1072,6 @@ static int vxge_ethtool_get_regs_len(struct net_device *dev)
return sizeof(struct vxge_hw_vpath_reg) * vdev->no_of_vpath;
}
-static u32 vxge_get_rx_csum(struct net_device *dev)
-{
- struct vxgedev *vdev = netdev_priv(dev);
-
- return vdev->rx_csum;
-}
-
-static int vxge_set_rx_csum(struct net_device *dev, u32 data)
-{
- struct vxgedev *vdev = netdev_priv(dev);
-
- if (data)
- vdev->rx_csum = 1;
- else
- vdev->rx_csum = 0;
-
- return 0;
-}
-
-static int vxge_ethtool_op_set_tso(struct net_device *dev, u32 data)
-{
- if (data)
- dev->features |= (NETIF_F_TSO | NETIF_F_TSO6);
- else
- dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO6);
-
- return 0;
-}
-
static int vxge_ethtool_get_sset_count(struct net_device *dev, int sset)
{
struct vxgedev *vdev = netdev_priv(dev);
@@ -1112,40 +1091,6 @@ static int vxge_ethtool_get_sset_count(struct net_device *dev, int sset)
}
}
-static int vxge_set_flags(struct net_device *dev, u32 data)
-{
- struct vxgedev *vdev = netdev_priv(dev);
- enum vxge_hw_status status;
-
- if (ethtool_invalid_flags(dev, data, ETH_FLAG_RXHASH))
- return -EINVAL;
-
- if (!!(data & ETH_FLAG_RXHASH) == vdev->devh->config.rth_en)
- return 0;
-
- if (netif_running(dev) || (vdev->config.rth_steering == NO_STEERING))
- return -EINVAL;
-
- vdev->devh->config.rth_en = !!(data & ETH_FLAG_RXHASH);
-
- /* Enabling RTH requires some of the logic in vxge_device_register and a
- * vpath reset. Due to these restrictions, only allow modification
- * while the interface is down.
- */
- status = vxge_reset_all_vpaths(vdev);
- if (status != VXGE_HW_OK) {
- vdev->devh->config.rth_en = !vdev->devh->config.rth_en;
- return -EFAULT;
- }
-
- if (vdev->devh->config.rth_en)
- dev->features |= NETIF_F_RXHASH;
- else
- dev->features &= ~NETIF_F_RXHASH;
-
- return 0;
-}
-
static int vxge_fw_flash(struct net_device *dev, struct ethtool_flash *parms)
{
struct vxgedev *vdev = netdev_priv(dev);
@@ -1174,19 +1119,10 @@ static const struct ethtool_ops vxge_ethtool_ops = {
.get_link = ethtool_op_get_link,
.get_pauseparam = vxge_ethtool_getpause_data,
.set_pauseparam = vxge_ethtool_setpause_data,
- .get_rx_csum = vxge_get_rx_csum,
- .set_rx_csum = vxge_set_rx_csum,
- .get_tx_csum = ethtool_op_get_tx_csum,
- .set_tx_csum = ethtool_op_set_tx_ipv6_csum,
- .get_sg = ethtool_op_get_sg,
- .set_sg = ethtool_op_set_sg,
- .get_tso = ethtool_op_get_tso,
- .set_tso = vxge_ethtool_op_set_tso,
.get_strings = vxge_ethtool_get_strings,
- .phys_id = vxge_ethtool_idnic,
+ .set_phys_id = vxge_ethtool_idnic,
.get_sset_count = vxge_ethtool_get_sset_count,
.get_ethtool_stats = vxge_get_ethtool_stats,
- .set_flags = vxge_set_flags,
.flash_device = vxge_fw_flash,
};
diff --git a/drivers/net/vxge/vxge-main.c b/drivers/net/vxge/vxge-main.c
index 395423aeec00..fc837cf6bd4d 100644
--- a/drivers/net/vxge/vxge-main.c
+++ b/drivers/net/vxge/vxge-main.c
@@ -304,22 +304,14 @@ vxge_rx_complete(struct vxge_ring *ring, struct sk_buff *skb, u16 vlan,
"%s: %s:%d skb protocol = %d",
ring->ndev->name, __func__, __LINE__, skb->protocol);
- if (ring->gro_enable) {
- if (ring->vlgrp && ext_info->vlan &&
- (ring->vlan_tag_strip ==
- VXGE_HW_VPATH_RPA_STRIP_VLAN_TAG_ENABLE))
- vlan_gro_receive(ring->napi_p, ring->vlgrp,
- ext_info->vlan, skb);
- else
- napi_gro_receive(ring->napi_p, skb);
- } else {
- if (ring->vlgrp && vlan &&
- (ring->vlan_tag_strip ==
- VXGE_HW_VPATH_RPA_STRIP_VLAN_TAG_ENABLE))
- vlan_hwaccel_receive_skb(skb, ring->vlgrp, vlan);
- else
- netif_receive_skb(skb);
- }
+ if (ring->vlgrp && ext_info->vlan &&
+ (ring->vlan_tag_strip ==
+ VXGE_HW_VPATH_RPA_STRIP_VLAN_TAG_ENABLE))
+ vlan_gro_receive(ring->napi_p, ring->vlgrp,
+ ext_info->vlan, skb);
+ else
+ napi_gro_receive(ring->napi_p, skb);
+
vxge_debug_entryexit(VXGE_TRACE,
"%s: %s:%d Exiting...", ring->ndev->name, __func__, __LINE__);
}
@@ -490,7 +482,7 @@ vxge_rx_1b_compl(struct __vxge_hw_ring *ringh, void *dtr,
if ((ext_info.proto & VXGE_HW_FRAME_PROTO_TCP_OR_UDP) &&
!(ext_info.proto & VXGE_HW_FRAME_PROTO_IP_FRAG) &&
- ring->rx_csum && /* Offload Rx side CSUM */
+ (dev->features & NETIF_F_RXCSUM) && /* Offload Rx side CSUM */
ext_info.l3_cksum == VXGE_HW_L3_CKSUM_OK &&
ext_info.l4_cksum == VXGE_HW_L4_CKSUM_OK)
skb->ip_summed = CHECKSUM_UNNECESSARY;
@@ -2094,11 +2086,9 @@ static int vxge_open_vpaths(struct vxgedev *vdev)
vdev->config.fifo_indicate_max_pkts;
vpath->fifo.tx_vector_no = 0;
vpath->ring.rx_vector_no = 0;
- vpath->ring.rx_csum = vdev->rx_csum;
vpath->ring.rx_hwts = vdev->rx_hwts;
vpath->is_open = 1;
vdev->vp_handles[i] = vpath->handle;
- vpath->ring.gro_enable = vdev->config.gro_enable;
vpath->ring.vlan_tag_strip = vdev->vlan_tag_strip;
vdev->stats.vpaths_open++;
} else {
@@ -2282,7 +2272,7 @@ vxge_alarm_msix_handle(int irq, void *dev_id)
VXGE_HW_VPATH_MSIX_ACTIVE) + VXGE_ALARM_MSIX_ID;
for (i = 0; i < vdev->no_of_vpath; i++) {
- /* Reduce the chance of loosing alarm interrupts by masking
+ /* Reduce the chance of losing alarm interrupts by masking
* the vector. A pending bit will be set if an alarm is
* generated and on unmask the interrupt will be fired.
*/
@@ -2670,6 +2660,40 @@ static void vxge_poll_vp_lockup(unsigned long data)
mod_timer(&vdev->vp_lockup_timer, jiffies + HZ / 1000);
}
+static u32 vxge_fix_features(struct net_device *dev, u32 features)
+{
+ u32 changed = dev->features ^ features;
+
+ /* Enabling RTH requires some of the logic in vxge_device_register and a
+ * vpath reset. Due to these restrictions, only allow modification
+ * while the interface is down.
+ */
+ if ((changed & NETIF_F_RXHASH) && netif_running(dev))
+ features ^= NETIF_F_RXHASH;
+
+ return features;
+}
+
+static int vxge_set_features(struct net_device *dev, u32 features)
+{
+ struct vxgedev *vdev = netdev_priv(dev);
+ u32 changed = dev->features ^ features;
+
+ if (!(changed & NETIF_F_RXHASH))
+ return 0;
+
+ /* !netif_running() ensured by vxge_fix_features() */
+
+ vdev->devh->config.rth_en = !!(features & NETIF_F_RXHASH);
+ if (vxge_reset_all_vpaths(vdev) != VXGE_HW_OK) {
+ dev->features = features ^ NETIF_F_RXHASH;
+ vdev->devh->config.rth_en = !!(dev->features & NETIF_F_RXHASH);
+ return -EIO;
+ }
+
+ return 0;
+}
+
/**
* vxge_open
* @dev: pointer to the device structure.
@@ -2788,7 +2812,7 @@ static int vxge_open(struct net_device *dev)
}
/* Enable vpath to sniff all unicast/multicast traffic that is not
- * addressed to them. We allow promiscous mode for PF only
+ * addressed to them. We allow promiscuous mode for PF only
*/
val64 = 0;
@@ -2890,7 +2914,7 @@ out0:
return ret;
}
-/* Loop throught the mac address list and delete all the entries */
+/* Loop through the mac address list and delete all the entries */
static void vxge_free_mac_add_list(struct vxge_vpath *vpath)
{
@@ -2957,7 +2981,7 @@ static int do_vxge_close(struct net_device *dev, int do_io)
val64);
}
- /* Remove the function 0 from promiscous mode */
+ /* Remove the function 0 from promiscuous mode */
vxge_hw_mgmt_reg_write(vdev->devh,
vxge_hw_mgmt_reg_type_mrpcim,
0,
@@ -3112,8 +3136,7 @@ vxge_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *net_stats)
return net_stats;
}
-static enum vxge_hw_status vxge_timestamp_config(struct vxgedev *vdev,
- int enable)
+static enum vxge_hw_status vxge_timestamp_config(struct __vxge_hw_device *devh)
{
enum vxge_hw_status status;
u64 val64;
@@ -3123,27 +3146,24 @@ static enum vxge_hw_status vxge_timestamp_config(struct vxgedev *vdev,
* required for the driver to load (due to a hardware bug),
* there is no need to do anything special here.
*/
- if (enable)
- val64 = VXGE_HW_XMAC_TIMESTAMP_EN |
- VXGE_HW_XMAC_TIMESTAMP_USE_LINK_ID(0) |
- VXGE_HW_XMAC_TIMESTAMP_INTERVAL(0);
- else
- val64 = 0;
+ val64 = VXGE_HW_XMAC_TIMESTAMP_EN |
+ VXGE_HW_XMAC_TIMESTAMP_USE_LINK_ID(0) |
+ VXGE_HW_XMAC_TIMESTAMP_INTERVAL(0);
- status = vxge_hw_mgmt_reg_write(vdev->devh,
+ status = vxge_hw_mgmt_reg_write(devh,
vxge_hw_mgmt_reg_type_mrpcim,
0,
offsetof(struct vxge_hw_mrpcim_reg,
xmac_timestamp),
val64);
- vxge_hw_device_flush_io(vdev->devh);
+ vxge_hw_device_flush_io(devh);
+ devh->config.hwts_en = VXGE_HW_HWTS_ENABLE;
return status;
}
static int vxge_hwtstamp_ioctl(struct vxgedev *vdev, void __user *data)
{
struct hwtstamp_config config;
- enum vxge_hw_status status;
int i;
if (copy_from_user(&config, data, sizeof(config)))
@@ -3164,10 +3184,6 @@ static int vxge_hwtstamp_ioctl(struct vxgedev *vdev, void __user *data)
switch (config.rx_filter) {
case HWTSTAMP_FILTER_NONE:
- status = vxge_timestamp_config(vdev, 0);
- if (status != VXGE_HW_OK)
- return -EFAULT;
-
vdev->rx_hwts = 0;
config.rx_filter = HWTSTAMP_FILTER_NONE;
break;
@@ -3186,8 +3202,7 @@ static int vxge_hwtstamp_ioctl(struct vxgedev *vdev, void __user *data)
case HWTSTAMP_FILTER_PTP_V2_EVENT:
case HWTSTAMP_FILTER_PTP_V2_SYNC:
case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
- status = vxge_timestamp_config(vdev, 1);
- if (status != VXGE_HW_OK)
+ if (vdev->devh->config.hwts_en != VXGE_HW_HWTS_ENABLE)
return -EFAULT;
vdev->rx_hwts = 1;
@@ -3378,6 +3393,8 @@ static const struct net_device_ops vxge_netdev_ops = {
.ndo_do_ioctl = vxge_ioctl,
.ndo_set_mac_address = vxge_set_mac_addr,
.ndo_change_mtu = vxge_change_mtu,
+ .ndo_fix_features = vxge_fix_features,
+ .ndo_set_features = vxge_set_features,
.ndo_vlan_rx_register = vxge_vlan_rx_register,
.ndo_vlan_rx_kill_vid = vxge_vlan_rx_kill_vid,
.ndo_vlan_rx_add_vid = vxge_vlan_rx_add_vid,
@@ -3424,14 +3441,21 @@ static int __devinit vxge_device_register(struct __vxge_hw_device *hldev,
vdev->devh = hldev;
vdev->pdev = hldev->pdev;
memcpy(&vdev->config, config, sizeof(struct vxge_config));
- vdev->rx_csum = 1; /* Enable Rx CSUM by default. */
vdev->rx_hwts = 0;
vdev->titan1 = (vdev->pdev->revision == VXGE_HW_TITAN1_PCI_REVISION);
SET_NETDEV_DEV(ndev, &vdev->pdev->dev);
- ndev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX |
- NETIF_F_HW_VLAN_FILTER;
+ ndev->hw_features = NETIF_F_RXCSUM | NETIF_F_SG |
+ NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
+ NETIF_F_TSO | NETIF_F_TSO6 |
+ NETIF_F_HW_VLAN_TX;
+ if (vdev->config.rth_steering != NO_STEERING)
+ ndev->hw_features |= NETIF_F_RXHASH;
+
+ ndev->features |= ndev->hw_features |
+ NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER;
+
/* Driver entry points */
ndev->irq = vdev->pdev->irq;
ndev->base_addr = (unsigned long) hldev->bar0;
@@ -3443,11 +3467,6 @@ static int __devinit vxge_device_register(struct __vxge_hw_device *hldev,
vxge_initialize_ethtool_ops(ndev);
- if (vdev->config.rth_steering != NO_STEERING) {
- ndev->features |= NETIF_F_RXHASH;
- hldev->config.rth_en = VXGE_HW_RTH_ENABLE;
- }
-
/* Allocate memory for vpath */
vdev->vpaths = kzalloc((sizeof(struct vxge_vpath)) *
no_of_vpath, GFP_KERNEL);
@@ -3459,9 +3478,6 @@ static int __devinit vxge_device_register(struct __vxge_hw_device *hldev,
goto _out1;
}
- ndev->features |= NETIF_F_SG;
-
- ndev->features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
vxge_debug_init(vxge_hw_device_trace_level_get(hldev),
"%s : checksuming enabled", __func__);
@@ -3471,11 +3487,6 @@ static int __devinit vxge_device_register(struct __vxge_hw_device *hldev,
"%s : using High DMA", __func__);
}
- ndev->features |= NETIF_F_TSO | NETIF_F_TSO6;
-
- if (vdev->config.gro_enable)
- ndev->features |= NETIF_F_GRO;
-
ret = register_netdev(ndev);
if (ret) {
vxge_debug_init(vxge_hw_device_trace_level_get(hldev),
@@ -4005,15 +4016,6 @@ static void __devinit vxge_print_parm(struct vxgedev *vdev, u64 vpath_mask)
vdev->config.tx_steering_type = 0;
}
- if (vdev->config.gro_enable) {
- vxge_debug_init(VXGE_ERR,
- "%s: Generic receive offload enabled",
- vdev->ndev->name);
- } else
- vxge_debug_init(VXGE_TRACE,
- "%s: Generic receive offload disabled",
- vdev->ndev->name);
-
if (vdev->config.addr_learn_en)
vxge_debug_init(VXGE_TRACE,
"%s: MAC Address learning enabled", vdev->ndev->name);
@@ -4575,12 +4577,29 @@ vxge_probe(struct pci_dev *pdev, const struct pci_device_id *pre)
goto _exit4;
}
+ /* Always enable HWTS. This will always cause the FCS to be invalid,
+ * due to the fact that HWTS is using the FCS as the location of the
+ * timestamp. The HW FCS checking will still correctly determine if
+ * there is a valid checksum, and the FCS is being removed by the driver
+ * anyway. So no functionality is being lost. Since it is always
+ * enabled, we now simply use the ioctl call to set whether or not the
+ * driver should be paying attention to the HWTS.
+ */
+ if (is_privileged == VXGE_HW_OK) {
+ status = vxge_timestamp_config(hldev);
+ if (status != VXGE_HW_OK) {
+ vxge_debug_init(VXGE_ERR, "%s: HWTS enable failed",
+ VXGE_DRIVER_NAME);
+ ret = -EFAULT;
+ goto _exit4;
+ }
+ }
+
vxge_hw_device_debug_set(hldev, VXGE_ERR, VXGE_COMPONENT_LL);
/* set private device info */
pci_set_drvdata(pdev, hldev);
- ll_config->gro_enable = VXGE_GRO_ALWAYS_AGGREGATE;
ll_config->fifo_indicate_max_pkts = VXGE_FIFO_INDICATE_MAX_PKTS;
ll_config->addr_learn_en = addr_learn_en;
ll_config->rth_algorithm = RTH_ALG_JENKINS;
diff --git a/drivers/net/vxge/vxge-main.h b/drivers/net/vxge/vxge-main.h
index 40474f0da576..ed120aba443d 100644
--- a/drivers/net/vxge/vxge-main.h
+++ b/drivers/net/vxge/vxge-main.h
@@ -168,9 +168,6 @@ struct vxge_config {
#define NEW_NAPI_WEIGHT 64
int napi_weight;
-#define VXGE_GRO_DONOT_AGGREGATE 0
-#define VXGE_GRO_ALWAYS_AGGREGATE 1
- int gro_enable;
int intr_type;
#define INTA 0
#define MSI 1
@@ -290,13 +287,11 @@ struct vxge_ring {
unsigned long interrupt_count;
unsigned long jiffies;
- /* copy of the flag indicating whether rx_csum is to be used */
- u32 rx_csum:1,
- rx_hwts:1;
+ /* copy of the flag indicating whether rx_hwts is to be used */
+ u32 rx_hwts:1;
int pkts_processed;
int budget;
- int gro_enable;
struct napi_struct napi;
struct napi_struct *napi_p;
@@ -369,9 +364,8 @@ struct vxgedev {
*/
u16 all_multi_flg;
- /* A flag indicating whether rx_csum is to be used or not. */
- u32 rx_csum:1,
- rx_hwts:1,
+ /* A flag indicating whether rx_hwts is to be used or not. */
+ u32 rx_hwts:1,
titan1:1;
struct vxge_msix_entry *vxge_entries;
diff --git a/drivers/net/vxge/vxge-traffic.c b/drivers/net/vxge/vxge-traffic.c
index 8674f331311c..2638b8d97b8f 100644
--- a/drivers/net/vxge/vxge-traffic.c
+++ b/drivers/net/vxge/vxge-traffic.c
@@ -1111,7 +1111,7 @@ void vxge_hw_channel_dtr_free(struct __vxge_hw_channel *channel, void *dtrh)
* vxge_hw_channel_dtr_count
* @channel: Channel handle. Obtained via vxge_hw_channel_open().
*
- * Retreive number of DTRs available. This function can not be called
+ * Retrieve number of DTRs available. This function can not be called
* from data path. ring_initial_replenishi() is the only user.
*/
int vxge_hw_channel_dtr_count(struct __vxge_hw_channel *channel)
@@ -2060,7 +2060,7 @@ enum vxge_hw_status vxge_hw_vpath_promisc_enable(
vpath = vp->vpath;
- /* Enable promiscous mode for function 0 only */
+ /* Enable promiscuous mode for function 0 only */
if (!(vpath->hldev->access_rights &
VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM))
return VXGE_HW_OK;
diff --git a/drivers/net/vxge/vxge-traffic.h b/drivers/net/vxge/vxge-traffic.h
index 9d9dfda4c7ab..4a518a3b131c 100644
--- a/drivers/net/vxge/vxge-traffic.h
+++ b/drivers/net/vxge/vxge-traffic.h
@@ -240,7 +240,7 @@ struct vxge_hw_tim_intr_config {
u32 btimer_val;
#define VXGE_HW_MIN_TIM_BTIMER_VAL 0
#define VXGE_HW_MAX_TIM_BTIMER_VAL 67108864
-#define VXGE_HW_USE_FLASH_DEFAULT 0xffffffff
+#define VXGE_HW_USE_FLASH_DEFAULT (~0)
u32 timer_ac_en;
#define VXGE_HW_TIM_TIMER_AC_ENABLE 1
@@ -681,7 +681,7 @@ struct vxge_hw_xmac_aggr_stats {
* @rx_red_discard: Count of received frames that are discarded because of RED
* (Random Early Discard).
* @rx_xgmii_ctrl_err_cnt: Maintains a count of unexpected or misplaced control
- * characters occuring between times of normal data transmission
+ * characters occurring between times of normal data transmission
* (i.e. not included in RX_XGMII_DATA_ERR_CNT). This counter is
* incremented when either -
* 1) The Reconciliation Sublayer (RS) is expecting one control
diff --git a/drivers/net/vxge/vxge-version.h b/drivers/net/vxge/vxge-version.h
index 581e21525e85..b9efa28bab3e 100644
--- a/drivers/net/vxge/vxge-version.h
+++ b/drivers/net/vxge/vxge-version.h
@@ -16,8 +16,8 @@
#define VXGE_VERSION_MAJOR "2"
#define VXGE_VERSION_MINOR "5"
-#define VXGE_VERSION_FIX "2"
-#define VXGE_VERSION_BUILD "22259"
+#define VXGE_VERSION_FIX "3"
+#define VXGE_VERSION_BUILD "22640"
#define VXGE_VERSION_FOR "k"
#define VXGE_FW_VER(maj, min, bld) (((maj) << 16) + ((min) << 8) + (bld))
diff --git a/drivers/net/wan/cosa.c b/drivers/net/wan/cosa.c
index 10bafd59f9c3..6fb6f8e667d0 100644
--- a/drivers/net/wan/cosa.c
+++ b/drivers/net/wan/cosa.c
@@ -329,7 +329,7 @@ static int startmicrocode(struct cosa_data *cosa, int address);
static int readmem(struct cosa_data *cosa, char __user *data, int addr, int len);
static int cosa_reset_and_read_id(struct cosa_data *cosa, char *id);
-/* Auxilliary functions */
+/* Auxiliary functions */
static int get_wait_data(struct cosa_data *cosa);
static int put_wait_data(struct cosa_data *cosa, int data);
static int puthexnumber(struct cosa_data *cosa, int number);
diff --git a/drivers/net/wan/dlci.c b/drivers/net/wan/dlci.c
index 1481a446fefb..21b104db5a90 100644
--- a/drivers/net/wan/dlci.c
+++ b/drivers/net/wan/dlci.c
@@ -341,10 +341,6 @@ static int dlci_add(struct dlci_add *dlci)
}
}
- err = dev_alloc_name(master, master->name);
- if (err < 0)
- goto err2;
-
*(short *)(master->dev_addr) = dlci->dlci;
dlp = netdev_priv(master);
diff --git a/drivers/net/wan/dscc4.c b/drivers/net/wan/dscc4.c
index 4578e5b4b411..acb9ea830628 100644
--- a/drivers/net/wan/dscc4.c
+++ b/drivers/net/wan/dscc4.c
@@ -56,7 +56,7 @@
* IV. Notes
* The current error (XDU, RFO) recovery code is untested.
* So far, RDO takes his RX channel down and the right sequence to enable it
- * again is still a mistery. If RDO happens, plan a reboot. More details
+ * again is still a mystery. If RDO happens, plan a reboot. More details
* in the code (NB: as this happens, TX still works).
* Don't mess the cables during operation, especially on DTE ports. I don't
* suggest it for DCE either but at least one can get some messages instead
@@ -1065,7 +1065,7 @@ static int dscc4_open(struct net_device *dev)
/*
* Due to various bugs, there is no way to reliably reset a
- * specific port (manufacturer's dependant special PCI #RST wiring
+ * specific port (manufacturer's dependent special PCI #RST wiring
* apart: it affects all ports). Thus the device goes in the best
* silent mode possible at dscc4_close() time and simply claims to
* be up if it's opened again. It still isn't possible to change
@@ -1230,9 +1230,9 @@ static inline int dscc4_check_clock_ability(int port)
* scaling. Of course some rounding may take place.
* - no high speed mode (40Mb/s). May be trivial to do but I don't have an
* appropriate external clocking device for testing.
- * - no time-slot/clock mode 5: shameless lazyness.
+ * - no time-slot/clock mode 5: shameless laziness.
*
- * The clock signals wiring can be (is ?) manufacturer dependant. Good luck.
+ * The clock signals wiring can be (is ?) manufacturer dependent. Good luck.
*
* BIG FAT WARNING: if the device isn't provided enough clocking signal, it
* won't pass the init sequence. For example, straight back-to-back DTE without
diff --git a/drivers/net/wan/hdlc_fr.c b/drivers/net/wan/hdlc_fr.c
index 0edb535bb2b5..fc433f28c047 100644
--- a/drivers/net/wan/hdlc_fr.c
+++ b/drivers/net/wan/hdlc_fr.c
@@ -1070,7 +1070,7 @@ static int fr_add_pvc(struct net_device *frad, unsigned int dlci, int type)
hdlc_device *hdlc = dev_to_hdlc(frad);
pvc_device *pvc;
struct net_device *dev;
- int result, used;
+ int used;
if ((pvc = add_pvc(frad, dlci)) == NULL) {
printk(KERN_WARNING "%s: Memory squeeze on fr_add_pvc()\n",
@@ -1106,13 +1106,6 @@ static int fr_add_pvc(struct net_device *frad, unsigned int dlci, int type)
dev->tx_queue_len = 0;
dev->ml_priv = pvc;
- result = dev_alloc_name(dev, dev->name);
- if (result < 0) {
- free_netdev(dev);
- delete_unused_pvcs(hdlc);
- return result;
- }
-
if (register_netdevice(dev) != 0) {
free_netdev(dev);
delete_unused_pvcs(hdlc);
diff --git a/drivers/net/wan/hostess_sv11.c b/drivers/net/wan/hostess_sv11.c
index 48edc5f4dac8..e817583e6ec5 100644
--- a/drivers/net/wan/hostess_sv11.c
+++ b/drivers/net/wan/hostess_sv11.c
@@ -15,7 +15,7 @@
* The hardware does the bus handling to avoid the need for delays between
* touching control registers.
*
- * Port B isnt wired (why - beats me)
+ * Port B isn't wired (why - beats me)
*
* Generic HDLC port Copyright (C) 2008 Krzysztof Halasa <khc@pm.waw.pl>
*/
diff --git a/drivers/net/wan/ixp4xx_hss.c b/drivers/net/wan/ixp4xx_hss.c
index 6c571e198835..f1e1643dc3eb 100644
--- a/drivers/net/wan/ixp4xx_hss.c
+++ b/drivers/net/wan/ixp4xx_hss.c
@@ -178,7 +178,7 @@
*
* The resulting average clock frequency (assuming 33.333 MHz oscillator) is:
* freq = 66.666 MHz / (A + (B + 1) / (C + 1))
- * minumum freq = 66.666 MHz / (A + 1)
+ * minimum freq = 66.666 MHz / (A + 1)
* maximum freq = 66.666 MHz / A
*
* Example: A = 2, B = 2, C = 7, CLOCK_CR register = 2 << 22 | 2 << 12 | 7
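For reference, a minimal user-space sketch that simply evaluates the clock formula quoted above for the example register values. This is illustrative code only, not part of the driver or of this patch; the variable names are made up here.

#include <stdio.h>

int main(void)
{
	unsigned int a = 2, b = 2, c = 7;   /* example values from the comment above */
	double osc = 66.666e6;              /* 66.666 MHz, i.e. twice the 33.333 MHz oscillator */

	printf("CLOCK_CR = 0x%08x\n", (a << 22) | (b << 12) | c);
	printf("average  = %.3f MHz\n", osc / (a + (b + 1.0) / (c + 1.0)) / 1e6);
	printf("minimum  = %.3f MHz\n", osc / (a + 1) / 1e6);
	printf("maximum  = %.3f MHz\n", osc / a / 1e6);
	return 0;
}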
@@ -230,7 +230,7 @@
#define PKT_PIPE_MODE_WRITE 0x57
/* HDLC packet status values - desc->status */
-#define ERR_SHUTDOWN 1 /* stop or shutdown occurrance */
+#define ERR_SHUTDOWN 1 /* stop or shutdown occurrence */
#define ERR_HDLC_ALIGN 2 /* HDLC alignment error */
#define ERR_HDLC_FCS 3 /* HDLC Frame Check Sum error */
#define ERR_RXFREE_Q_EMPTY 4 /* RX-free queue became empty while receiving
diff --git a/drivers/net/wan/lapbether.c b/drivers/net/wan/lapbether.c
index 7f5bb913c8b9..eec463f99c09 100644
--- a/drivers/net/wan/lapbether.c
+++ b/drivers/net/wan/lapbether.c
@@ -338,10 +338,6 @@ static int lapbeth_new_device(struct net_device *dev)
dev_hold(dev);
lapbeth->ethdev = dev;
- rc = dev_alloc_name(ndev, ndev->name);
- if (rc < 0)
- goto fail;
-
rc = -EIO;
if (register_netdevice(ndev))
goto fail;
diff --git a/drivers/net/wan/lmc/lmc_main.c b/drivers/net/wan/lmc/lmc_main.c
index 70feb84df670..b7f2358d23be 100644
--- a/drivers/net/wan/lmc/lmc_main.c
+++ b/drivers/net/wan/lmc/lmc_main.c
@@ -24,7 +24,7 @@
*
* Linux driver notes:
* Linux uses the device struct lmc_private to pass private information
- * arround.
+ * around.
*
* The initialization portion of this driver (the lmc_reset() and the
* lmc_dec_reset() functions, as well as the led controls and the
diff --git a/drivers/net/wan/lmc/lmc_var.h b/drivers/net/wan/lmc/lmc_var.h
index 65d01978e784..01ad45218d19 100644
--- a/drivers/net/wan/lmc/lmc_var.h
+++ b/drivers/net/wan/lmc/lmc_var.h
@@ -180,7 +180,7 @@ struct lmc___ctl {
/*
- * Carefull, look at the data sheet, there's more to this
+ * Careful, look at the data sheet, there's more to this
* structure than meets the eye. It should probably be:
*
* struct tulip_desc_t {
@@ -380,7 +380,7 @@ struct lmc___softc {
/* CSR6 settings */
#define OPERATION_MODE 0x00000200 /* Full Duplex */
#define PROMISC_MODE 0x00000040 /* Promiscuous Mode */
-#define RECIEVE_ALL 0x40000000 /* Recieve All */
+#define RECIEVE_ALL 0x40000000 /* Receive All */
#define PASS_BAD_FRAMES 0x00000008 /* Pass Bad Frames */
/* Dec control registers CSR6 as well */
@@ -398,7 +398,7 @@ struct lmc___softc {
#define TULIP_CMD_RECEIVEALL 0x40000000L /* (RW) Receivel all frames? */
#define TULIP_CMD_MUSTBEONE 0x02000000L /* (RW) Must Be One (21140) */
#define TULIP_CMD_TXTHRSHLDCTL 0x00400000L /* (RW) Transmit Threshold Mode (21140) */
-#define TULIP_CMD_STOREFWD 0x00200000L /* (RW) Store and Foward (21140) */
+#define TULIP_CMD_STOREFWD 0x00200000L /* (RW) Store and Forward (21140) */
#define TULIP_CMD_NOHEARTBEAT 0x00080000L /* (RW) No Heartbeat (21140) */
#define TULIP_CMD_PORTSELECT 0x00040000L /* (RW) Post Select (100Mb) (21140) */
#define TULIP_CMD_FULLDUPLEX 0x00000200L /* (RW) Full Duplex Mode */
diff --git a/drivers/net/wan/z85230.c b/drivers/net/wan/z85230.c
index 93956861ea21..0806232e0f8f 100644
--- a/drivers/net/wan/z85230.c
+++ b/drivers/net/wan/z85230.c
@@ -542,7 +542,7 @@ static void z8530_dma_tx(struct z8530_channel *chan)
z8530_tx(chan);
return;
}
- /* This shouldnt occur in DMA mode */
+ /* This shouldn't occur in DMA mode */
printk(KERN_ERR "DMA tx - bogus event!\n");
z8530_tx(chan);
}
@@ -1219,7 +1219,7 @@ static const char *z8530_type_name[]={
* @io: the port value in question
*
* Describe a Z8530 in a standard format. We must pass the I/O as
- * the port offset isnt predictable. The main reason for this function
+ * the port offset isn't predictable. The main reason for this function
* is to try and get a common format of report.
*/
@@ -1588,7 +1588,7 @@ static void z8530_rx_done(struct z8530_channel *c)
unsigned long flags;
/*
- * Complete this DMA. Neccessary to find the length
+ * Complete this DMA. Necessary to find the length
*/
flags=claim_dma_lock();
@@ -1657,7 +1657,7 @@ static void z8530_rx_done(struct z8530_channel *c)
* fifo length for this. Thus we want to flip to the new
* buffer and then mess around copying and allocating
* things. For the current case it doesn't matter but
- * if you build a system where the sync irq isnt blocked
+ * if you build a system where the sync irq isn't blocked
* by the kernel IRQ disable then you need only block the
* sync IRQ for the RT_LOCK area.
*
diff --git a/drivers/net/wimax/i2400m/control.c b/drivers/net/wimax/i2400m/control.c
index 12b84ed0e38a..727d728649b7 100644
--- a/drivers/net/wimax/i2400m/control.c
+++ b/drivers/net/wimax/i2400m/control.c
@@ -378,7 +378,7 @@ void i2400m_report_tlv_system_state(struct i2400m *i2400m,
* the device's state as sometimes we need to do a link-renew (the BS
* wants us to renew a DHCP lease, for example).
*
- * In fact, doc says that everytime we get a link-up, we should do a
+ * In fact, doc says that every time we get a link-up, we should do a
* DHCP negotiation...
*/
static
@@ -675,7 +675,7 @@ void i2400m_msg_to_dev_cancel_wait(struct i2400m *i2400m, int code)
* - the ack message wasn't formatted correctly
*
* The returned skb has been allocated with wimax_msg_to_user_alloc(),
- * it contains the reponse in a netlink attribute and is ready to be
+ * it contains the response in a netlink attribute and is ready to be
* passed up to user space with wimax_msg_to_user_send(). To access
* the payload and its length, use wimax_msg_{data,len}() on the skb.
*
diff --git a/drivers/net/wimax/i2400m/driver.c b/drivers/net/wimax/i2400m/driver.c
index 65bc334ed57b..47cae7150bc1 100644
--- a/drivers/net/wimax/i2400m/driver.c
+++ b/drivers/net/wimax/i2400m/driver.c
@@ -654,7 +654,7 @@ void __i2400m_dev_reset_handle(struct work_struct *ws)
if (result == -EUCLEAN) {
/*
* We come here because the reset during operational mode
- * wasn't successully done and need to proceed to a bus
+ * wasn't successfully done and need to proceed to a bus
* reset. For the dev_reset_handle() to be able to handle
* the reset event later properly, we restore boot_mode back
* to the state before previous reset. ie: just like we are
@@ -755,7 +755,7 @@ EXPORT_SYMBOL_GPL(i2400m_error_recovery);
* Alloc the command and ack buffers for boot mode
*
* Get the buffers needed to deal with boot mode messages. These
- * buffers need to be allocated before the sdio recieve irq is setup.
+ * buffers need to be allocated before the sdio receive irq is setup.
*/
static
int i2400m_bm_buf_alloc(struct i2400m *i2400m)
diff --git a/drivers/net/wimax/i2400m/fw.c b/drivers/net/wimax/i2400m/fw.c
index 8b55a5b14152..85dadd5bf4be 100644
--- a/drivers/net/wimax/i2400m/fw.c
+++ b/drivers/net/wimax/i2400m/fw.c
@@ -54,7 +54,7 @@
* endpoint and read from it in the notification endpoint. In SDIO we
* talk to it via the write address and read from the read address.
*
- * Upon entrance to boot mode, the device sends (preceeded with a few
+ * Upon entrance to boot mode, the device sends (preceded with a few
* zero length packets (ZLPs) on the notification endpoint in USB) a
* reboot barker (4 le32 words with the same value). We ack it by
* sending the same barker to the device. The device acks with a
@@ -1589,7 +1589,7 @@ int i2400m_dev_bootstrap(struct i2400m *i2400m, enum i2400m_bri flags)
i2400m->fw_name = fw_name;
ret = i2400m_fw_bootstrap(i2400m, fw, flags);
release_firmware(fw);
- if (ret >= 0) /* firmware loaded succesfully */
+ if (ret >= 0) /* firmware loaded successfully */
break;
i2400m->fw_name = NULL;
}
diff --git a/drivers/net/wimax/i2400m/i2400m-usb.h b/drivers/net/wimax/i2400m/i2400m-usb.h
index eb80243e22df..6650fde99e1d 100644
--- a/drivers/net/wimax/i2400m/i2400m-usb.h
+++ b/drivers/net/wimax/i2400m/i2400m-usb.h
@@ -105,14 +105,14 @@ static inline void edc_init(struct edc *edc)
*
* @edc: pointer to error density counter.
* @max_err: maximum number of errors we can accept over the timeframe
- * @timeframe: lenght of the timeframe (in jiffies).
+ * @timeframe: length of the timeframe (in jiffies).
*
* Returns: !0 1 if maximum acceptable errors per timeframe has been
* exceeded. 0 otherwise.
*
* This is way to determine if the number of acceptable errors per time
* period has been exceeded. It is not accurate as there are cases in which
- * this scheme will not work, for example if there are periodic occurences
+ * this scheme will not work, for example if there are periodic occurrences
* of errors that straddle updates to the start time. This scheme is
* sufficient for our usage.
*
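As an aside, a self-contained sketch of the error-density idea described in this comment. It is not the driver's edc code (which counts in jiffies); the names and the use of wall-clock seconds here are purely illustrative.

#include <stdio.h>
#include <time.h>

struct edc {
	unsigned int errorcount;
	time_t timestart;
};

/* Count one error; return non-zero once more than max_err errors have been
 * seen inside a single timeframe (in seconds). As noted above, errors that
 * straddle a restart of the timeframe are not handled precisely. */
static int edc_inc(struct edc *edc, unsigned int max_err, unsigned int timeframe)
{
	time_t now = time(NULL);

	if (now - edc->timestart > (time_t)timeframe) {
		edc->errorcount = 0;
		edc->timestart = now;
	}
	return ++edc->errorcount > max_err;
}

int main(void)
{
	struct edc edc = { 0, time(NULL) };
	unsigned int i;

	for (i = 0; i < 5; i++)
		printf("error %u -> exceeded=%d\n", i + 1, edc_inc(&edc, 3, 60));
	return 0;
}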
@@ -204,7 +204,7 @@ enum {
* usb_autopm_get/put_interface() barriers when executing
* commands. See doc in i2400mu_suspend() for more information.
*
- * @rx_size_auto_shrink: if true, the rx_size is shrinked
+ * @rx_size_auto_shrink: if true, the rx_size is shrunk
* automatically based on the average size of the received
* transactions. This allows the receive code to allocate smaller
* chunks of memory and thus reduce pressure on the memory
diff --git a/drivers/net/wimax/i2400m/i2400m.h b/drivers/net/wimax/i2400m/i2400m.h
index 030cbfd31704..5eacc653a94d 100644
--- a/drivers/net/wimax/i2400m/i2400m.h
+++ b/drivers/net/wimax/i2400m/i2400m.h
@@ -526,7 +526,7 @@ struct i2400m_barker_db;
*
* @barker: barker type that the device uses; this is initialized by
* i2400m_is_boot_barker() the first time it is called. Then it
- * won't change during the life cycle of the device and everytime
+ * won't change during the life cycle of the device and every time
* a boot barker is received, it is just verified for it being the
* same.
*
@@ -928,7 +928,7 @@ extern void i2400m_report_tlv_rf_switches_status(
struct i2400m *, const struct i2400m_tlv_rf_switches_status *);
/*
- * Helpers for firmware backwards compability
+ * Helpers for firmware backwards compatibility
*
* As we aim to support at least the firmware version that was
* released with the previous kernel/driver release, some code will be
diff --git a/drivers/net/wimax/i2400m/netdev.c b/drivers/net/wimax/i2400m/netdev.c
index 94742e1eafe0..2edd8fe1c1f3 100644
--- a/drivers/net/wimax/i2400m/netdev.c
+++ b/drivers/net/wimax/i2400m/netdev.c
@@ -166,7 +166,7 @@ void i2400m_wake_tx_work(struct work_struct *ws)
d_fnstart(3, dev, "(ws %p i2400m %p skb %p)\n", ws, i2400m, skb);
result = -EINVAL;
if (skb == NULL) {
- dev_err(dev, "WAKE&TX: skb dissapeared!\n");
+ dev_err(dev, "WAKE&TX: skb disappeared!\n");
goto out_put;
}
/* If we have, somehow, lost the connection after this was
diff --git a/drivers/net/wimax/i2400m/op-rfkill.c b/drivers/net/wimax/i2400m/op-rfkill.c
index 9e02b90b0080..b0dba35a8ad2 100644
--- a/drivers/net/wimax/i2400m/op-rfkill.c
+++ b/drivers/net/wimax/i2400m/op-rfkill.c
@@ -27,7 +27,7 @@
* - report changes in the HW RF Kill switch [with
* wimax_rfkill_{sw,hw}_report(), which happens when we detect those
* indications coming through hardware reports]. We also do it on
- * initialization to let the stack know the intial HW state.
+ * initialization to let the stack know the initial HW state.
*
* - implement indications from the stack to change the SW RF Kill
* switch (coming from sysfs, the wimax stack or user space).
@@ -73,7 +73,7 @@ int i2400m_radio_is(struct i2400m *i2400m, enum wimax_rf_state state)
* Generic Netlink will call this function when a message is sent from
* userspace to change the software RF-Kill switch status.
*
- * This function will set the device's sofware RF-Kill switch state to
+ * This function will set the device's software RF-Kill switch state to
* match what is requested.
*
* NOTE: the i2400m has a strict state machine; we can only set the
diff --git a/drivers/net/wimax/i2400m/rx.c b/drivers/net/wimax/i2400m/rx.c
index 844133b44af0..2f94a872101f 100644
--- a/drivers/net/wimax/i2400m/rx.c
+++ b/drivers/net/wimax/i2400m/rx.c
@@ -349,7 +349,7 @@ error_no_waiter:
*
* For reports: We can't clone the original skb where the data is
* because we need to send this up via netlink; netlink has to add
- * headers and we can't overwrite what's preceeding the payload...as
+ * headers and we can't overwrite what's preceding the payload...as
* it is another message. So we just dup them.
*/
static
@@ -425,7 +425,7 @@ error_check:
*
* As in i2400m_rx_ctl(), we can't clone the original skb where the
* data is because we need to send this up via netlink; netlink has to
- * add headers and we can't overwrite what's preceeding the
+ * add headers and we can't overwrite what's preceding the
* payload...as it is another message. So we just dup them.
*/
static
diff --git a/drivers/net/wimax/i2400m/tx.c b/drivers/net/wimax/i2400m/tx.c
index 3f819efc06b5..4b30ed11d785 100644
--- a/drivers/net/wimax/i2400m/tx.c
+++ b/drivers/net/wimax/i2400m/tx.c
@@ -149,7 +149,7 @@
* (with a moved message header to make sure it is size-aligned to
* 16), TAIL room that was unusable (and thus is marked with a message
* header that says 'skip this') and at the head of the buffer, an
- * imcomplete message with a couple of payloads.
+ * incomplete message with a couple of payloads.
*
* N ___________________________________________________
* | |
@@ -819,7 +819,7 @@ EXPORT_SYMBOL_GPL(i2400m_tx);
* the FIF that is ready for transmission.
*
* It sets the state in @i2400m to indicate the bus-specific driver is
- * transfering that message (i2400m->tx_msg_size).
+ * transferring that message (i2400m->tx_msg_size).
*
* Once the transfer is completed, call i2400m_tx_msg_sent().
*
diff --git a/drivers/net/wimax/i2400m/usb-fw.c b/drivers/net/wimax/i2400m/usb-fw.c
index b58ec56b86f8..1fda46c55eb3 100644
--- a/drivers/net/wimax/i2400m/usb-fw.c
+++ b/drivers/net/wimax/i2400m/usb-fw.c
@@ -169,7 +169,7 @@ retry:
*
* Command can be a raw command, which requires no preparation (and
* which might not even be following the command format). Checks that
- * the right amount of data was transfered.
+ * the right amount of data was transferred.
*
* To satisfy USB requirements (no onstack, vmalloc or in data segment
* buffers), we copy the command to i2400m->bm_cmd_buf and send it from
diff --git a/drivers/net/wimax/i2400m/usb-rx.c b/drivers/net/wimax/i2400m/usb-rx.c
index a26483a812a5..e3257681e360 100644
--- a/drivers/net/wimax/i2400m/usb-rx.c
+++ b/drivers/net/wimax/i2400m/usb-rx.c
@@ -58,7 +58,7 @@
* a zillion reads; by serializing, we are throttling.
*
* - RX data processing can get heavy enough so that it is not
- * appropiate for doing it in the USB callback; thus we run it in a
+ * appropriate for doing it in the USB callback; thus we run it in a
* process context.
*
* We provide a read buffer of an arbitrary size (short of a page); if
diff --git a/drivers/net/wimax/i2400m/usb-tx.c b/drivers/net/wimax/i2400m/usb-tx.c
index c65b9979f87e..ac357acfb3e9 100644
--- a/drivers/net/wimax/i2400m/usb-tx.c
+++ b/drivers/net/wimax/i2400m/usb-tx.c
@@ -168,7 +168,7 @@ retry:
/*
* Get the next TX message in the TX FIFO and send it to the device
*
- * Note we exit the loop if i2400mu_tx() fails; that funtion only
+ * Note we exit the loop if i2400mu_tx() fails; that function only
* fails on hard error (failing to tx a buffer not being one of them,
* see its doc).
*
diff --git a/drivers/net/wireless/airo.c b/drivers/net/wireless/airo.c
index 57a79b0475f6..4e5c7a11f04a 100644
--- a/drivers/net/wireless/airo.c
+++ b/drivers/net/wireless/airo.c
@@ -1884,7 +1884,7 @@ static int airo_open(struct net_device *dev) {
/* Make sure the card is configured.
* Wireless Extensions may postpone config changes until the card
* is open (to pipeline changes and speed-up card setup). If
- * those changes are not yet commited, do it now - Jean II */
+ * those changes are not yet committed, do it now - Jean II */
if (test_bit(FLAG_COMMIT, &ai->flags)) {
disable_MAC(ai, 1);
writeConfigRid(ai, 1);
@@ -1992,7 +1992,7 @@ static int mpi_send_packet (struct net_device *dev)
/*
* Magic, the cards firmware needs a length count (2 bytes) in the host buffer
* right after TXFID_HDR.The TXFID_HDR contains the status short so payloadlen
- * is immediatly after it. ------------------------------------------------
+ * is immediately after it. ------------------------------------------------
* |TXFIDHDR+STATUS|PAYLOADLEN|802.3HDR|PACKETDATA|
* ------------------------------------------------
*/
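A rough stand-alone sketch of the buffer layout pictured above. The helper, the field sizes and the little-endian length are assumptions for illustration only, not the driver's actual structures or code.

#include <stdint.h>
#include <string.h>

/* Lay out: |TXFIDHDR+STATUS|PAYLOADLEN|802.3HDR|PACKETDATA| */
static size_t build_host_buf(uint8_t *buf, const uint8_t *txfid_hdr, size_t hdr_len,
			     const uint8_t *frame, uint16_t frame_len)
{
	size_t off = 0;

	memcpy(buf + off, txfid_hdr, hdr_len);   /* TXFIDHDR including the status */
	off += hdr_len;
	buf[off++] = frame_len & 0xff;           /* 2-byte payload length,        */
	buf[off++] = frame_len >> 8;             /* little-endian (assumed)       */
	memcpy(buf + off, frame, frame_len);     /* 802.3 header + packet data    */
	return off + frame_len;
}

int main(void)
{
	uint8_t hdr[4] = { 0 }, frame[14] = { 0 }, buf[64];

	return build_host_buf(buf, hdr, sizeof(hdr), frame, sizeof(frame)) == 20 ? 0 : 1;
}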
@@ -2006,7 +2006,7 @@ static int mpi_send_packet (struct net_device *dev)
sizeof(wifictlhdr8023) + 2 ;
/*
- * Firmware automaticly puts 802 header on so
+ * Firmware automatically puts 802 header on so
* we don't need to account for it in the length
*/
if (test_bit(FLAG_MIC_CAPABLE, &ai->flags) && ai->micstats.enabled &&
@@ -2531,7 +2531,7 @@ static int mpi_init_descriptors (struct airo_info *ai)
/*
* We are setting up three things here:
* 1) Map AUX memory for descriptors: Rid, TxFid, or RxFid.
- * 2) Map PCI memory for issueing commands.
+ * 2) Map PCI memory for issuing commands.
* 3) Allocate memory (shared) to send and receive ethernet frames.
*/
static int mpi_map_card(struct airo_info *ai, struct pci_dev *pci)
@@ -3947,7 +3947,7 @@ static u16 issuecommand(struct airo_info *ai, Cmd *pCmd, Resp *pRsp) {
if ( max_tries == -1 ) {
airo_print_err(ai->dev->name,
- "Max tries exceeded when issueing command");
+ "Max tries exceeded when issuing command");
if (IN4500(ai, COMMAND) & COMMAND_BUSY)
OUT4500(ai, EVACK, EV_CLEARCOMMANDBUSY);
return ERROR;
@@ -4173,7 +4173,7 @@ done:
}
/* Note, that we are using BAP1 which is also used by transmit, so
- * make sure this isnt called when a transmit is happening */
+ * make sure this isn't called when a transmit is happening */
static int PC4500_writerid(struct airo_info *ai, u16 rid,
const void *pBuf, int len, int lock)
{
@@ -4776,7 +4776,7 @@ static int proc_stats_rid_open( struct inode *inode,
if (!statsLabels[i]) continue;
if (j+strlen(statsLabels[i])+16>4096) {
airo_print_warn(apriv->dev->name,
- "Potentially disasterous buffer overflow averted!");
+ "Potentially disastrous buffer overflow averted!");
break;
}
j+=sprintf(data->rbuffer+j, "%s: %u\n", statsLabels[i],
diff --git a/drivers/net/wireless/ath/ath5k/ani.h b/drivers/net/wireless/ath/ath5k/ani.h
index d0a664039c87..034015397093 100644
--- a/drivers/net/wireless/ath/ath5k/ani.h
+++ b/drivers/net/wireless/ath/ath5k/ani.h
@@ -27,7 +27,7 @@
#define ATH5K_ANI_RSSI_THR_HIGH 40
#define ATH5K_ANI_RSSI_THR_LOW 7
-/* maximum availabe levels */
+/* maximum available levels */
#define ATH5K_ANI_MAX_FIRSTEP_LVL 2
#define ATH5K_ANI_MAX_NOISE_IMM_LVL 1
diff --git a/drivers/net/wireless/ath/ath5k/base.c b/drivers/net/wireless/ath/ath5k/base.c
index aaa0cd72b0de..22047628ccfa 100644
--- a/drivers/net/wireless/ath/ath5k/base.c
+++ b/drivers/net/wireless/ath/ath5k/base.c
@@ -1976,7 +1976,7 @@ ath5k_beacon_update_timers(struct ath5k_softc *sc, u64 bc_tsf)
#define FUDGE AR5K_TUNE_SW_BEACON_RESP + 3
/* We use FUDGE to make sure the next TBTT is ahead of the current TU.
- * Since we later substract AR5K_TUNE_SW_BEACON_RESP (10) in the timer
+ * Since we later subtract AR5K_TUNE_SW_BEACON_RESP (10) in the timer
* configuration we need to make sure it is bigger than that. */
if (bc_tsf == -1) {
@@ -1994,7 +1994,7 @@ ath5k_beacon_update_timers(struct ath5k_softc *sc, u64 bc_tsf)
intval |= AR5K_BEACON_RESET_TSF;
} else if (bc_tsf > hw_tsf) {
/*
- * beacon received, SW merge happend but HW TSF not yet updated.
+ * beacon received, SW merge happened but HW TSF not yet updated.
* not possible to reconfigure timers yet, but next time we
* receive a beacon with the same BSSID, the hardware will
* automatically update the TSF and then we need to reconfigure
@@ -2693,7 +2693,7 @@ ath5k_reset(struct ath5k_softc *sc, struct ieee80211_channel *chan,
synchronize_irq(sc->irq);
stop_tasklets(sc);
- /* Save ani mode and disable ANI durring
+ /* Save ani mode and disable ANI during
* reset. If we don't we might get false
* PHY error interrupts. */
ani_mode = ah->ah_sc->ani_state.ani_mode;
diff --git a/drivers/net/wireless/ath/ath5k/desc.c b/drivers/net/wireless/ath/ath5k/desc.c
index dd7cd95c364a..62172d585723 100644
--- a/drivers/net/wireless/ath/ath5k/desc.c
+++ b/drivers/net/wireless/ath/ath5k/desc.c
@@ -51,7 +51,7 @@ ath5k_hw_setup_2word_tx_desc(struct ath5k_hw *ah, struct ath5k_desc *desc,
/*
* Validate input
* - Zero retries don't make sense.
- * - A zero rate will put the HW into a mode where it continously sends
+ * - A zero rate will put the HW into a mode where it continuously sends
* noise on the channel, so it is important to avoid this.
*/
if (unlikely(tx_tries0 == 0)) {
@@ -196,7 +196,7 @@ static int ath5k_hw_setup_4word_tx_desc(struct ath5k_hw *ah,
/*
* Validate input
* - Zero retries don't make sense.
- * - A zero rate will put the HW into a mode where it continously sends
+ * - A zero rate will put the HW into a mode where it continuously sends
* noise on the channel, so it is important to avoid this.
*/
if (unlikely(tx_tries0 == 0)) {
@@ -307,7 +307,7 @@ ath5k_hw_setup_mrr_tx_desc(struct ath5k_hw *ah, struct ath5k_desc *desc,
/*
* Rates can be 0 as long as the retry count is 0 too.
* A zero rate and nonzero retry count will put the HW into a mode where
- * it continously sends noise on the channel, so it is important to
+ * it continuously sends noise on the channel, so it is important to
* avoid this.
*/
if (unlikely((tx_rate1 == 0 && tx_tries1 != 0) ||
@@ -349,7 +349,7 @@ ath5k_hw_setup_mrr_tx_desc(struct ath5k_hw *ah, struct ath5k_desc *desc,
\***********************/
/*
- * Proccess the tx status descriptor on 5210/5211
+ * Process the tx status descriptor on 5210/5211
*/
static int ath5k_hw_proc_2word_tx_status(struct ath5k_hw *ah,
struct ath5k_desc *desc, struct ath5k_tx_status *ts)
@@ -398,7 +398,7 @@ static int ath5k_hw_proc_2word_tx_status(struct ath5k_hw *ah,
}
/*
- * Proccess a tx status descriptor on 5212
+ * Process a tx status descriptor on 5212
*/
static int ath5k_hw_proc_4word_tx_status(struct ath5k_hw *ah,
struct ath5k_desc *desc, struct ath5k_tx_status *ts)
@@ -490,7 +490,7 @@ int ath5k_hw_setup_rx_desc(struct ath5k_hw *ah, struct ath5k_desc *desc,
}
/*
- * Proccess the rx status descriptor on 5210/5211
+ * Process the rx status descriptor on 5210/5211
*/
static int ath5k_hw_proc_5210_rx_status(struct ath5k_hw *ah,
struct ath5k_desc *desc, struct ath5k_rx_status *rs)
@@ -573,7 +573,7 @@ static int ath5k_hw_proc_5210_rx_status(struct ath5k_hw *ah,
}
/*
- * Proccess the rx status descriptor on 5212
+ * Process the rx status descriptor on 5212
*/
static int ath5k_hw_proc_5212_rx_status(struct ath5k_hw *ah,
struct ath5k_desc *desc,
diff --git a/drivers/net/wireless/ath/ath5k/eeprom.c b/drivers/net/wireless/ath/ath5k/eeprom.c
index e9263e4c7f3e..1fef84f87c78 100644
--- a/drivers/net/wireless/ath/ath5k/eeprom.c
+++ b/drivers/net/wireless/ath/ath5k/eeprom.c
@@ -1134,7 +1134,7 @@ ath5k_eeprom_read_pcal_info_5112(struct ath5k_hw *ah, int mode)
*
* To recreate the curves we read here the points and interpolate
* later. Note that in most cases only 2 (higher and lower) curves are
- * used (like RF5112) but vendors have the oportunity to include all
+ * used (like RF5112) but vendors have the opportunity to include all
* 4 curves on eeprom. The final curve (higher power) has an extra
* point for better accuracy like RF5112.
*/
@@ -1360,7 +1360,7 @@ ath5k_eeprom_read_pcal_info_2413(struct ath5k_hw *ah, int mode)
/*
* Pd gain 0 is not the last pd gain
* so it only has 2 pd points.
- * Continue wih pd gain 1.
+ * Continue with pd gain 1.
*/
pcinfo->pwr_i[1] = (val >> 10) & 0x1f;
diff --git a/drivers/net/wireless/ath/ath5k/pci.c b/drivers/net/wireless/ath/ath5k/pci.c
index 5cc4a2fe47b6..296c316a8341 100644
--- a/drivers/net/wireless/ath/ath5k/pci.c
+++ b/drivers/net/wireless/ath/ath5k/pci.c
@@ -58,7 +58,7 @@ static void ath5k_pci_read_cachesize(struct ath_common *common, int *csz)
*csz = (int)u8tmp;
/*
- * This check was put in to avoid "unplesant" consequences if
+ * This check was put in to avoid "unpleasant" consequences if
* the bootrom has not fully initialized all PCI devices.
* Sometimes the cache line size register is not set
*/
diff --git a/drivers/net/wireless/ath/ath5k/pcu.c b/drivers/net/wireless/ath/ath5k/pcu.c
index 71b60b7c617e..712a9ac4000e 100644
--- a/drivers/net/wireless/ath/ath5k/pcu.c
+++ b/drivers/net/wireless/ath/ath5k/pcu.c
@@ -465,7 +465,7 @@ void ath5k_hw_set_rx_filter(struct ath5k_hw *ah, u32 filter)
}
/*
- * The AR5210 uses promiscous mode to detect radar activity
+ * The AR5210 uses promiscuous mode to detect radar activity
*/
if (ah->ah_version == AR5K_AR5210 &&
(filter & AR5K_RX_FILTER_RADARERR)) {
@@ -699,8 +699,8 @@ ath5k_check_timer_win(int a, int b, int window, int intval)
* The need for this function arises from the fact that we have 4 separate
* HW timer registers (TIMER0 - TIMER3), which are closely related to the
* next beacon target time (NBTT), and that the HW updates these timers
- * seperately based on the current TSF value. The hardware increments each
- * timer by the beacon interval, when the local TSF coverted to TU is equal
+ * separately based on the current TSF value. The hardware increments each
+ * timer by the beacon interval, when the local TSF converted to TU is equal
* to the value stored in the timer.
*
* The reception of a beacon with the same BSSID can update the local HW TSF
diff --git a/drivers/net/wireless/ath/ath5k/phy.c b/drivers/net/wireless/ath/ath5k/phy.c
index 62ce2f4e8605..55441913344d 100644
--- a/drivers/net/wireless/ath/ath5k/phy.c
+++ b/drivers/net/wireless/ath/ath5k/phy.c
@@ -335,11 +335,11 @@ static void ath5k_hw_wait_for_synth(struct ath5k_hw *ah,
* http://madwifi-project.org/ticket/1659
* with various measurements and diagrams
*
- * TODO: Deal with power drops due to probes by setting an apropriate
+ * TODO: Deal with power drops due to probes by setting an appropriate
* tx power on the probe packets ! Make this part of the calibration process.
*/
-/* Initialize ah_gain durring attach */
+/* Initialize ah_gain during attach */
int ath5k_hw_rfgain_opt_init(struct ath5k_hw *ah)
{
/* Initialize the gain optimization values */
@@ -1049,7 +1049,7 @@ static int ath5k_hw_rfregs_init(struct ath5k_hw *ah,
\**************************/
/*
- * Convertion needed for RF5110
+ * Conversion needed for RF5110
*/
static u32 ath5k_hw_rf5110_chan2athchan(struct ieee80211_channel *channel)
{
@@ -1088,7 +1088,7 @@ static int ath5k_hw_rf5110_channel(struct ath5k_hw *ah,
}
/*
- * Convertion needed for 5111
+ * Conversion needed for 5111
*/
static int ath5k_hw_rf5111_chan2athchan(unsigned int ieee,
struct ath5k_athchan_2ghz *athchan)
@@ -2201,7 +2201,7 @@ ath5k_create_power_curve(s16 pmin, s16 pmax,
/*
* Get the surrounding per-channel power calibration piers
* for a given frequency so that we can interpolate between
- * them and come up with an apropriate dataset for our current
+ * them and come up with an appropriate dataset for our current
* channel.
*/
static void
@@ -2618,7 +2618,7 @@ ath5k_write_pcdac_table(struct ath5k_hw *ah)
/*
* Set the gain boundaries and create final Power to PDADC table
*
- * We can have up to 4 pd curves, we need to do a simmilar process
+ * We can have up to 4 pd curves, we need to do a similar process
* as we do for RF5112. This time we don't have an edge_flag but we
* set the gain boundaries on a separate register.
*/
@@ -2826,13 +2826,13 @@ ath5k_setup_channel_powertable(struct ath5k_hw *ah,
u32 target = channel->center_freq;
int pdg, i;
- /* Get surounding freq piers for this channel */
+ /* Get surrounding freq piers for this channel */
ath5k_get_chan_pcal_surrounding_piers(ah, channel,
&pcinfo_L,
&pcinfo_R);
/* Loop over pd gain curves on
- * surounding freq piers by index */
+ * surrounding freq piers by index */
for (pdg = 0; pdg < ee->ee_pd_gains[ee_mode]; pdg++) {
/* Fill curves in reverse order
@@ -2923,7 +2923,7 @@ ath5k_setup_channel_powertable(struct ath5k_hw *ah,
}
/* Interpolate between curves
- * of surounding freq piers to
+ * of surrounding freq piers to
* get the final curve for this
* pd gain. Re-use tmpL for interpolation
* output */
@@ -2947,7 +2947,7 @@ ath5k_setup_channel_powertable(struct ath5k_hw *ah,
/* Fill min and max power levels for this
* channel by interpolating the values on
- * surounding channels to complete the dataset */
+ * surrounding channels to complete the dataset */
ah->ah_txpower.txp_min_pwr = ath5k_get_interpolated_value(target,
(s16) pcinfo_L->freq,
(s16) pcinfo_R->freq,
@@ -3179,7 +3179,7 @@ ath5k_hw_txpower(struct ath5k_hw *ah, struct ieee80211_channel *channel,
/* FIXME: TPC scale reduction */
- /* Get surounding channels for per-rate power table
+ /* Get surrounding channels for per-rate power table
* calibration */
ath5k_get_rate_pcal_data(ah, channel, &rate_info);
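The "interpolate between surrounding piers" step these comments refer to is plain linear interpolation. A small stand-alone sketch of the idea follows; it is not the driver's ath5k_get_interpolated_value() itself, and the sample numbers are arbitrary.

#include <stdio.h>

/* Linear interpolation of a calibration value at 'target', given the values
 * measured at the left and right piers surrounding it. */
static int interpolate(int target, int x_left, int x_right, int y_left, int y_right)
{
	if (x_right == x_left)                  /* degenerate case: both piers coincide */
		return y_left;
	return y_left + (y_right - y_left) * (target - x_left) / (x_right - x_left);
}

int main(void)
{
	/* piers at 2412 and 2462 MHz with (arbitrary) power values 18 and 22 */
	printf("%d\n", interpolate(2437, 2412, 2462, 18, 22));   /* -> 20 */
	return 0;
}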
diff --git a/drivers/net/wireless/ath/ath5k/reg.h b/drivers/net/wireless/ath/ath5k/reg.h
index e1c9abd8c879..d12b827033c1 100644
--- a/drivers/net/wireless/ath/ath5k/reg.h
+++ b/drivers/net/wireless/ath/ath5k/reg.h
@@ -132,8 +132,8 @@
* As i can see in ar5k_ar5210_tx_start Reyk uses some of the values of BCR
* for this register, so i guess TQ1V,TQ1FV and BDMAE have the same meaning
* here and SNP/SNAP means "snapshot" (so this register gets synced with BCR).
- * So SNAPPEDBCRVALID sould also stand for "snapped BCR -values- valid", so i
- * renamed it to SNAPSHOTSVALID to make more sense. I realy have no idea what
+ * So SNAPPEDBCRVALID should also stand for "snapped BCR -values- valid", so i
+ * renamed it to SNAPSHOTSVALID to make more sense. I really have no idea what
* else can it be. I also renamed SNPBCMD to SNPADHOC to match BCR.
*/
#define AR5K_BSR 0x002c /* Register Address */
@@ -283,7 +283,7 @@
*/
#define AR5K_ISR 0x001c /* Register Address [5210] */
#define AR5K_PISR 0x0080 /* Register Address [5211+] */
-#define AR5K_ISR_RXOK 0x00000001 /* Frame successfuly recieved */
+#define AR5K_ISR_RXOK 0x00000001 /* Frame successfully received */
#define AR5K_ISR_RXDESC 0x00000002 /* RX descriptor request */
#define AR5K_ISR_RXERR 0x00000004 /* Receive error */
#define AR5K_ISR_RXNOFRM 0x00000008 /* No frame received (receive timeout) */
@@ -372,12 +372,12 @@
/*
* Interrupt Mask Registers
*
- * As whith ISRs 5210 has one IMR (AR5K_IMR) and 5211/5212 has one primary
+ * As with ISRs 5210 has one IMR (AR5K_IMR) and 5211/5212 has one primary
* (AR5K_PIMR) and 4 secondary IMRs (AR5K_SIMRx). Note that ISR/IMR flags match.
*/
#define AR5K_IMR 0x0020 /* Register Address [5210] */
#define AR5K_PIMR 0x00a0 /* Register Address [5211+] */
-#define AR5K_IMR_RXOK 0x00000001 /* Frame successfuly recieved*/
+#define AR5K_IMR_RXOK 0x00000001 /* Frame successfully received*/
#define AR5K_IMR_RXDESC 0x00000002 /* RX descriptor request*/
#define AR5K_IMR_RXERR 0x00000004 /* Receive error*/
#define AR5K_IMR_RXNOFRM 0x00000008 /* No frame received (receive timeout)*/
@@ -895,7 +895,7 @@
#define AR5K_PCICFG_SL_INTEN 0x00000800 /* Enable interrupts when asleep */
#define AR5K_PCICFG_LED_BCTL 0x00001000 /* Led blink (?) [5210] */
#define AR5K_PCICFG_RETRY_FIX 0x00001000 /* Enable pci core retry fix */
-#define AR5K_PCICFG_SL_INPEN 0x00002000 /* Sleep even whith pending interrupts*/
+#define AR5K_PCICFG_SL_INPEN 0x00002000 /* Sleep even with pending interrupts*/
#define AR5K_PCICFG_SPWR_DN 0x00010000 /* Mask for power status */
#define AR5K_PCICFG_LEDMODE 0x000e0000 /* Ledmode [5211+] */
#define AR5K_PCICFG_LEDMODE_PROP 0x00000000 /* Blink on standard traffic [5211+] */
diff --git a/drivers/net/wireless/ath/ath9k/ar5008_phy.c b/drivers/net/wireless/ath/ath9k/ar5008_phy.c
index 4361704fe0d0..4bf9dab4f2b3 100644
--- a/drivers/net/wireless/ath/ath9k/ar5008_phy.c
+++ b/drivers/net/wireless/ath/ath9k/ar5008_phy.c
@@ -170,7 +170,7 @@ static void ar5008_hw_force_bias(struct ath_hw *ah, u16 synth_freq)
/**
* ar5008_hw_set_channel - tune to a channel on the external AR2133/AR5133 radios
- * @ah: atheros hardware stucture
+ * @ah: atheros hardware structure
* @chan:
*
* For the external AR2133/AR5133 radios, takes the MHz channel value and set
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
index c7ad0562596a..1e220354e4be 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
@@ -3238,7 +3238,7 @@ static int ar9300_compress_decision(struct ath_hw *ah,
eep = ar9003_eeprom_struct_find_by_id(reference);
if (eep == NULL) {
ath_dbg(common, ATH_DBG_EEPROM,
- "cant find reference eeprom struct %d\n",
+ "can't find reference eeprom struct %d\n",
reference);
return -1;
}
diff --git a/drivers/net/wireless/ath/ath9k/htc_hst.c b/drivers/net/wireless/ath/ath9k/htc_hst.c
index 5c76352b1319..cee970fdf652 100644
--- a/drivers/net/wireless/ath/ath9k/htc_hst.c
+++ b/drivers/net/wireless/ath/ath9k/htc_hst.c
@@ -358,7 +358,7 @@ ret:
* HTC Messages are handled directly here and the obtained SKB
* is freed.
*
- * Sevice messages (Data, WMI) passed to the corresponding
+ * Service messages (Data, WMI) passed to the corresponding
* endpoint RX handlers, which have to free the SKB.
*/
void ath9k_htc_rx_msg(struct htc_target *htc_handle,
diff --git a/drivers/net/wireless/ath/ath9k/pci.c b/drivers/net/wireless/ath/ath9k/pci.c
index e83128c50f7b..9c65459be100 100644
--- a/drivers/net/wireless/ath/ath9k/pci.c
+++ b/drivers/net/wireless/ath/ath9k/pci.c
@@ -44,7 +44,7 @@ static void ath_pci_read_cachesize(struct ath_common *common, int *csz)
*csz = (int)u8tmp;
/*
- * This check was put in to avoid "unplesant" consequences if
+ * This check was put in to avoid "unpleasant" consequences if
* the bootrom has not fully initialized all PCI devices.
* Sometimes the cache line size register is not set
*/
diff --git a/drivers/net/wireless/ath/ath9k/rc.c b/drivers/net/wireless/ath/ath9k/rc.c
index b877d9639bdc..4ccbf2ddb553 100644
--- a/drivers/net/wireless/ath/ath9k/rc.c
+++ b/drivers/net/wireless/ath/ath9k/rc.c
@@ -792,7 +792,7 @@ static void ath_get_rate(void *priv, struct ieee80211_sta *sta, void *priv_sta,
tx_info->flags |= IEEE80211_TX_CTL_RATE_CTRL_PROBE;
} else {
- /* Set the choosen rate. No RTS for first series entry. */
+ /* Set the chosen rate. No RTS for first series entry. */
ath_rc_rate_set_series(rate_table, &rates[i++], txrc,
try_per_rate, rix, 0);
}
diff --git a/drivers/net/wireless/ath/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c
index 41469d7a2cda..97dd1fac98b6 100644
--- a/drivers/net/wireless/ath/ath9k/xmit.c
+++ b/drivers/net/wireless/ath/ath9k/xmit.c
@@ -637,8 +637,8 @@ static u32 ath_lookup_rate(struct ath_softc *sc, struct ath_buf *bf,
(u32)ATH_AMPDU_LIMIT_MAX);
/*
- * h/w can accept aggregates upto 16 bit lengths (65535).
- * The IE, however can hold upto 65536, which shows up here
+ * h/w can accept aggregates up to 16 bit lengths (65535).
+ * The IE, however can hold up to 65536, which shows up here
* as zero. Ignore 65536 since we are constrained by hw.
*/
if (tid->an->maxampdu)
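A stand-alone sketch of the clamping this comment describes: the per-station cap is honoured only when it is non-zero, because zero really encodes the un-representable 65536. The names and the hard-coded maximum below are illustrative, not the driver's.

#include <stdio.h>

#define HW_AGGR_MAX 65535u   /* hardware limit: 16-bit aggregate length */

static unsigned int aggr_limit(unsigned int rate_limit, unsigned int maxampdu)
{
	unsigned int limit = rate_limit < HW_AGGR_MAX ? rate_limit : HW_AGGR_MAX;

	if (maxampdu && maxampdu < limit)   /* 0 == "65536": ignore, hw caps it anyway */
		limit = maxampdu;
	return limit;
}

int main(void)
{
	printf("%u\n", aggr_limit(100000, 0));       /* -> 65535 */
	printf("%u\n", aggr_limit(100000, 32768));   /* -> 32768 */
	return 0;
}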
diff --git a/drivers/net/wireless/ath/carl9170/carl9170.h b/drivers/net/wireless/ath/carl9170/carl9170.h
index beb725d7547f..bb578690935e 100644
--- a/drivers/net/wireless/ath/carl9170/carl9170.h
+++ b/drivers/net/wireless/ath/carl9170/carl9170.h
@@ -161,7 +161,7 @@ struct carl9170_sta_tid {
* Naturally: The higher the limit, the faster the device CAN send.
* However, even a slight over-commitment at the wrong time and the
* hardware is doomed to send all already-queued frames at suboptimal
- * rates. This in turn leads to an enourmous amount of unsuccessful
+ * rates. This in turn leads to an enormous amount of unsuccessful
* retries => Latency goes up, whereas the throughput goes down. CRASH!
*/
#define CARL9170_NUM_TX_LIMIT_HARD ((AR9170_TXQ_DEPTH * 3) / 2)
diff --git a/drivers/net/wireless/ath/carl9170/phy.c b/drivers/net/wireless/ath/carl9170/phy.c
index b6b0de600506..b6ae0e179c8d 100644
--- a/drivers/net/wireless/ath/carl9170/phy.c
+++ b/drivers/net/wireless/ath/carl9170/phy.c
@@ -427,7 +427,7 @@ static u32 carl9170_def_val(u32 reg, bool is_2ghz, bool is_40mhz)
/*
* initialize some phy regs from eeprom values in modal_header[]
- * acc. to band and bandwith
+ * acc. to band and bandwidth
*/
static int carl9170_init_phy_from_eeprom(struct ar9170 *ar,
bool is_2ghz, bool is_40mhz)
diff --git a/drivers/net/wireless/ath/carl9170/rx.c b/drivers/net/wireless/ath/carl9170/rx.c
index 84866a4b8350..ec21ea9fd8d5 100644
--- a/drivers/net/wireless/ath/carl9170/rx.c
+++ b/drivers/net/wireless/ath/carl9170/rx.c
@@ -849,7 +849,7 @@ static void carl9170_rx_stream(struct ar9170 *ar, void *buf, unsigned int len)
/*
* nested carl9170_rx_stream call!
*
- * termination is guranteed, even when the
+ * termination is guaranteed, even when the
* combined frame also have an element with
* a bad tag.
*/
diff --git a/drivers/net/wireless/ath/carl9170/usb.c b/drivers/net/wireless/ath/carl9170/usb.c
index f82c400be288..2fb53d067512 100644
--- a/drivers/net/wireless/ath/carl9170/usb.c
+++ b/drivers/net/wireless/ath/carl9170/usb.c
@@ -430,7 +430,7 @@ static void carl9170_usb_rx_complete(struct urb *urb)
* The system is too slow to cope with
* the enormous workload. We have simply
* run out of active rx urbs and this
- * unfortunatly leads to an unpredictable
+ * unfortunately leads to an unpredictable
* device.
*/
diff --git a/drivers/net/wireless/ath/hw.c b/drivers/net/wireless/ath/hw.c
index 183c28281385..cc11d66f15bc 100644
--- a/drivers/net/wireless/ath/hw.c
+++ b/drivers/net/wireless/ath/hw.c
@@ -86,7 +86,7 @@
* IFRAME-01: 0110
*
* An easy eye-inspeciton of this already should tell you that this frame
- * will not pass our check. This is beacuse the bssid_mask tells the
+ * will not pass our check. This is because the bssid_mask tells the
* hardware to only look at the second least significant bit and the
* common bit amongst the MAC and BSSIDs is 0, this frame has the 2nd LSB
* as 1, which does not match 0.
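A tiny stand-alone illustration of the acceptance rule being described: the hardware compares only the bits selected by the bssid_mask. The values mirror the example above; the helper itself is purely illustrative.

#include <stdio.h>

/* Accept a frame when every bit selected by the mask matches the value the
 * local MAC and BSSIDs have in common on that bit. */
static int bssid_mask_accepts(unsigned int common, unsigned int mask, unsigned int a1)
{
	return ((common ^ a1) & mask) == 0;
}

int main(void)
{
	unsigned int mask   = 0x2;   /* only the 2nd least significant bit is checked */
	unsigned int common = 0x0;   /* that bit is 0 on the MAC and all BSSIDs */
	unsigned int iframe = 0x6;   /* IFRAME-01: 0110, 2nd LSB is 1 */

	printf("IFRAME-01 accepted: %d\n", bssid_mask_accepts(common, mask, iframe));
	return 0;
}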
diff --git a/drivers/net/wireless/ath/regd.c b/drivers/net/wireless/ath/regd.c
index 02b896208b1a..028310f263c8 100644
--- a/drivers/net/wireless/ath/regd.c
+++ b/drivers/net/wireless/ath/regd.c
@@ -269,7 +269,7 @@ ath_reg_apply_active_scan_flags(struct wiphy *wiphy,
}
/*
- * If a country IE has been recieved check its rule for this
+ * If a country IE has been received check its rule for this
* channel first before enabling active scan. The passive scan
* would have been enforced by the initial processing of our
* custom regulatory domain.
@@ -478,7 +478,7 @@ ath_regd_init_wiphy(struct ath_regulatory *reg,
wiphy->flags |= WIPHY_FLAG_CUSTOM_REGULATORY;
} else {
/*
- * This gets applied in the case of the absense of CRDA,
+ * This gets applied in the case of the absence of CRDA,
* it's our own custom world regulatory domain, similar to
* cfg80211's but we enable passive scanning.
*/
diff --git a/drivers/net/wireless/atmel.c b/drivers/net/wireless/atmel.c
index 46e382ed46aa..39a11e8af4fa 100644
--- a/drivers/net/wireless/atmel.c
+++ b/drivers/net/wireless/atmel.c
@@ -439,7 +439,7 @@ static u8 mac_reader[] = {
};
struct atmel_private {
- void *card; /* Bus dependent stucture varies for PCcard */
+ void *card; /* Bus dependent structure varies for PCcard */
int (*present_callback)(void *); /* And callback which uses it */
char firmware_id[32];
AtmelFWType firmware_type;
@@ -3895,7 +3895,7 @@ static int reset_atmel_card(struct net_device *dev)
This routine is also responsible for initialising some
hardware-specific fields in the atmel_private structure,
- including a copy of the firmware's hostinfo stucture
+ including a copy of the firmware's hostinfo structure
which is the route into the rest of the firmware datastructures. */
struct atmel_private *priv = netdev_priv(dev);
diff --git a/drivers/net/wireless/atmel_cs.c b/drivers/net/wireless/atmel_cs.c
index c96e19da2949..05263516c113 100644
--- a/drivers/net/wireless/atmel_cs.c
+++ b/drivers/net/wireless/atmel_cs.c
@@ -99,7 +99,7 @@ static void atmel_detach(struct pcmcia_device *link)
}
/* Call-back function to interrogate PCMCIA-specific information
- about the current existance of the card */
+ about the current existence of the card */
static int card_present(void *arg)
{
struct pcmcia_device *link = (struct pcmcia_device *)arg;
diff --git a/drivers/net/wireless/b43/b43.h b/drivers/net/wireless/b43/b43.h
index bd4cb75b6ca3..229f4388f790 100644
--- a/drivers/net/wireless/b43/b43.h
+++ b/drivers/net/wireless/b43/b43.h
@@ -648,7 +648,7 @@ struct b43_request_fw_context {
char errors[B43_NR_FWTYPES][128];
/* Temporary buffer for storing the firmware name. */
char fwname[64];
- /* A fatal error occured while requesting. Firmware reqest
+ /* A fatal error occurred while requesting. Firmware reqest
+ /* A fatal error occurred while requesting. Firmware reqest
* can not continue, as any other reqest will also fail. */
int fatal_failure;
};
diff --git a/drivers/net/wireless/b43/main.c b/drivers/net/wireless/b43/main.c
index a96e05a8ef7e..5a43984bdcea 100644
--- a/drivers/net/wireless/b43/main.c
+++ b/drivers/net/wireless/b43/main.c
@@ -72,6 +72,7 @@ MODULE_FIRMWARE("b43/ucode11.fw");
MODULE_FIRMWARE("b43/ucode13.fw");
MODULE_FIRMWARE("b43/ucode14.fw");
MODULE_FIRMWARE("b43/ucode15.fw");
+MODULE_FIRMWARE("b43/ucode16_mimo.fw");
MODULE_FIRMWARE("b43/ucode5.fw");
MODULE_FIRMWARE("b43/ucode9.fw");
@@ -4019,7 +4020,7 @@ static int b43_wireless_core_start(struct b43_wldev *dev)
b43_mac_enable(dev);
b43_write32(dev, B43_MMIO_GEN_IRQ_MASK, dev->irq_mask);
- /* Start maintainance work */
+ /* Start maintenance work */
b43_periodic_tasks_setup(dev);
b43_leds_init(dev);
diff --git a/drivers/net/wireless/b43/phy_g.h b/drivers/net/wireless/b43/phy_g.h
index 8569fdd4c6be..5413c906a3e7 100644
--- a/drivers/net/wireless/b43/phy_g.h
+++ b/drivers/net/wireless/b43/phy_g.h
@@ -164,7 +164,7 @@ struct b43_phy_g {
/* Current Interference Mitigation mode */
int interfmode;
/* Stack of saved values from the Interference Mitigation code.
- * Each value in the stack is layed out as follows:
+ * Each value in the stack is laid out as follows:
* bit 0-11: offset
* bit 12-15: register ID
* bit 16-32: value
diff --git a/drivers/net/wireless/b43/phy_n.h b/drivers/net/wireless/b43/phy_n.h
index 001e841f118c..e789a89f1047 100644
--- a/drivers/net/wireless/b43/phy_n.h
+++ b/drivers/net/wireless/b43/phy_n.h
@@ -703,7 +703,7 @@
#define B43_NPHY_CHAN_ESTHANG B43_PHY_N(0x21D) /* Channel estimate hang */
#define B43_NPHY_FINERX2_CGC B43_PHY_N(0x221) /* Fine RX 2 clock gate control */
#define B43_NPHY_FINERX2_CGC_DECGC 0x0008 /* Decode gated clocks */
-#define B43_NPHY_TXPCTL_INIT B43_PHY_N(0x222) /* TX power controll init */
+#define B43_NPHY_TXPCTL_INIT B43_PHY_N(0x222) /* TX power control init */
#define B43_NPHY_TXPCTL_INIT_PIDXI1 0x00FF /* Power index init 1 */
#define B43_NPHY_TXPCTL_INIT_PIDXI1_SHIFT 0
#define B43_NPHY_PAPD_EN0 B43_PHY_N(0x297) /* PAPD Enable0 TBD */
diff --git a/drivers/net/wireless/b43legacy/b43legacy.h b/drivers/net/wireless/b43legacy/b43legacy.h
index c81b2f53b0c5..23583be1ee0b 100644
--- a/drivers/net/wireless/b43legacy/b43legacy.h
+++ b/drivers/net/wireless/b43legacy/b43legacy.h
@@ -488,7 +488,7 @@ struct b43legacy_phy {
/* Current Interference Mitigation mode */
int interfmode;
/* Stack of saved values from the Interference Mitigation code.
- * Each value in the stack is layed out as follows:
+ * Each value in the stack is laid out as follows:
* bit 0-11: offset
* bit 12-15: register ID
* bit 16-32: value
diff --git a/drivers/net/wireless/hostap/hostap_ap.c b/drivers/net/wireless/hostap/hostap_ap.c
index 18d63f57777d..3d05dc15c6b8 100644
--- a/drivers/net/wireless/hostap/hostap_ap.c
+++ b/drivers/net/wireless/hostap/hostap_ap.c
@@ -2359,7 +2359,7 @@ int prism2_ap_get_sta_qual(local_info_t *local, struct sockaddr addr[],
}
-/* Translate our list of Access Points & Stations to a card independant
+/* Translate our list of Access Points & Stations to a card independent
* format that the Wireless Tools will understand - Jean II */
int prism2_ap_translate_scan(struct net_device *dev,
struct iw_request_info *info, char *buffer)
diff --git a/drivers/net/wireless/hostap/hostap_ap.h b/drivers/net/wireless/hostap/hostap_ap.h
index 655ceeba9612..334e2d0b8e11 100644
--- a/drivers/net/wireless/hostap/hostap_ap.h
+++ b/drivers/net/wireless/hostap/hostap_ap.h
@@ -114,7 +114,7 @@ struct sta_info {
* has passed since last received frame from the station, a nullfunc data
* frame is sent to the station. If this frame is not acknowledged and no other
* frames have been received, the station will be disassociated after
- * AP_DISASSOC_DELAY. Similarily, a the station will be deauthenticated after
+ * AP_DISASSOC_DELAY. Similarly, the station will be deauthenticated after
* AP_DEAUTH_DELAY. AP_TIMEOUT_RESOLUTION is the resolution that is used with
* max inactivity timer. */
#define AP_MAX_INACTIVITY_SEC (5 * 60)
diff --git a/drivers/net/wireless/hostap/hostap_ioctl.c b/drivers/net/wireless/hostap/hostap_ioctl.c
index 6038633ef361..12de46407c71 100644
--- a/drivers/net/wireless/hostap/hostap_ioctl.c
+++ b/drivers/net/wireless/hostap/hostap_ioctl.c
@@ -1945,7 +1945,7 @@ static char * __prism2_translate_scan(local_info_t *local,
}
-/* Translate scan data returned from the card to a card independant
+/* Translate scan data returned from the card to a card independent
* format that the Wireless Tools will understand - Jean II */
static inline int prism2_translate_scan(local_info_t *local,
struct iw_request_info *info,
@@ -2043,7 +2043,7 @@ static inline int prism2_ioctl_giwscan_sta(struct net_device *dev,
* until results are ready for various reasons.
* First, managing wait queues is complex and racy
* (there may be multiple simultaneous callers).
- * Second, we grab some rtnetlink lock before comming
+ * Second, we grab some rtnetlink lock before coming
* here (in dev_ioctl()).
* Third, the caller can wait on the Wireless Event
* - Jean II */
diff --git a/drivers/net/wireless/hostap/hostap_main.c b/drivers/net/wireless/hostap/hostap_main.c
index 1d9aed645723..d5084829c9e5 100644
--- a/drivers/net/wireless/hostap/hostap_main.c
+++ b/drivers/net/wireless/hostap/hostap_main.c
@@ -79,13 +79,8 @@ struct net_device * hostap_add_interface(struct local_info *local,
if (!rtnl_locked)
rtnl_lock();
- ret = 0;
- if (strchr(dev->name, '%'))
- ret = dev_alloc_name(dev, dev->name);
-
SET_NETDEV_DEV(dev, mdev->dev.parent);
- if (ret >= 0)
- ret = register_netdevice(dev);
+ ret = register_netdevice(dev);
if (!rtnl_locked)
rtnl_unlock();
diff --git a/drivers/net/wireless/hostap/hostap_wlan.h b/drivers/net/wireless/hostap/hostap_wlan.h
index 1c66b3c1030d..88dc6a52bdf1 100644
--- a/drivers/net/wireless/hostap/hostap_wlan.h
+++ b/drivers/net/wireless/hostap/hostap_wlan.h
@@ -853,7 +853,7 @@ struct local_info {
struct work_struct comms_qual_update;
/* RSSI to dBm adjustment (for RX descriptor fields) */
- int rssi_to_dBm; /* substract from RSSI to get approximate dBm value */
+ int rssi_to_dBm; /* subtract from RSSI to get approximate dBm value */
/* BSS list / protected by local->lock */
struct list_head bss_list;
diff --git a/drivers/net/wireless/ipw2x00/ipw2100.c b/drivers/net/wireless/ipw2x00/ipw2100.c
index 4b97f918daff..44307753587d 100644
--- a/drivers/net/wireless/ipw2x00/ipw2100.c
+++ b/drivers/net/wireless/ipw2x00/ipw2100.c
@@ -63,7 +63,7 @@ When data is sent to the firmware, the first TBD is used to indicate to the
firmware if a Command or Data is being sent. If it is Command, all of the
command information is contained within the physical address referred to by the
TBD. If it is Data, the first TBD indicates the type of data packet, number
-of fragments, etc. The next TBD then referrs to the actual packet location.
+of fragments, etc. The next TBD then refers to the actual packet location.
The Tx flow cycle is as follows:
diff --git a/drivers/net/wireless/ipw2x00/ipw2200.c b/drivers/net/wireless/ipw2x00/ipw2200.c
index 160881f234cc..42c3fe37af64 100644
--- a/drivers/net/wireless/ipw2x00/ipw2200.c
+++ b/drivers/net/wireless/ipw2x00/ipw2200.c
@@ -1181,7 +1181,7 @@ static void ipw_led_shutdown(struct ipw_priv *priv)
/*
* The following adds a new attribute to the sysfs representation
* of this device driver (i.e. a new file in /sys/bus/pci/drivers/ipw/)
- * used for controling the debug level.
+ * used for controlling the debug level.
*
* See the level definitions in ipw for details.
*/
@@ -3763,7 +3763,7 @@ static int ipw_queue_tx_init(struct ipw_priv *priv,
q->txb = kmalloc(sizeof(q->txb[0]) * count, GFP_KERNEL);
if (!q->txb) {
- IPW_ERROR("vmalloc for auxilary BD structures failed\n");
+ IPW_ERROR("vmalloc for auxiliary BD structures failed\n");
return -ENOMEM;
}
@@ -5581,7 +5581,7 @@ static int ipw_find_adhoc_network(struct ipw_priv *priv,
return 0;
}
- /* Verify privacy compatability */
+ /* Verify privacy compatibility */
if (((priv->capability & CAP_PRIVACY_ON) ? 1 : 0) !=
((network->capability & WLAN_CAPABILITY_PRIVACY) ? 1 : 0)) {
IPW_DEBUG_MERGE("Network '%s (%pM)' excluded "
@@ -5808,7 +5808,7 @@ static int ipw_best_network(struct ipw_priv *priv,
return 0;
}
- /* Verify privacy compatability */
+ /* Verify privacy compatibility */
if (((priv->capability & CAP_PRIVACY_ON) ? 1 : 0) !=
((network->capability & WLAN_CAPABILITY_PRIVACY) ? 1 : 0)) {
IPW_DEBUG_ASSOC("Network '%s (%pM)' excluded "
@@ -8184,7 +8184,7 @@ static void ipw_handle_promiscuous_rx(struct ipw_priv *priv,
static int is_network_packet(struct ipw_priv *priv,
struct libipw_hdr_4addr *header)
{
- /* Filter incoming packets to determine if they are targetted toward
+ /* Filter incoming packets to determine if they are targeted toward
* this network, discarding packets coming from ourselves */
switch (priv->ieee->iw_mode) {
case IW_MODE_ADHOC: /* Header: Dest. | Source | BSSID */
@@ -8340,9 +8340,9 @@ static void ipw_handle_mgmt_packet(struct ipw_priv *priv,
}
/*
- * Main entry function for recieving a packet with 80211 headers. This
+ * Main entry function for receiving a packet with 80211 headers. This
* should be called when ever the FW has notified us that there is a new
- * skb in the recieve queue.
+ * skb in the receive queue.
*/
static void ipw_rx(struct ipw_priv *priv)
{
@@ -8683,7 +8683,7 @@ static int ipw_sw_reset(struct ipw_priv *priv, int option)
* functions defined in ipw_main to provide the HW interaction.
*
* The exception to this is the use of the ipw_get_ordinal()
- * function used to poll the hardware vs. making unecessary calls.
+ * function used to poll the hardware vs. making unnecessary calls.
*
*/
@@ -10419,7 +10419,7 @@ static void ipw_handle_promiscuous_tx(struct ipw_priv *priv,
memset(&dummystats, 0, sizeof(dummystats));
- /* Filtering of fragment chains is done agains the first fragment */
+ /* Filtering of fragment chains is done against the first fragment */
hdr = (void *)txb->fragments[0]->data;
if (libipw_is_management(le16_to_cpu(hdr->frame_control))) {
if (filter & IPW_PROM_NO_MGMT)
diff --git a/drivers/net/wireless/ipw2x00/libipw_rx.c b/drivers/net/wireless/ipw2x00/libipw_rx.c
index 0de1b1893220..e5ad76cd77da 100644
--- a/drivers/net/wireless/ipw2x00/libipw_rx.c
+++ b/drivers/net/wireless/ipw2x00/libipw_rx.c
@@ -925,7 +925,7 @@ drop_free:
static u8 qos_oui[QOS_OUI_LEN] = { 0x00, 0x50, 0xF2 };
/*
-* Make ther structure we read from the beacon packet has
+* Make sure the structure we read from the beacon packet has
* the right values
*/
static int libipw_verify_qos_info(struct libipw_qos_information_element
diff --git a/drivers/net/wireless/iwlegacy/iwl-4965-tx.c b/drivers/net/wireless/iwlegacy/iwl-4965-tx.c
index fbec88d48f1b..79ac081832fb 100644
--- a/drivers/net/wireless/iwlegacy/iwl-4965-tx.c
+++ b/drivers/net/wireless/iwlegacy/iwl-4965-tx.c
@@ -316,12 +316,18 @@ int iwl4965_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
hdr_len = ieee80211_hdrlen(fc);
- /* Find index into station table for destination station */
- sta_id = iwl_legacy_sta_id_or_broadcast(priv, ctx, info->control.sta);
- if (sta_id == IWL_INVALID_STATION) {
- IWL_DEBUG_DROP(priv, "Dropping - INVALID STATION: %pM\n",
- hdr->addr1);
- goto drop_unlock;
+ /* For management frames use broadcast id so we do not break aggregation */
+ if (!ieee80211_is_data(fc))
+ sta_id = ctx->bcast_sta_id;
+ else {
+ /* Find index into station table for destination station */
+ sta_id = iwl_legacy_sta_id_or_broadcast(priv, ctx, info->control.sta);
+
+ if (sta_id == IWL_INVALID_STATION) {
+ IWL_DEBUG_DROP(priv, "Dropping - INVALID STATION: %pM\n",
+ hdr->addr1);
+ goto drop_unlock;
+ }
}
IWL_DEBUG_TX(priv, "station Id %d\n", sta_id);
diff --git a/drivers/net/wireless/iwlegacy/iwl-core.c b/drivers/net/wireless/iwlegacy/iwl-core.c
index 0073f9239197..d743373a9424 100644
--- a/drivers/net/wireless/iwlegacy/iwl-core.c
+++ b/drivers/net/wireless/iwlegacy/iwl-core.c
@@ -1032,7 +1032,7 @@ int iwl_legacy_apm_init(struct iwl_priv *priv)
/*
* Enable HAP INTA (interrupt from management bus) to
* wake device's PCI Express link L1a -> L0s
- * NOTE: This is no-op for 3945 (non-existant bit)
+ * NOTE: This is no-op for 3945 (non-existent bit)
*/
iwl_legacy_set_bit(priv, CSR_HW_IF_CONFIG_REG,
CSR_HW_IF_CONFIG_REG_BIT_HAP_WAKE_L1A);
diff --git a/drivers/net/wireless/iwlegacy/iwl-fh.h b/drivers/net/wireless/iwlegacy/iwl-fh.h
index 4e20c7e5c883..6e6091816e36 100644
--- a/drivers/net/wireless/iwlegacy/iwl-fh.h
+++ b/drivers/net/wireless/iwlegacy/iwl-fh.h
@@ -436,7 +436,7 @@
* @finished_rb_num [0:11] - Indicates the index of the current RB
* in which the last frame was written to
* @finished_fr_num [0:11] - Indicates the index of the RX Frame
- * which was transfered
+ * which was transferred
*/
struct iwl_rb_status {
__le16 closed_rb_num;
diff --git a/drivers/net/wireless/iwlegacy/iwl-led.c b/drivers/net/wireless/iwlegacy/iwl-led.c
index 15eb8b707157..bda0d61b2c0d 100644
--- a/drivers/net/wireless/iwlegacy/iwl-led.c
+++ b/drivers/net/wireless/iwlegacy/iwl-led.c
@@ -48,8 +48,21 @@ module_param(led_mode, int, S_IRUGO);
MODULE_PARM_DESC(led_mode, "0=system default, "
"1=On(RF On)/Off(RF Off), 2=blinking");
+/* Throughput OFF time(ms) ON time (ms)
+ * >300 25 25
+ * >200 to 300 40 40
+ * >100 to 200 55 55
+ * >70 to 100 65 65
+ * >50 to 70 75 75
+ * >20 to 50 85 85
+ * >10 to 20 95 95
+ * >5 to 10 110 110
+ * >1 to 5 130 130
+ * >0 to 1 167 167
+ * <=0 SOLID ON
+ */
static const struct ieee80211_tpt_blink iwl_blink[] = {
- { .throughput = 0 * 1024 - 1, .blink_time = 334 },
+ { .throughput = 0, .blink_time = 334 },
{ .throughput = 1 * 1024 - 1, .blink_time = 260 },
{ .throughput = 5 * 1024 - 1, .blink_time = 220 },
{ .throughput = 10 * 1024 - 1, .blink_time = 190 },
@@ -101,6 +114,11 @@ static int iwl_legacy_led_cmd(struct iwl_priv *priv,
if (priv->blink_on == on && priv->blink_off == off)
return 0;
+ if (off == 0) {
+ /* led is SOLID_ON */
+ on = IWL_LED_SOLID;
+ }
+
IWL_DEBUG_LED(priv, "Led blink time compensation=%u\n",
priv->cfg->base_params->led_compensation);
led_cmd.on = iwl_legacy_blink_compensation(priv, on,
diff --git a/drivers/net/wireless/iwlegacy/iwl-scan.c b/drivers/net/wireless/iwlegacy/iwl-scan.c
index 60f597f796ca..353234a02c6d 100644
--- a/drivers/net/wireless/iwlegacy/iwl-scan.c
+++ b/drivers/net/wireless/iwlegacy/iwl-scan.c
@@ -143,7 +143,7 @@ static void iwl_legacy_do_scan_abort(struct iwl_priv *priv)
IWL_DEBUG_SCAN(priv, "Send scan abort failed %d\n", ret);
iwl_legacy_force_scan_end(priv);
} else
- IWL_DEBUG_SCAN(priv, "Sucessfully send scan abort\n");
+ IWL_DEBUG_SCAN(priv, "Successfully send scan abort\n");
}
/**
diff --git a/drivers/net/wireless/iwlegacy/iwl-sta.c b/drivers/net/wireless/iwlegacy/iwl-sta.c
index 47c9da3834ea..66f0fb2bbe00 100644
--- a/drivers/net/wireless/iwlegacy/iwl-sta.c
+++ b/drivers/net/wireless/iwlegacy/iwl-sta.c
@@ -110,7 +110,7 @@ static int iwl_legacy_process_add_sta_resp(struct iwl_priv *priv,
/*
* XXX: The MAC address in the command buffer is often changed from
* the original sent to the device. That is, the MAC address
- * written to the command buffer often is not the same MAC adress
+ * written to the command buffer often is not the same MAC address
* read from the command buffer when the command returns. This
* issue has not yet been resolved and this debugging is left to
* observe the problem.
diff --git a/drivers/net/wireless/iwlegacy/iwl4965-base.c b/drivers/net/wireless/iwlegacy/iwl4965-base.c
index f781b7e225b4..af2ae22fcfd3 100644
--- a/drivers/net/wireless/iwlegacy/iwl4965-base.c
+++ b/drivers/net/wireless/iwlegacy/iwl4965-base.c
@@ -2992,15 +2992,15 @@ static void iwl4965_bg_txpower_work(struct work_struct *work)
struct iwl_priv *priv = container_of(work, struct iwl_priv,
txpower_work);
+ mutex_lock(&priv->mutex);
+
/* If a scan happened to start before we got here
* then just return; the statistics notification will
* kick off another scheduled work to compensate for
* any temperature delta we missed here. */
if (test_bit(STATUS_EXIT_PENDING, &priv->status) ||
test_bit(STATUS_SCANNING, &priv->status))
- return;
-
- mutex_lock(&priv->mutex);
+ goto out;
/* Regardless of if we are associated, we must reconfigure the
* TX power since frames can be sent on non-radar channels while
@@ -3010,7 +3010,7 @@ static void iwl4965_bg_txpower_work(struct work_struct *work)
/* Update last_temperature to keep is_calib_needed from running
* when it isn't needed... */
priv->last_temperature = priv->temperature;
-
+out:
mutex_unlock(&priv->mutex);
}
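
The change above is the classic acquire-then-bail pattern: take the mutex unconditionally at the top and route every early exit through a single unlock label, so no path can return with priv->mutex still held or skip serialization against a concurrent scan. A standalone sketch of the same shape using pthreads; the variable and function names are illustrative, not the driver's.

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static bool exit_pending;
static int temperature, last_temperature;

static void txpower_work(void)
{
        pthread_mutex_lock(&lock);

        /* Bail out under the lock; the single exit path below unlocks. */
        if (exit_pending)
                goto out;

        /* ... reconfigure TX power here ... */
        last_temperature = temperature;
out:
        pthread_mutex_unlock(&lock);
}

int main(void)
{
        temperature = 42;
        txpower_work();
        printf("last_temperature = %d\n", last_temperature);
        return 0;
}
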
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-ict.c b/drivers/net/wireless/iwlwifi/iwl-agn-ict.c
index 34d77d22a360..0d5fda44c3a3 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-ict.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-ict.c
@@ -67,7 +67,7 @@ int iwl_alloc_isr_ict(struct iwl_priv *priv)
if (!priv->_agn.ict_tbl_vir)
return -ENOMEM;
- /* align table to PAGE_SIZE boundry */
+ /* align table to PAGE_SIZE boundary */
priv->_agn.aligned_ict_tbl_dma = ALIGN(priv->_agn.ict_tbl_dma, PAGE_SIZE);
IWL_DEBUG_ISR(priv, "ict dma addr %Lx dma aligned %Lx diff %d\n",
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-tx.c b/drivers/net/wireless/iwlwifi/iwl-agn-tx.c
index 7c1becf9e2c1..342de780a366 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-tx.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-tx.c
@@ -580,12 +580,17 @@ int iwlagn_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
hdr_len = ieee80211_hdrlen(fc);
- /* Find index into station table for destination station */
- sta_id = iwl_sta_id_or_broadcast(priv, ctx, info->control.sta);
- if (sta_id == IWL_INVALID_STATION) {
- IWL_DEBUG_DROP(priv, "Dropping - INVALID STATION: %pM\n",
- hdr->addr1);
- goto drop_unlock_priv;
+ /* For management frames use the broadcast id so as not to break aggregation */
+ if (!ieee80211_is_data(fc))
+ sta_id = ctx->bcast_sta_id;
+ else {
+ /* Find index into station table for destination station */
+ sta_id = iwl_sta_id_or_broadcast(priv, ctx, info->control.sta);
+ if (sta_id == IWL_INVALID_STATION) {
+ IWL_DEBUG_DROP(priv, "Dropping - INVALID STATION: %pM\n",
+ hdr->addr1);
+ goto drop_unlock_priv;
+ }
}
IWL_DEBUG_TX(priv, "station Id %d\n", sta_id);
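
The hunk above only changes how the station index is chosen: data frames still resolve the destination through the station table, while management frames are pinned to the context's broadcast station, so they can never hit the INVALID_STATION drop path and, per the new comment, do not disturb per-station aggregation state. A reduced sketch of that decision, with a toy lookup standing in for iwl_sta_id_or_broadcast() and made-up id values:

#include <stdbool.h>
#include <stdio.h>

#define INVALID_STATION  255
#define BCAST_STA_ID     15   /* illustrative broadcast slot */

/* Toy stand-in for the driver's station-table lookup. */
static int lookup_sta(const unsigned char *addr)
{
        (void)addr;
        return INVALID_STATION;  /* pretend the peer is not in the table */
}

static int pick_sta_id(bool is_data, const unsigned char *addr)
{
        if (!is_data)
                return BCAST_STA_ID;   /* management: never fails */

        return lookup_sta(addr);       /* data: may be INVALID_STATION */
}

int main(void)
{
        unsigned char da[6] = { 0 };

        printf("mgmt -> %d, data -> %d\n",
               pick_sta_id(false, da), pick_sta_id(true, da));
        return 0;
}
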
diff --git a/drivers/net/wireless/iwlwifi/iwl-fh.h b/drivers/net/wireless/iwlwifi/iwl-fh.h
index b90924e890a7..6dfa806aefec 100644
--- a/drivers/net/wireless/iwlwifi/iwl-fh.h
+++ b/drivers/net/wireless/iwlwifi/iwl-fh.h
@@ -434,7 +434,7 @@
* @finished_rb_num [0:11] - Indicates the index of the current RB
* in which the last frame was written to
* @finished_fr_num [0:11] - Indicates the index of the RX Frame
- * which was transfered
+ * which was transferred
*/
struct iwl_rb_status {
__le16 closed_rb_num;
diff --git a/drivers/net/wireless/iwlwifi/iwl-scan.c b/drivers/net/wireless/iwlwifi/iwl-scan.c
index c517f6d8a15f..d60d630cb93a 100644
--- a/drivers/net/wireless/iwlwifi/iwl-scan.c
+++ b/drivers/net/wireless/iwlwifi/iwl-scan.c
@@ -143,7 +143,7 @@ static void iwl_do_scan_abort(struct iwl_priv *priv)
IWL_DEBUG_SCAN(priv, "Send scan abort failed %d\n", ret);
iwl_force_scan_end(priv);
} else
- IWL_DEBUG_SCAN(priv, "Sucessfully send scan abort\n");
+ IWL_DEBUG_SCAN(priv, "Successfully sent scan abort\n");
}
/**
diff --git a/drivers/net/wireless/iwmc3200wifi/hal.c b/drivers/net/wireless/iwmc3200wifi/hal.c
index 907ac890997c..1cabcb39643f 100644
--- a/drivers/net/wireless/iwmc3200wifi/hal.c
+++ b/drivers/net/wireless/iwmc3200wifi/hal.c
@@ -57,7 +57,7 @@
* This is due to the fact the host talks exclusively
* to the UMAC and so there needs to be a special UMAC
* command for talking to the LMAC.
- * This is how a wifi command is layed out:
+ * This is how a wifi command is laid out:
* ------------------------
* | iwm_udma_out_wifi_hdr |
* ------------------------
@@ -72,7 +72,7 @@
* Those commands are handled by the device's bootrom,
* and are typically sent when the UMAC and the LMAC
* are not yet available.
- * * This is how a non-wifi command is layed out:
+ * * This is how a non-wifi command is laid out:
* ---------------------------
* | iwm_udma_out_nonwifi_hdr |
* ---------------------------
diff --git a/drivers/net/wireless/iwmc3200wifi/tx.c b/drivers/net/wireless/iwmc3200wifi/tx.c
index 3216621fc55a..be98074c0608 100644
--- a/drivers/net/wireless/iwmc3200wifi/tx.c
+++ b/drivers/net/wireless/iwmc3200wifi/tx.c
@@ -197,7 +197,7 @@ int iwm_tx_credit_alloc(struct iwm_priv *iwm, int id, int nb)
spin_lock(&iwm->tx_credit.lock);
if (!iwm_tx_credit_ok(iwm, id, nb)) {
- IWM_DBG_TX(iwm, DBG, "No credit avaliable for pool[%d]\n", id);
+ IWM_DBG_TX(iwm, DBG, "No credit available for pool[%d]\n", id);
ret = -ENOSPC;
goto out;
}
diff --git a/drivers/net/wireless/libertas/README b/drivers/net/wireless/libertas/README
index 60fd1afe89ac..1453eec82a99 100644
--- a/drivers/net/wireless/libertas/README
+++ b/drivers/net/wireless/libertas/README
@@ -70,7 +70,7 @@ rdrf
These commands are used to read the MAC, BBP and RF registers from the
card. These commands take one parameter that specifies the offset
location that is to be read. This parameter must be specified in
- hexadecimal (its possible to preceed preceding the number with a "0x").
+ hexadecimal (it's possible to precede the number with a "0x").
Path: /sys/kernel/debug/libertas_wireless/ethX/registers/
@@ -84,7 +84,7 @@ wrrf
These commands are used to write the MAC, BBP and RF registers in the
card. These commands take two parameters that specify the offset
location and the value that is to be written. This parameters must
- be specified in hexadecimal (its possible to preceed the number
+ be specified in hexadecimal (it's possible to precede the number
with a "0x").
Usage:
diff --git a/drivers/net/wireless/libertas/cfg.c b/drivers/net/wireless/libertas/cfg.c
index a2e88498c096..5d637af2d7c3 100644
--- a/drivers/net/wireless/libertas/cfg.c
+++ b/drivers/net/wireless/libertas/cfg.c
@@ -1360,7 +1360,7 @@ static int lbs_cfg_connect(struct wiphy *wiphy, struct net_device *dev,
* we remove all keys like in the WPA/WPA2 setup,
* we just don't set RSN.
*
- * Therefore: fall-throught
+ * Therefore: fall-through
*/
case WLAN_CIPHER_SUITE_TKIP:
case WLAN_CIPHER_SUITE_CCMP:
diff --git a/drivers/net/wireless/libertas/if_spi.h b/drivers/net/wireless/libertas/if_spi.h
index 9745bd407d76..e450e31fd11d 100644
--- a/drivers/net/wireless/libertas/if_spi.h
+++ b/drivers/net/wireless/libertas/if_spi.h
@@ -66,7 +66,7 @@
#define IF_SPI_HOST_INT_CTRL_REG 0x40 /* Host interrupt controller reg */
#define IF_SPI_CARD_INT_CAUSE_REG 0x44 /* Card interrupt cause reg */
-#define IF_SPI_CARD_INT_STATUS_REG 0x48 /* Card interupt status reg */
+#define IF_SPI_CARD_INT_STATUS_REG 0x48 /* Card interrupt status reg */
#define IF_SPI_CARD_INT_EVENT_MASK_REG 0x4C /* Card interrupt event mask */
#define IF_SPI_CARD_INT_STATUS_MASK_REG 0x50 /* Card interrupt status mask */
diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
index 56f439d58013..9d4a40ee16c4 100644
--- a/drivers/net/wireless/mac80211_hwsim.c
+++ b/drivers/net/wireless/mac80211_hwsim.c
@@ -62,7 +62,7 @@ MODULE_PARM_DESC(fake_hw_scan, "Install fake (no-op) hw-scan handler");
* an intersection to occur but each device will still use their
* respective regulatory requested domains. Subsequent radios will
* use the resulting intersection.
- * @HWSIM_REGTEST_WORLD_ROAM: Used for testing the world roaming. We acomplish
+ * @HWSIM_REGTEST_WORLD_ROAM: Used for testing the world roaming. We accomplish
* this by using a custom beacon-capable regulatory domain for the first
* radio. All other device world roam.
* @HWSIM_REGTEST_CUSTOM_WORLD: Used for testing the custom world regulatory
@@ -1515,19 +1515,10 @@ static int __init init_mac80211_hwsim(void)
if (hwsim_mon == NULL)
goto failed;
- rtnl_lock();
-
- err = dev_alloc_name(hwsim_mon, hwsim_mon->name);
+ err = register_netdev(hwsim_mon);
if (err < 0)
goto failed_mon;
-
- err = register_netdevice(hwsim_mon);
- if (err < 0)
- goto failed_mon;
-
- rtnl_unlock();
-
return 0;
failed_mon:
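
The simplification above works because register_netdev() already takes the rtnl lock and, through register_netdevice(), resolves "%d" name templates itself, so the open-coded dev_alloc_name()/register_netdevice() sequence is redundant (the same reasoning applies to the mwifiex hunk below). A userspace analogy of the two pieces that wrapper bundles; the helper names here are illustrative, not kernel APIs.

#include <stdio.h>
#include <string.h>

#define MAX_DEVS 8

static char registered[MAX_DEVS][16];
static int ndevs;

/* dev_alloc_name()-style template expansion: "hwsim%d" -> first free name. */
static int alloc_name(char *out, size_t len, const char *tmpl)
{
        for (int i = 0; i < MAX_DEVS; i++) {
                char cand[16];
                int j, taken = 0;

                snprintf(cand, sizeof(cand), tmpl, i);
                for (j = 0; j < ndevs; j++)
                        if (strcmp(registered[j], cand) == 0)
                                taken = 1;
                if (!taken) {
                        snprintf(out, len, "%s", cand);
                        return 0;
                }
        }
        return -1;
}

/* register_netdev()-style wrapper: name resolution + registration in one call. */
static int register_dev(const char *tmpl)
{
        char name[16];

        if (ndevs >= MAX_DEVS || alloc_name(name, sizeof(name), tmpl) < 0)
                return -1;
        strcpy(registered[ndevs++], name);
        printf("registered %s\n", name);
        return 0;
}

int main(void)
{
        register_dev("hwsim%d");
        register_dev("hwsim%d");
        return 0;
}
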
diff --git a/drivers/net/wireless/mwifiex/main.c b/drivers/net/wireless/mwifiex/main.c
index 44957cac61e1..f0582259c935 100644
--- a/drivers/net/wireless/mwifiex/main.c
+++ b/drivers/net/wireless/mwifiex/main.c
@@ -705,10 +705,6 @@ static struct mwifiex_private *mwifiex_add_interface(
dev_err(adapter->dev, "no memory available for netdevice\n");
goto error;
}
- if (dev_alloc_name(dev, dev->name)) {
- dev_err(adapter->dev, "unable to alloc name for netdevice\n");
- goto error;
- }
if (mwifiex_register_cfg80211(dev, adapter->priv[bss_index]->curr_addr,
adapter->priv[bss_index]) != 0) {
diff --git a/drivers/net/wireless/orinoco/hw.c b/drivers/net/wireless/orinoco/hw.c
index b4772c1c6135..3c7877a7c31c 100644
--- a/drivers/net/wireless/orinoco/hw.c
+++ b/drivers/net/wireless/orinoco/hw.c
@@ -1031,7 +1031,7 @@ int __orinoco_hw_set_tkip_key(struct orinoco_private *priv, int key_idx,
else
buf.tsc[4] = 0x10;
- /* Wait upto 100ms for tx queue to empty */
+ /* Wait up to 100ms for tx queue to empty */
for (k = 100; k > 0; k--) {
udelay(1000);
ret = hermes_read_wordrec(hw, USER_BAP, HERMES_RID_TXQUEUEEMPTY,
diff --git a/drivers/net/wireless/p54/main.c b/drivers/net/wireless/p54/main.c
index c5c1254ec093..a5a6d9e647bb 100644
--- a/drivers/net/wireless/p54/main.c
+++ b/drivers/net/wireless/p54/main.c
@@ -494,7 +494,7 @@ static int p54_set_key(struct ieee80211_hw *dev, enum set_key_cmd cmd,
if (slot < 0) {
/*
- * The device supports the choosen algorithm, but the
+ * The device supports the chosen algorithm, but the
* firmware does not provide enough key slots to store
* all of them.
* But encryption offload for outgoing frames is always
diff --git a/drivers/net/wireless/p54/p54spi.c b/drivers/net/wireless/p54/p54spi.c
index 7ecc0bda57b3..6d9204fef90b 100644
--- a/drivers/net/wireless/p54/p54spi.c
+++ b/drivers/net/wireless/p54/p54spi.c
@@ -287,7 +287,7 @@ static void p54spi_power_on(struct p54s_priv *priv)
enable_irq(gpio_to_irq(p54spi_gpio_irq));
/*
- * need to wait a while before device can be accessed, the lenght
+ * need to wait a while before device can be accessed, the length
* is just a guess
*/
msleep(10);
diff --git a/drivers/net/wireless/prism54/islpci_eth.c b/drivers/net/wireless/prism54/islpci_eth.c
index d44f8e20cce0..266d45bf86f5 100644
--- a/drivers/net/wireless/prism54/islpci_eth.c
+++ b/drivers/net/wireless/prism54/islpci_eth.c
@@ -113,7 +113,7 @@ islpci_eth_transmit(struct sk_buff *skb, struct net_device *ndev)
* be aligned on a 4-byte boundary. If WDS is enabled add another 6 bytes
* and add WDS address information */
if (likely(((long) skb->data & 0x03) | init_wds)) {
- /* get the number of bytes to add and re-allign */
+ /* get the number of bytes to add and re-align */
offset = (4 - (long) skb->data) & 0x03;
offset += init_wds ? 6 : 0;
@@ -342,7 +342,7 @@ islpci_eth_receive(islpci_private *priv)
priv->pci_map_rx_address[index],
MAX_FRAGMENT_SIZE_RX + 2, PCI_DMA_FROMDEVICE);
- /* update the skb structure and allign the buffer */
+ /* update the skb structure and align the buffer */
skb_put(skb, size);
if (offset) {
/* shift the buffer allocation offset bytes to get the right frame */
diff --git a/drivers/net/wireless/rayctl.h b/drivers/net/wireless/rayctl.h
index 49d9b267bc0f..d7646f299bd3 100644
--- a/drivers/net/wireless/rayctl.h
+++ b/drivers/net/wireless/rayctl.h
@@ -578,7 +578,7 @@ struct tx_msg {
UCHAR var[1];
};
-/****** ECF Receive Control Stucture (RCS) Area at Shared RAM offset 0x0800 */
+/****** ECF Receive Control Structure (RCS) Area at Shared RAM offset 0x0800 */
/* Structures for command specific parameters (rcs.var) */
struct rx_packet_cmd {
UCHAR rx_data_ptr[2];
diff --git a/drivers/net/wireless/rt2x00/rt2800.h b/drivers/net/wireless/rt2x00/rt2800.h
index 47a04d2dad4b..f67bc9b31b28 100644
--- a/drivers/net/wireless/rt2x00/rt2800.h
+++ b/drivers/net/wireless/rt2x00/rt2800.h
@@ -618,7 +618,7 @@
* READ_CONTROL: 0 write BBP, 1 read BBP
* BUSY: ASIC is busy executing BBP commands
* BBP_PAR_DUR: 0 4 MAC clocks, 1 8 MAC clocks
- * BBP_RW_MODE: 0 serial, 1 paralell
+ * BBP_RW_MODE: 0 serial, 1 parallel
*/
#define BBP_CSR_CFG 0x101c
#define BBP_CSR_CFG_VALUE FIELD32(0x000000ff)
diff --git a/drivers/net/wireless/rt2x00/rt2800lib.c b/drivers/net/wireless/rt2x00/rt2800lib.c
index 93fb67491acc..2a6aa85cc6c9 100644
--- a/drivers/net/wireless/rt2x00/rt2800lib.c
+++ b/drivers/net/wireless/rt2x00/rt2800lib.c
@@ -1550,7 +1550,7 @@ static void rt2800_config_channel_rf2xxx(struct rt2x00_dev *rt2x00dev,
if (rf->channel > 14) {
/*
* When TX power is below 0, we should increase it by 7 to
- * make it a positive value (Minumum value is -7).
+ * make it a positive value (Minimum value is -7).
* However this means that values between 0 and 7 have
* double meaning, and we should set a 7DBm boost flag.
*/
diff --git a/drivers/net/wireless/rt2x00/rt2x00.h b/drivers/net/wireless/rt2x00/rt2x00.h
index 73d3332be614..c446db69bd3c 100644
--- a/drivers/net/wireless/rt2x00/rt2x00.h
+++ b/drivers/net/wireless/rt2x00/rt2x00.h
@@ -490,13 +490,13 @@ struct rt2x00intf_conf {
enum nl80211_iftype type;
/*
- * TSF sync value, this is dependant on the operation type.
+ * TSF sync value, this is dependent on the operation type.
*/
enum tsf_sync sync;
/*
- * The MAC and BSSID addressess are simple array of bytes,
- * these arrays are little endian, so when sending the addressess
+ * The MAC and BSSID addresses are simple arrays of bytes,
+ * these arrays are little endian, so when sending the addresses
* to the drivers, copy the it into a endian-signed variable.
*
* Note that all devices (except rt2500usb) have 32 bits
@@ -1173,7 +1173,7 @@ void rt2x00queue_stop_queue(struct data_queue *queue);
* @drop: True to drop all pending frames.
*
* This function will flush the queue. After this call
- * the queue is guarenteed to be empty.
+ * the queue is guaranteed to be empty.
*/
void rt2x00queue_flush_queue(struct data_queue *queue, bool drop);
diff --git a/drivers/net/wireless/rt2x00/rt2x00config.c b/drivers/net/wireless/rt2x00/rt2x00config.c
index edebbf04bc57..555180d8f4aa 100644
--- a/drivers/net/wireless/rt2x00/rt2x00config.c
+++ b/drivers/net/wireless/rt2x00/rt2x00config.c
@@ -60,7 +60,7 @@ void rt2x00lib_config_intf(struct rt2x00_dev *rt2x00dev,
* Note that when NULL is passed as address we will send
* 00:00:00:00:00 to the device to clear the address.
* This will prevent the device being confused when it wants
- * to ACK frames or consideres itself associated.
+ * to ACK frames or considers itself associated.
*/
memset(conf.mac, 0, sizeof(conf.mac));
if (mac)
diff --git a/drivers/net/wireless/rt2x00/rt2x00crypto.c b/drivers/net/wireless/rt2x00/rt2x00crypto.c
index e1e0c51fcde8..1bb9d46077ff 100644
--- a/drivers/net/wireless/rt2x00/rt2x00crypto.c
+++ b/drivers/net/wireless/rt2x00/rt2x00crypto.c
@@ -237,7 +237,7 @@ void rt2x00crypto_rx_insert_iv(struct sk_buff *skb,
}
/*
- * NOTE: Always count the payload as transfered,
+ * NOTE: Always count the payload as transferred,
* even when alignment was set to zero. This is required
* for determining the correct offset for the ICV data.
*/
diff --git a/drivers/net/wireless/rt2x00/rt2x00dump.h b/drivers/net/wireless/rt2x00/rt2x00dump.h
index 5d6e0b83151f..063ebcce97f8 100644
--- a/drivers/net/wireless/rt2x00/rt2x00dump.h
+++ b/drivers/net/wireless/rt2x00/rt2x00dump.h
@@ -51,7 +51,7 @@
* [rt2x00dump header][hardware descriptor][ieee802.11 frame]
*
* rt2x00dump header: The description of the dumped frame, as well as
- * additional information usefull for debugging. See &rt2x00dump_hdr.
+ * additional information useful for debugging. See &rt2x00dump_hdr.
* hardware descriptor: Descriptor that was used to receive or transmit
* the frame.
* ieee802.11 frame: The actual frame that was received or transmitted.
diff --git a/drivers/net/wireless/rt2x00/rt2x00link.c b/drivers/net/wireless/rt2x00/rt2x00link.c
index fa55399be192..ea10b0068f82 100644
--- a/drivers/net/wireless/rt2x00/rt2x00link.c
+++ b/drivers/net/wireless/rt2x00/rt2x00link.c
@@ -273,7 +273,7 @@ void rt2x00link_start_tuner(struct rt2x00_dev *rt2x00dev)
/**
* While scanning, link tuning is disabled. By default
* the most sensitive settings will be used to make sure
- * that all beacons and probe responses will be recieved
+ * that all beacons and probe responses will be received
* during the scan.
*/
if (test_bit(DEVICE_STATE_SCANNING, &rt2x00dev->flags))
diff --git a/drivers/net/wireless/rt2x00/rt2x00queue.c b/drivers/net/wireless/rt2x00/rt2x00queue.c
index 56f9d0df9c61..ab8c16f8bcaf 100644
--- a/drivers/net/wireless/rt2x00/rt2x00queue.c
+++ b/drivers/net/wireless/rt2x00/rt2x00queue.c
@@ -615,7 +615,7 @@ int rt2x00queue_write_tx_frame(struct data_queue *queue, struct sk_buff *skb,
}
/*
- * When DMA allocation is required we should guarentee to the
+ * When DMA allocation is required we should guarantee to the
* driver that the DMA is aligned to a 4-byte boundary.
* However some drivers require L2 padding to pad the payload
* rather then the header. This could be a requirement for
@@ -760,7 +760,7 @@ bool rt2x00queue_for_each_entry(struct data_queue *queue,
spin_unlock_irqrestore(&queue->index_lock, irqflags);
/*
- * Start from the TX done pointer, this guarentees that we will
+ * Start from the TX done pointer, this guarantees that we will
* send out all frames in the correct order.
*/
if (index_start < index_end) {
diff --git a/drivers/net/wireless/rt2x00/rt2x00queue.h b/drivers/net/wireless/rt2x00/rt2x00queue.h
index 36f4d03eff61..167d45873dca 100644
--- a/drivers/net/wireless/rt2x00/rt2x00queue.h
+++ b/drivers/net/wireless/rt2x00/rt2x00queue.h
@@ -345,8 +345,8 @@ struct txentry_desc {
* only be touched after the device has signaled it is done with it.
* @ENTRY_DATA_PENDING: This entry contains a valid frame and is waiting
* for the signal to start sending.
- * @ENTRY_DATA_IO_FAILED: Hardware indicated that an IO error occured
- * while transfering the data to the hardware. No TX status report will
+ * @ENTRY_DATA_IO_FAILED: Hardware indicated that an IO error occurred
+ * while transferring the data to the hardware. No TX status report will
* be expected from the hardware.
* @ENTRY_DATA_STATUS_PENDING: The entry has been send to the device and
* returned. It is now waiting for the status reporting before the
@@ -367,7 +367,7 @@ enum queue_entry_flags {
* @last_action: Timestamp of last change.
* @queue: The data queue (&struct data_queue) to which this entry belongs.
* @skb: The buffer which is currently being transmitted (for TX queue),
- * or used to directly recieve data in (for RX queue).
+ * or used to directly receive data in (for RX queue).
* @entry_idx: The entry index number.
* @priv_data: Private data belonging to this queue entry. The pointer
* points to data specific to a particular driver and queue type.
@@ -391,7 +391,7 @@ struct queue_entry {
* @Q_INDEX: Index pointer to the current entry in the queue, if this entry is
* owned by the hardware then the queue is considered to be full.
* @Q_INDEX_DMA_DONE: Index pointer for the next entry which will have been
- * transfered to the hardware.
+ * transferred to the hardware.
* @Q_INDEX_DONE: Index pointer to the next entry which will be completed by
* the hardware and for which we need to run the txdone handler. If this
* entry is not owned by the hardware the queue is considered to be empty.
@@ -635,7 +635,7 @@ static inline int rt2x00queue_threshold(struct data_queue *queue)
}
/**
- * rt2x00queue_status_timeout - Check if a timeout occured for STATUS reports
+ * rt2x00queue_status_timeout - Check if a timeout occurred for STATUS reports
* @entry: Queue entry to check.
*/
static inline int rt2x00queue_status_timeout(struct queue_entry *entry)
@@ -646,7 +646,7 @@ static inline int rt2x00queue_status_timeout(struct queue_entry *entry)
}
/**
- * rt2x00queuedma__timeout - Check if a timeout occured for DMA transfers
+ * rt2x00queue_dma_timeout - Check if a timeout occurred for DMA transfers
* @entry: Queue entry to check.
*/
static inline int rt2x00queue_dma_timeout(struct queue_entry *entry)
diff --git a/drivers/net/wireless/rt2x00/rt2x00usb.c b/drivers/net/wireless/rt2x00/rt2x00usb.c
index dc6b662ad5f1..8f90f6268077 100644
--- a/drivers/net/wireless/rt2x00/rt2x00usb.c
+++ b/drivers/net/wireless/rt2x00/rt2x00usb.c
@@ -226,7 +226,7 @@ static void rt2x00usb_work_txdone_entry(struct queue_entry *entry)
/*
* If the transfer to hardware succeeded, it does not mean the
* frame was send out correctly. It only means the frame
- * was succesfully pushed to the hardware, we have no
+ * was successfully pushed to the hardware, we have no
* way to determine the transmission status right now.
* (Only indirectly by looking at the failed TX counters
* in the register).
diff --git a/drivers/net/wireless/rt2x00/rt2x00usb.h b/drivers/net/wireless/rt2x00/rt2x00usb.h
index a69f18758871..323ca7b2b095 100644
--- a/drivers/net/wireless/rt2x00/rt2x00usb.h
+++ b/drivers/net/wireless/rt2x00/rt2x00usb.h
@@ -413,7 +413,7 @@ void rt2x00usb_flush_queue(struct data_queue *queue, bool drop);
* @rt2x00dev: Pointer to &struct rt2x00_dev
*
* Check the health of the USB communication and determine
- * if timeouts have occured. If this is the case, this function
+ * if timeouts have occurred. If this is the case, this function
* will reset all communication to restore functionality again.
*/
void rt2x00usb_watchdog(struct rt2x00_dev *rt2x00dev);
diff --git a/drivers/net/wireless/rtlwifi/base.c b/drivers/net/wireless/rtlwifi/base.c
index d3666573d6b8..ccb6da38fe22 100644
--- a/drivers/net/wireless/rtlwifi/base.c
+++ b/drivers/net/wireless/rtlwifi/base.c
@@ -698,7 +698,7 @@ void rtl_get_tcb_desc(struct ieee80211_hw *hw,
*because hw will nerver use hw_rate
*when tcb_desc->use_driver_rate = false
*so we never set highest N rate here,
- *and N rate will all be controled by FW
+ *and N rate will all be controlled by FW
*when tcb_desc->use_driver_rate = false
*/
if (sta && (sta->ht_cap.ht_supported)) {
diff --git a/drivers/net/wireless/rtlwifi/regd.c b/drivers/net/wireless/rtlwifi/regd.c
index 714858abc4ac..8f6718f163e5 100644
--- a/drivers/net/wireless/rtlwifi/regd.c
+++ b/drivers/net/wireless/rtlwifi/regd.c
@@ -233,7 +233,7 @@ static void _rtl_reg_apply_active_scan_flags(struct wiphy *wiphy,
}
/*
- *If a country IE has been recieved check its rule for this
+ *If a country IE has been received check its rule for this
*channel first before enabling active scan. The passive scan
*would have been enforced by the initial processing of our
*custom regulatory domain.
diff --git a/drivers/net/wireless/rtlwifi/wifi.h b/drivers/net/wireless/rtlwifi/wifi.h
index a406c616b697..693395ee98f9 100644
--- a/drivers/net/wireless/rtlwifi/wifi.h
+++ b/drivers/net/wireless/rtlwifi/wifi.h
@@ -1598,7 +1598,7 @@ struct rtl_priv {
/***************************************
- Bluetooth Co-existance Related
+ Bluetooth Co-existence Related
****************************************/
enum bt_ant_num {
diff --git a/drivers/net/wireless/wl1251/cmd.c b/drivers/net/wireless/wl1251/cmd.c
index 0ade4bd617c0..81f164bc4888 100644
--- a/drivers/net/wireless/wl1251/cmd.c
+++ b/drivers/net/wireless/wl1251/cmd.c
@@ -104,7 +104,7 @@ int wl1251_cmd_test(struct wl1251 *wl, void *buf, size_t buf_len, u8 answer)
* @wl: wl struct
* @id: acx id
* @buf: buffer for the response, including all headers, must work with dma
- * @len: lenght of buf
+ * @len: length of buf
*/
int wl1251_cmd_interrogate(struct wl1251 *wl, u16 id, void *buf, size_t len)
{
diff --git a/drivers/net/wireless/wl1251/rx.c b/drivers/net/wireless/wl1251/rx.c
index c1b3b3f03da2..6af35265c900 100644
--- a/drivers/net/wireless/wl1251/rx.c
+++ b/drivers/net/wireless/wl1251/rx.c
@@ -179,7 +179,7 @@ static void wl1251_rx_body(struct wl1251 *wl,
rx_buffer = skb_put(skb, length);
wl1251_mem_read(wl, rx_packet_ring_addr, rx_buffer, length);
- /* The actual lenght doesn't include the target's alignment */
+ /* The actual length doesn't include the target's alignment */
skb->len = desc->length - PLCP_HEADER_LENGTH;
fc = (u16 *)skb->data;
diff --git a/drivers/net/wireless/wl12xx/cmd.c b/drivers/net/wireless/wl12xx/cmd.c
index 2116a376c3f3..42935ac72663 100644
--- a/drivers/net/wireless/wl12xx/cmd.c
+++ b/drivers/net/wireless/wl12xx/cmd.c
@@ -454,7 +454,7 @@ int wl1271_cmd_test(struct wl1271 *wl, void *buf, size_t buf_len, u8 answer)
* @wl: wl struct
* @id: acx id
* @buf: buffer for the response, including all headers, must work with dma
- * @len: lenght of buf
+ * @len: length of buf
*/
int wl1271_cmd_interrogate(struct wl1271 *wl, u16 id, void *buf, size_t len)
{
diff --git a/drivers/net/wireless/wl12xx/conf.h b/drivers/net/wireless/wl12xx/conf.h
index ba558fcc76d0..1ab6c86aac40 100644
--- a/drivers/net/wireless/wl12xx/conf.h
+++ b/drivers/net/wireless/wl12xx/conf.h
@@ -528,7 +528,7 @@ struct conf_rx_settings {
#define CONF_TX_RATE_RETRY_LIMIT 10
/*
- * Rates supported for data packets when operating as AP. Note the absense
+ * Rates supported for data packets when operating as AP. Note the absence
* of the 22Mbps rate. There is a FW limitation on 12 rates so we must drop
* one. The rate dropped is not mandatory under any operating mode.
*/
@@ -616,7 +616,7 @@ enum conf_tx_ac {
CONF_TX_AC_BK = 1, /* background */
CONF_TX_AC_VI = 2, /* video */
CONF_TX_AC_VO = 3, /* voice */
- CONF_TX_AC_CTS2SELF = 4, /* fictious AC, follows AC_VO */
+ CONF_TX_AC_CTS2SELF = 4, /* fictitious AC, follows AC_VO */
CONF_TX_AC_ANY_TID = 0x1f
};
@@ -1210,7 +1210,7 @@ struct conf_memory_settings {
/*
* Minimum required free tx memory blocks in order to assure optimum
- * performence
+ * performance
*
* Range: 0-120
*/
@@ -1218,7 +1218,7 @@ struct conf_memory_settings {
/*
* Minimum required free rx memory blocks in order to assure optimum
- * performence
+ * performance
*
* Range: 0-120
*/
diff --git a/drivers/net/wireless/wl12xx/io.h b/drivers/net/wireless/wl12xx/io.h
index 36e185583ec4..beed621a8ae0 100644
--- a/drivers/net/wireless/wl12xx/io.h
+++ b/drivers/net/wireless/wl12xx/io.h
@@ -94,7 +94,7 @@ static inline int wl1271_translate_addr(struct wl1271 *wl, int addr)
* translated region.
*
* The translated regions occur next to each other in physical device
- * memory, so just add the sizes of the preceeding address regions to
+ * memory, so just add the sizes of the preceding address regions to
* get the offset to the new region.
*
* Currently, only the two first regions are addressed, and the
diff --git a/drivers/net/wireless/wl3501_cs.c b/drivers/net/wireless/wl3501_cs.c
index 3e5befe4d03b..fc08f36fe1f5 100644
--- a/drivers/net/wireless/wl3501_cs.c
+++ b/drivers/net/wireless/wl3501_cs.c
@@ -290,7 +290,7 @@ static void wl3501_get_from_wla(struct wl3501_card *this, u16 src, void *dest,
* \ \- IEEE 802.11 -/ \-------------- len --------------/
* \-struct wl3501_80211_tx_hdr--/ \-------- Ethernet Frame -------/
*
- * Return = Postion in Card
+ * Return = Position in Card
*/
static u16 wl3501_get_tx_buffer(struct wl3501_card *this, u16 len)
{
@@ -1932,7 +1932,7 @@ static int wl3501_config(struct pcmcia_device *link)
this->base_addr = dev->base_addr;
if (!wl3501_get_flash_mac_addr(this)) {
- printk(KERN_WARNING "%s: Cant read MAC addr in flash ROM?\n",
+ printk(KERN_WARNING "%s: Can't read MAC addr in flash ROM?\n",
dev->name);
unregister_netdev(dev);
goto failed;
diff --git a/drivers/net/wireless/zd1211rw/zd_rf_rf2959.c b/drivers/net/wireless/zd1211rw/zd_rf_rf2959.c
index 032542614259..784d9ccb8fef 100644
--- a/drivers/net/wireless/zd1211rw/zd_rf_rf2959.c
+++ b/drivers/net/wireless/zd1211rw/zd_rf_rf2959.c
@@ -169,7 +169,7 @@ static int rf2959_init_hw(struct zd_rf *rf)
{ ZD_CR85, 0x00 }, { ZD_CR86, 0x10 }, { ZD_CR87, 0x2A },
{ ZD_CR88, 0x10 }, { ZD_CR89, 0x24 }, { ZD_CR90, 0x18 },
/* { ZD_CR91, 0x18 }, */
- /* should solve continous CTS frame problems */
+ /* should solve continuous CTS frame problems */
{ ZD_CR91, 0x00 },
{ ZD_CR92, 0x0a }, { ZD_CR93, 0x00 }, { ZD_CR94, 0x01 },
{ ZD_CR95, 0x00 }, { ZD_CR96, 0x40 }, { ZD_CR97, 0x37 },
diff --git a/drivers/net/wireless/zd1211rw/zd_rf_uw2453.c b/drivers/net/wireless/zd1211rw/zd_rf_uw2453.c
index 860b0af7dc3e..c4d324e19c24 100644
--- a/drivers/net/wireless/zd1211rw/zd_rf_uw2453.c
+++ b/drivers/net/wireless/zd1211rw/zd_rf_uw2453.c
@@ -355,7 +355,7 @@ static int uw2453_init_hw(struct zd_rf *rf)
};
static const u32 rv[] = {
- UW2453_REGWRITE(4, 0x2b), /* configure reciever gain */
+ UW2453_REGWRITE(4, 0x2b), /* configure receiver gain */
UW2453_REGWRITE(5, 0x19e4f), /* configure transmitter gain */
UW2453_REGWRITE(6, 0xf81ad), /* enable RX/TX filter tuning */
UW2453_REGWRITE(7, 0x3fffe), /* disable TX gain in test mode */
diff --git a/drivers/net/xen-netback/common.h b/drivers/net/xen-netback/common.h
index 5d7bbf2b2ee7..8753e6ddff8f 100644
--- a/drivers/net/xen-netback/common.h
+++ b/drivers/net/xen-netback/common.h
@@ -73,9 +73,6 @@ struct xenvif {
struct vm_struct *tx_comms_area;
struct vm_struct *rx_comms_area;
- /* Flags that must not be set in dev->features */
- u32 features_disabled;
-
/* Frontend feature information. */
u8 can_sg:1;
u8 gso:1;
diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c
index de569cc19da4..0ca86f9ec4ed 100644
--- a/drivers/net/xen-netback/interface.c
+++ b/drivers/net/xen-netback/interface.c
@@ -165,69 +165,18 @@ static int xenvif_change_mtu(struct net_device *dev, int mtu)
return 0;
}
-static void xenvif_set_features(struct xenvif *vif)
-{
- struct net_device *dev = vif->dev;
- u32 features = dev->features;
-
- if (vif->can_sg)
- features |= NETIF_F_SG;
- if (vif->gso || vif->gso_prefix)
- features |= NETIF_F_TSO;
- if (vif->csum)
- features |= NETIF_F_IP_CSUM;
-
- features &= ~(vif->features_disabled);
-
- if (!(features & NETIF_F_SG) && dev->mtu > ETH_DATA_LEN)
- dev->mtu = ETH_DATA_LEN;
-
- dev->features = features;
-}
-
-static int xenvif_set_tx_csum(struct net_device *dev, u32 data)
-{
- struct xenvif *vif = netdev_priv(dev);
- if (data) {
- if (!vif->csum)
- return -EOPNOTSUPP;
- vif->features_disabled &= ~NETIF_F_IP_CSUM;
- } else {
- vif->features_disabled |= NETIF_F_IP_CSUM;
- }
-
- xenvif_set_features(vif);
- return 0;
-}
-
-static int xenvif_set_sg(struct net_device *dev, u32 data)
+static u32 xenvif_fix_features(struct net_device *dev, u32 features)
{
struct xenvif *vif = netdev_priv(dev);
- if (data) {
- if (!vif->can_sg)
- return -EOPNOTSUPP;
- vif->features_disabled &= ~NETIF_F_SG;
- } else {
- vif->features_disabled |= NETIF_F_SG;
- }
- xenvif_set_features(vif);
- return 0;
-}
+ if (!vif->can_sg)
+ features &= ~NETIF_F_SG;
+ if (!vif->gso && !vif->gso_prefix)
+ features &= ~NETIF_F_TSO;
+ if (!vif->csum)
+ features &= ~NETIF_F_IP_CSUM;
-static int xenvif_set_tso(struct net_device *dev, u32 data)
-{
- struct xenvif *vif = netdev_priv(dev);
- if (data) {
- if (!vif->gso && !vif->gso_prefix)
- return -EOPNOTSUPP;
- vif->features_disabled &= ~NETIF_F_TSO;
- } else {
- vif->features_disabled |= NETIF_F_TSO;
- }
-
- xenvif_set_features(vif);
- return 0;
+ return features;
}
static const struct xenvif_stat {
@@ -274,12 +223,6 @@ static void xenvif_get_strings(struct net_device *dev, u32 stringset, u8 * data)
}
static struct ethtool_ops xenvif_ethtool_ops = {
- .get_tx_csum = ethtool_op_get_tx_csum,
- .set_tx_csum = xenvif_set_tx_csum,
- .get_sg = ethtool_op_get_sg,
- .set_sg = xenvif_set_sg,
- .get_tso = ethtool_op_get_tso,
- .set_tso = xenvif_set_tso,
.get_link = ethtool_op_get_link,
.get_sset_count = xenvif_get_sset_count,
@@ -293,6 +236,7 @@ static struct net_device_ops xenvif_netdev_ops = {
.ndo_open = xenvif_open,
.ndo_stop = xenvif_close,
.ndo_change_mtu = xenvif_change_mtu,
+ .ndo_fix_features = xenvif_fix_features,
};
struct xenvif *xenvif_alloc(struct device *parent, domid_t domid,
@@ -331,7 +275,8 @@ struct xenvif *xenvif_alloc(struct device *parent, domid_t domid,
vif->credit_timeout.expires = jiffies;
dev->netdev_ops = &xenvif_netdev_ops;
- xenvif_set_features(vif);
+ dev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO;
+ dev->features = dev->hw_features;
SET_ETHTOOL_OPS(dev, &xenvif_ethtool_ops);
dev->tx_queue_len = XENVIF_QUEUE_LENGTH;
@@ -367,8 +312,6 @@ int xenvif_connect(struct xenvif *vif, unsigned long tx_ring_ref,
if (vif->irq)
return 0;
- xenvif_set_features(vif);
-
err = xen_netbk_map_frontend_rings(vif, tx_ring_ref, rx_ring_ref);
if (err < 0)
goto err;
@@ -384,9 +327,12 @@ int xenvif_connect(struct xenvif *vif, unsigned long tx_ring_ref,
xenvif_get(vif);
rtnl_lock();
- netif_carrier_on(vif->dev);
if (netif_running(vif->dev))
xenvif_up(vif);
+ if (!vif->can_sg && vif->dev->mtu > ETH_DATA_LEN)
+ dev_set_mtu(vif->dev, ETH_DATA_LEN);
+ netdev_update_features(vif->dev);
+ netif_carrier_on(vif->dev);
rtnl_unlock();
return 0;
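
The rewrite above moves xen-netback from per-feature ethtool setters to the ndo_fix_features model: the core hands the driver a proposed feature mask, the hook strips whatever the frontend did not negotiate (SG, TSO, IP checksum), and netdev_update_features() re-evaluates the mask whenever the negotiated capabilities change (with the MTU clamped separately via dev_set_mtu() when SG is unavailable). A self-contained sketch of that masking flow; the bit values and helper names are illustrative, not the kernel's.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define F_SG        (1u << 0)
#define F_TSO       (1u << 1)
#define F_IP_CSUM   (1u << 2)

struct vif {
        bool can_sg, gso, csum;  /* what the frontend negotiated */
        uint32_t features;       /* currently active feature bits */
};

/* ndo_fix_features analogue: clear bits the peer cannot handle. */
static uint32_t fix_features(const struct vif *vif, uint32_t wanted)
{
        if (!vif->can_sg)
                wanted &= ~F_SG;
        if (!vif->gso)
                wanted &= ~F_TSO;
        if (!vif->csum)
                wanted &= ~F_IP_CSUM;
        return wanted;
}

/* netdev_update_features() analogue: re-run the mask and apply it. */
static void update_features(struct vif *vif, uint32_t hw_features)
{
        vif->features = fix_features(vif, hw_features);
}

int main(void)
{
        struct vif vif = { .can_sg = false, .gso = true, .csum = true };

        update_features(&vif, F_SG | F_TSO | F_IP_CSUM);
        printf("active features: %#x (SG stripped)\n", vif.features);
        return 0;
}
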
diff --git a/drivers/net/xen-netback/xenbus.c b/drivers/net/xen-netback/xenbus.c
index 22b8c3505991..1ce729d6af75 100644
--- a/drivers/net/xen-netback/xenbus.c
+++ b/drivers/net/xen-netback/xenbus.c
@@ -26,7 +26,7 @@ struct backend_info {
struct xenvif *vif;
enum xenbus_state frontend_state;
struct xenbus_watch hotplug_status_watch;
- int have_hotplug_status_watch:1;
+ u8 have_hotplug_status_watch:1;
};
static int connect_rings(struct backend_info *);
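
The one-line type change above matters because a signed one-bit bitfield can only hold 0 and -1 (plain int bitfields are signed with GCC's defaults), so a comparison like `have_hotplug_status_watch == 1` silently never matches; an unsigned type such as u8 makes the flag a plain 0/1 boolean. A quick demonstration, compilable as ordinary C; the unsigned field here uses unsigned int rather than the kernel's u8, but the effect is the same.

#include <stdio.h>

struct flags {
        signed int   s:1;   /* signed: representable values are 0 and -1 */
        unsigned int u:1;   /* unsigned: representable values are 0 and 1 */
};

int main(void)
{
        struct flags f = { 0, 0 };

        f.s = 1;   /* stored as -1 (the compiler may even warn about this) */
        f.u = 1;   /* stored as 1 */

        printf("signed bit == 1?   %s (value %d)\n",
               f.s == 1 ? "yes" : "no", f.s);
        printf("unsigned bit == 1? %s (value %d)\n",
               f.u == 1 ? "yes" : "no", f.u);
        return 0;
}
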
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
index 5c8d9c385be0..db9a763aaa7f 100644
--- a/drivers/net/xen-netfront.c
+++ b/drivers/net/xen-netfront.c
@@ -1140,6 +1140,42 @@ static void xennet_uninit(struct net_device *dev)
gnttab_free_grant_references(np->gref_rx_head);
}
+static u32 xennet_fix_features(struct net_device *dev, u32 features)
+{
+ struct netfront_info *np = netdev_priv(dev);
+ int val;
+
+ if (features & NETIF_F_SG) {
+ if (xenbus_scanf(XBT_NIL, np->xbdev->otherend, "feature-sg",
+ "%d", &val) < 0)
+ val = 0;
+
+ if (!val)
+ features &= ~NETIF_F_SG;
+ }
+
+ if (features & NETIF_F_TSO) {
+ if (xenbus_scanf(XBT_NIL, np->xbdev->otherend,
+ "feature-gso-tcpv4", "%d", &val) < 0)
+ val = 0;
+
+ if (!val)
+ features &= ~NETIF_F_TSO;
+ }
+
+ return features;
+}
+
+static int xennet_set_features(struct net_device *dev, u32 features)
+{
+ if (!(features & NETIF_F_SG) && dev->mtu > ETH_DATA_LEN) {
+ netdev_info(dev, "Reducing MTU because no SG offload");
+ dev->mtu = ETH_DATA_LEN;
+ }
+
+ return 0;
+}
+
static const struct net_device_ops xennet_netdev_ops = {
.ndo_open = xennet_open,
.ndo_uninit = xennet_uninit,
@@ -1148,6 +1184,8 @@ static const struct net_device_ops xennet_netdev_ops = {
.ndo_change_mtu = xennet_change_mtu,
.ndo_set_mac_address = eth_mac_addr,
.ndo_validate_addr = eth_validate_addr,
+ .ndo_fix_features = xennet_fix_features,
+ .ndo_set_features = xennet_set_features,
};
static struct net_device * __devinit xennet_create_dev(struct xenbus_device *dev)
@@ -1209,7 +1247,17 @@ static struct net_device * __devinit xennet_create_dev(struct xenbus_device *dev
netdev->netdev_ops = &xennet_netdev_ops;
netif_napi_add(netdev, &np->napi, xennet_poll, 64);
- netdev->features = NETIF_F_IP_CSUM;
+ netdev->features = NETIF_F_IP_CSUM | NETIF_F_RXCSUM |
+ NETIF_F_GSO_ROBUST;
+ netdev->hw_features = NETIF_F_IP_CSUM | NETIF_F_SG | NETIF_F_TSO;
+
+ /*
+ * Assume that all hw features are available for now. This set
+ * will be adjusted by the call to netdev_update_features() in
+ * xennet_connect() which is the earliest point where we can
+ * negotiate with the backend regarding supported features.
+ */
+ netdev->features |= netdev->hw_features;
SET_ETHTOOL_OPS(netdev, &xennet_ethtool_ops);
SET_NETDEV_DEV(netdev, &dev->dev);
@@ -1416,8 +1464,7 @@ static int setup_netfront(struct xenbus_device *dev, struct netfront_info *info)
goto fail;
err = bind_evtchn_to_irqhandler(info->evtchn, xennet_interrupt,
- IRQF_SAMPLE_RANDOM, netdev->name,
- netdev);
+ 0, netdev->name, netdev);
if (err < 0)
goto fail;
netdev->irq = err;
@@ -1510,54 +1557,6 @@ again:
return err;
}
-static int xennet_set_sg(struct net_device *dev, u32 data)
-{
- if (data) {
- struct netfront_info *np = netdev_priv(dev);
- int val;
-
- if (xenbus_scanf(XBT_NIL, np->xbdev->otherend, "feature-sg",
- "%d", &val) < 0)
- val = 0;
- if (!val)
- return -ENOSYS;
- } else if (dev->mtu > ETH_DATA_LEN)
- dev->mtu = ETH_DATA_LEN;
-
- return ethtool_op_set_sg(dev, data);
-}
-
-static int xennet_set_tso(struct net_device *dev, u32 data)
-{
- if (data) {
- struct netfront_info *np = netdev_priv(dev);
- int val;
-
- if (xenbus_scanf(XBT_NIL, np->xbdev->otherend,
- "feature-gso-tcpv4", "%d", &val) < 0)
- val = 0;
- if (!val)
- return -ENOSYS;
- }
-
- return ethtool_op_set_tso(dev, data);
-}
-
-static void xennet_set_features(struct net_device *dev)
-{
- /* Turn off all GSO bits except ROBUST. */
- dev->features &= ~NETIF_F_GSO_MASK;
- dev->features |= NETIF_F_GSO_ROBUST;
- xennet_set_sg(dev, 0);
-
- /* We need checksum offload to enable scatter/gather and TSO. */
- if (!(dev->features & NETIF_F_IP_CSUM))
- return;
-
- if (!xennet_set_sg(dev, 1))
- xennet_set_tso(dev, 1);
-}
-
static int xennet_connect(struct net_device *dev)
{
struct netfront_info *np = netdev_priv(dev);
@@ -1582,7 +1581,7 @@ static int xennet_connect(struct net_device *dev)
if (err)
return err;
- xennet_set_features(dev);
+ netdev_update_features(dev);
spin_lock_bh(&np->rx_lock);
spin_lock_irq(&np->tx_lock);
@@ -1710,9 +1709,6 @@ static void xennet_get_strings(struct net_device *dev, u32 stringset, u8 * data)
static const struct ethtool_ops xennet_ethtool_ops =
{
- .set_tx_csum = ethtool_op_set_tx_csum,
- .set_sg = xennet_set_sg,
- .set_tso = xennet_set_tso,
.get_link = ethtool_op_get_link,
.get_sset_count = xennet_get_sset_count,
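
On the frontend side the same ndo_fix_features/ndo_set_features split appears, but the capability source is xenstore: each time the mask is recomputed, the hook asks the backend for "feature-sg" and "feature-gso-tcpv4" and drops SG/TSO when they are absent, and ndo_set_features clamps the MTU back to ETH_DATA_LEN once SG is gone. A compact model of that flow, with a trivial lookup standing in for xenbus_scanf() and illustrative feature bits; here the toy backend advertises neither key, so both offloads are dropped and the MTU is reduced.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define F_SG   (1u << 0)
#define F_TSO  (1u << 1)
#define ETH_DATA_LEN 1500

/* Toy xenstore lookup: an old backend that advertises neither key. */
static int backend_feature(const char *key)
{
        (void)key;
        return 0;
}

static uint32_t fix_features(uint32_t features)
{
        if ((features & F_SG) && !backend_feature("feature-sg"))
                features &= ~F_SG;
        if ((features & F_TSO) && !backend_feature("feature-gso-tcpv4"))
                features &= ~F_TSO;
        return features;
}

static void set_features(uint32_t features, int *mtu)
{
        if (!(features & F_SG) && *mtu > ETH_DATA_LEN) {
                printf("Reducing MTU because no SG offload\n");
                *mtu = ETH_DATA_LEN;
        }
}

int main(void)
{
        int mtu = 9000;
        uint32_t f = fix_features(F_SG | F_TSO);

        set_features(f, &mtu);
        printf("features %#x, mtu %d\n", f, mtu);
        return 0;
}
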
diff --git a/drivers/net/xilinx_emaclite.c b/drivers/net/xilinx_emaclite.c
index 2642af4ee491..372572c0adc6 100644
--- a/drivers/net/xilinx_emaclite.c
+++ b/drivers/net/xilinx_emaclite.c
@@ -786,7 +786,7 @@ static int xemaclite_mdio_read(struct mii_bus *bus, int phy_id, int reg)
* @reg: register number to write to
* @val: value to write to the register number specified by reg
*
- * This fucntion waits till the device is ready to accept a new MDIO
+ * This function waits till the device is ready to accept a new MDIO
* request and then writes the val to the MDIO Write Data register.
*/
static int xemaclite_mdio_write(struct mii_bus *bus, int phy_id, int reg,
diff --git a/drivers/net/znet.c b/drivers/net/znet.c
index ae07b3dfbcc1..ec2800ff8d42 100644
--- a/drivers/net/znet.c
+++ b/drivers/net/znet.c
@@ -652,7 +652,7 @@ static irqreturn_t znet_interrupt(int irq, void *dev_id)
dev->stats.tx_errors++;
/* Transceiver may be stuck if cable
- * was removed while emiting a
+ * was removed while emitting a
* packet. Flip it off, then on to
* reset it. This is very empirical,
* but it seems to work. */